FreeBSD/Linux Kernel Cross Reference
sys/dev/mwl/if_mwl.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
    5  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer,
   13  *    without modification.
   14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
   15  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
   16  *    redistribution must be conditioned upon including a substantially
   17  *    similar Disclaimer requirement for further binary redistribution.
   18  *
   19  * NO WARRANTY
   20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   22  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
   23  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
   24  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
   25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
   28  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGES.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 /*
   37  * Driver for the Marvell 88W8363 Wireless LAN controller.
   38  */
   39 
   40 #include "opt_inet.h"
   41 #include "opt_mwl.h"
   42 #include "opt_wlan.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h> 
   46 #include <sys/sysctl.h>
   47 #include <sys/mbuf.h>   
   48 #include <sys/malloc.h>
   49 #include <sys/lock.h>
   50 #include <sys/mutex.h>
   51 #include <sys/kernel.h>
   52 #include <sys/socket.h>
   53 #include <sys/sockio.h>
   54 #include <sys/errno.h>
   55 #include <sys/callout.h>
   56 #include <sys/bus.h>
   57 #include <sys/endian.h>
   58 #include <sys/kthread.h>
   59 #include <sys/taskqueue.h>
   60 
   61 #include <machine/bus.h>
   62 
   63 #include <net/if.h>
   64 #include <net/if_var.h>
   65 #include <net/if_dl.h>
   66 #include <net/if_media.h>
   67 #include <net/if_types.h>
   68 #include <net/if_arp.h>
   69 #include <net/ethernet.h>
   70 #include <net/if_llc.h>
   71 
   72 #include <net/bpf.h>
   73 
   74 #include <net80211/ieee80211_var.h>
   75 #include <net80211/ieee80211_input.h>
   76 #include <net80211/ieee80211_regdomain.h>
   77 
   78 #ifdef INET
   79 #include <netinet/in.h>
   80 #include <netinet/if_ether.h>
   81 #endif /* INET */
   82 
   83 #include <dev/mwl/if_mwlvar.h>
   84 #include <dev/mwl/mwldiag.h>
   85 
   86 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
   87                     const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
   88                     const uint8_t [IEEE80211_ADDR_LEN],
   89                     const uint8_t [IEEE80211_ADDR_LEN]);
   90 static void     mwl_vap_delete(struct ieee80211vap *);
   91 static int      mwl_setupdma(struct mwl_softc *);
   92 static int      mwl_hal_reset(struct mwl_softc *sc);
   93 static int      mwl_init(struct mwl_softc *);
   94 static void     mwl_parent(struct ieee80211com *);
   95 static int      mwl_reset(struct ieee80211vap *, u_long);
   96 static void     mwl_stop(struct mwl_softc *);
   97 static void     mwl_start(struct mwl_softc *);
   98 static int      mwl_transmit(struct ieee80211com *, struct mbuf *);
   99 static int      mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
  100                         const struct ieee80211_bpf_params *);
  101 static int      mwl_media_change(struct ifnet *);
  102 static void     mwl_watchdog(void *);
  103 static int      mwl_ioctl(struct ieee80211com *, u_long, void *);
  104 static void     mwl_radar_proc(void *, int);
  105 static void     mwl_chanswitch_proc(void *, int);
  106 static void     mwl_bawatchdog_proc(void *, int);
  107 static int      mwl_key_alloc(struct ieee80211vap *,
  108                         struct ieee80211_key *,
  109                         ieee80211_keyix *, ieee80211_keyix *);
  110 static int      mwl_key_delete(struct ieee80211vap *,
  111                         const struct ieee80211_key *);
  112 static int      mwl_key_set(struct ieee80211vap *,
  113                         const struct ieee80211_key *);
  114 static int      _mwl_key_set(struct ieee80211vap *,
  115                         const struct ieee80211_key *,
  116                         const uint8_t mac[IEEE80211_ADDR_LEN]);
  117 static int      mwl_mode_init(struct mwl_softc *);
  118 static void     mwl_update_mcast(struct ieee80211com *);
  119 static void     mwl_update_promisc(struct ieee80211com *);
  120 static void     mwl_updateslot(struct ieee80211com *);
  121 static int      mwl_beacon_setup(struct ieee80211vap *);
  122 static void     mwl_beacon_update(struct ieee80211vap *, int);
  123 #ifdef MWL_HOST_PS_SUPPORT
  124 static void     mwl_update_ps(struct ieee80211vap *, int);
  125 static int      mwl_set_tim(struct ieee80211_node *, int);
  126 #endif
  127 static int      mwl_dma_setup(struct mwl_softc *);
  128 static void     mwl_dma_cleanup(struct mwl_softc *);
  129 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
  130                     const uint8_t [IEEE80211_ADDR_LEN]);
  131 static void     mwl_node_cleanup(struct ieee80211_node *);
  132 static void     mwl_node_drain(struct ieee80211_node *);
  133 static void     mwl_node_getsignal(const struct ieee80211_node *,
  134                         int8_t *, int8_t *);
  135 static void     mwl_node_getmimoinfo(const struct ieee80211_node *,
  136                         struct ieee80211_mimo_info *);
  137 static int      mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
  138 static void     mwl_rx_proc(void *, int);
  139 static void     mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
  140 static int      mwl_tx_setup(struct mwl_softc *, int, int);
  141 static int      mwl_wme_update(struct ieee80211com *);
  142 static void     mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
  143 static void     mwl_tx_cleanup(struct mwl_softc *);
  144 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
  145 static int      mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
  146                              struct mwl_txbuf *, struct mbuf *);
  147 static void     mwl_tx_proc(void *, int);
  148 static int      mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
  149 static void     mwl_draintxq(struct mwl_softc *);
  150 static void     mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
  151 static int      mwl_recv_action(struct ieee80211_node *,
  152                         const struct ieee80211_frame *,
  153                         const uint8_t *, const uint8_t *);
  154 static int      mwl_addba_request(struct ieee80211_node *,
  155                         struct ieee80211_tx_ampdu *, int dialogtoken,
  156                         int baparamset, int batimeout);
  157 static int      mwl_addba_response(struct ieee80211_node *,
  158                         struct ieee80211_tx_ampdu *, int status,
  159                         int baparamset, int batimeout);
  160 static void     mwl_addba_stop(struct ieee80211_node *,
  161                         struct ieee80211_tx_ampdu *);
  162 static int      mwl_startrecv(struct mwl_softc *);
  163 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
  164                         struct ieee80211_channel *);
  165 static int      mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
  166 static void     mwl_scan_start(struct ieee80211com *);
  167 static void     mwl_scan_end(struct ieee80211com *);
  168 static void     mwl_set_channel(struct ieee80211com *);
  169 static int      mwl_peerstadb(struct ieee80211_node *,
  170                         int aid, int staid, MWL_HAL_PEERINFO *pi);
  171 static int      mwl_localstadb(struct ieee80211vap *);
  172 static int      mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
  173 static int      allocstaid(struct mwl_softc *sc, int aid);
  174 static void     delstaid(struct mwl_softc *sc, int staid);
  175 static void     mwl_newassoc(struct ieee80211_node *, int);
  176 static void     mwl_agestations(void *);
  177 static int      mwl_setregdomain(struct ieee80211com *,
  178                         struct ieee80211_regdomain *, int,
  179                         struct ieee80211_channel []);
  180 static void     mwl_getradiocaps(struct ieee80211com *, int, int *,
  181                         struct ieee80211_channel []);
  182 static int      mwl_getchannels(struct mwl_softc *);
  183 
  184 static void     mwl_sysctlattach(struct mwl_softc *);
  185 static void     mwl_announce(struct mwl_softc *);
  186 
  187 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
  188     "Marvell driver parameters");
  189 
  190 static  int mwl_rxdesc = MWL_RXDESC;            /* # rx desc's to allocate */
  191 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
  192             0, "rx descriptors allocated");
  193 static  int mwl_rxbuf = MWL_RXBUF;              /* # rx buffers to allocate */
  194 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
  195             0, "rx buffers allocated");
  196 static  int mwl_txbuf = MWL_TXBUF;              /* # tx buffers to allocate */
  197 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
  198             0, "tx buffers allocated");
  199 static  int mwl_txcoalesce = 8;         /* # tx packets to q before poking f/w*/
  200 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
  201             0, "tx buffers to send at once");
  202 static  int mwl_rxquota = MWL_RXBUF;            /* # max buffers to process */
  203 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
  204             0, "max rx buffers to process per interrupt");
  205 static  int mwl_rxdmalow = 3;                   /* # min buffers for wakeup */
  206 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
  207             0, "min free rx buffers before restarting traffic");
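      /*
       * All of the knobs above except rxdesc are flagged CTLFLAG_RWTUN,
       * so they can be seeded from loader.conf (e.g. hw.mwl.txbuf="256")
       * as well as changed at runtime with sysctl(8); rxdesc is plain
       * CTLFLAG_RW and is adjustable only at runtime.
       */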
  208 
  209 #ifdef MWL_DEBUG
  210 static  int mwl_debug = 0;
  211 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
  212             0, "control debugging printfs");
  213 enum {
  214         MWL_DEBUG_XMIT          = 0x00000001,   /* basic xmit operation */
  215         MWL_DEBUG_XMIT_DESC     = 0x00000002,   /* xmit descriptors */
  216         MWL_DEBUG_RECV          = 0x00000004,   /* basic recv operation */
  217         MWL_DEBUG_RECV_DESC     = 0x00000008,   /* recv descriptors */
  218         MWL_DEBUG_RESET         = 0x00000010,   /* reset processing */
  219         MWL_DEBUG_BEACON        = 0x00000020,   /* beacon handling */
  220         MWL_DEBUG_INTR          = 0x00000040,   /* ISR */
  221         MWL_DEBUG_TX_PROC       = 0x00000080,   /* tx ISR proc */
  222         MWL_DEBUG_RX_PROC       = 0x00000100,   /* rx ISR proc */
  223         MWL_DEBUG_KEYCACHE      = 0x00000200,   /* key cache management */
  224         MWL_DEBUG_STATE         = 0x00000400,   /* 802.11 state transitions */
  225         MWL_DEBUG_NODE          = 0x00000800,   /* node management */
  226         MWL_DEBUG_RECV_ALL      = 0x00001000,   /* trace all frames (beacons) */
  227         MWL_DEBUG_TSO           = 0x00002000,   /* TSO processing */
  228         MWL_DEBUG_AMPDU         = 0x00004000,   /* BA stream handling */
  229         MWL_DEBUG_ANY           = 0xffffffff
  230 };
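      /*
       * The bits above are tested against sc->sc_debug by DPRINTF()
       * below and may be OR'd together; a mask of 0x5, for example,
       * enables both MWL_DEBUG_XMIT and MWL_DEBUG_RECV.  The
       * hw.mwl.debug knob above is the tunable used to control them.
       */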
  231 #define IS_BEACON(wh) \
  232     ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
  233          (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
  234 #define IFF_DUMPPKTS_RECV(sc, wh) \
  235     ((sc->sc_debug & MWL_DEBUG_RECV) && \
  236       ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
  237 #define IFF_DUMPPKTS_XMIT(sc) \
  238         (sc->sc_debug & MWL_DEBUG_XMIT)
  239 
  240 #define DPRINTF(sc, m, fmt, ...) do {                           \
  241         if (sc->sc_debug & (m))                                 \
  242                 printf(fmt, __VA_ARGS__);                       \
  243 } while (0)
  244 #define KEYPRINTF(sc, hk, mac) do {                             \
  245         if (sc->sc_debug & MWL_DEBUG_KEYCACHE)                  \
  246                 mwl_keyprint(sc, __func__, hk, mac);            \
  247 } while (0)
  248 static  void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
  249 static  void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
  250 #else
  251 #define IFF_DUMPPKTS_RECV(sc, wh)       0
  252 #define IFF_DUMPPKTS_XMIT(sc)           0
  253 #define DPRINTF(sc, m, fmt, ...)        do { (void )sc; } while (0)
  254 #define KEYPRINTF(sc, k, mac)           do { (void )sc; } while (0)
  255 #endif
  256 
  257 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
  258 
  259 /*
  260  * Each packet has fixed front matter: a 2-byte length
  261  * of the payload, followed by a 4-address 802.11 header
  262  * (regardless of the actual header and always w/o any
  263  * QoS header).  The payload then follows.
  264  */
  265 struct mwltxrec {
  266         uint16_t fwlen;
  267         struct ieee80211_frame_addr4 wh;
  268 } __packed;
  269 
  270 /*
  271  * Read/Write shorthands for accesses to BAR 0.  Note
  272  * that all BAR 1 operations are done in the "hal" and
  273  * there should be no reference to them here.
  274  */
  275 #ifdef MWL_DEBUG
  276 static __inline uint32_t
  277 RD4(struct mwl_softc *sc, bus_size_t off)
  278 {
  279         return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
  280 }
  281 #endif
  282 
  283 static __inline void
  284 WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
  285 {
  286         bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
  287 }
  288 
  289 int
  290 mwl_attach(uint16_t devid, struct mwl_softc *sc)
  291 {
  292         struct ieee80211com *ic = &sc->sc_ic;
  293         struct mwl_hal *mh;
  294         int error = 0;
  295 
  296         DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
  297 
  298         /*
  299          * Setup the RX free list lock early, so it can be consistently
  300          * removed.
  301          */
  302         MWL_RXFREE_INIT(sc);
  303 
  304         mh = mwl_hal_attach(sc->sc_dev, devid,
  305             sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
  306         if (mh == NULL) {
  307                 device_printf(sc->sc_dev, "unable to attach HAL\n");
  308                 error = EIO;
  309                 goto bad;
  310         }
  311         sc->sc_mh = mh;
  312         /*
  313          * Load firmware so we can get setup.  We arbitrarily
  314          * pick station firmware; we'll re-load firmware as
  315          * needed so setting up the wrong mode isn't a big deal.
  316          */
  317         if (mwl_hal_fwload(mh, NULL) != 0) {
  318                 device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
  319                 error = EIO;
  320                 goto bad1;
  321         }
  322         if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
  323                 device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
  324                 error = EIO;
  325                 goto bad1;
  326         }
  327         error = mwl_getchannels(sc);
  328         if (error != 0)
  329                 goto bad1;
  330 
  331         sc->sc_txantenna = 0;           /* h/w default */
  332         sc->sc_rxantenna = 0;           /* h/w default */
  333         sc->sc_invalid = 0;             /* ready to go, enable int handling */
  334         sc->sc_ageinterval = MWL_AGEINTERVAL;
  335 
  336         /*
  337          * Allocate tx+rx descriptors and populate the lists.
  338          * We immediately push the information to the firmware
  339          * as otherwise it gets upset.
  340          */
  341         error = mwl_dma_setup(sc);
  342         if (error != 0) {
  343                 device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
  344                     error);
  345                 goto bad1;
  346         }
  347         error = mwl_setupdma(sc);       /* push to firmware */
  348         if (error != 0)                 /* NB: mwl_setupdma prints msg */
  349                 goto bad1;
  350 
  351         callout_init(&sc->sc_timer, 1);
  352         callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
  353         mbufq_init(&sc->sc_snd, ifqmaxlen);
  354 
  355         sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
  356                 taskqueue_thread_enqueue, &sc->sc_tq);
  357         taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
  358                 "%s taskq", device_get_nameunit(sc->sc_dev));
  359 
  360         NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
  361         TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
  362         TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
  363         TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
  364 
  365         /* NB: insure BK queue is the lowest priority h/w queue */
  366         if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
  367                 device_printf(sc->sc_dev,
  368                     "unable to setup xmit queue for %s traffic!\n",
  369                      ieee80211_wme_acnames[WME_AC_BK]);
  370                 error = EIO;
  371                 goto bad2;
  372         }
  373         if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
  374             !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
  375             !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
  376                 /*
  377                  * Not enough hardware tx queues to properly do WME;
  378                  * just punt and assign them all to the same h/w queue.
  379                  * We could do a better job of this if, for example,
  380                  * we allocate queues when we switch from station to
  381                  * AP mode.
  382                  */
  383                 if (sc->sc_ac2q[WME_AC_VI] != NULL)
  384                         mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
  385                 if (sc->sc_ac2q[WME_AC_BE] != NULL)
  386                         mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
  387                 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
  388                 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
  389                 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
  390         }
  391         TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
  392 
  393         ic->ic_softc = sc;
  394         ic->ic_name = device_get_nameunit(sc->sc_dev);
  395         /* XXX not right but it's not used anywhere important */
  396         ic->ic_phytype = IEEE80211_T_OFDM;
  397         ic->ic_opmode = IEEE80211_M_STA;
  398         ic->ic_caps =
  399                   IEEE80211_C_STA               /* station mode supported */
  400                 | IEEE80211_C_HOSTAP            /* hostap mode */
  401                 | IEEE80211_C_MONITOR           /* monitor mode */
  402 #if 0
  403                 | IEEE80211_C_IBSS              /* ibss, nee adhoc, mode */
  404                 | IEEE80211_C_AHDEMO            /* adhoc demo mode */
  405 #endif
  406                 | IEEE80211_C_MBSS              /* mesh point link mode */
  407                 | IEEE80211_C_WDS               /* WDS supported */
  408                 | IEEE80211_C_SHPREAMBLE        /* short preamble supported */
  409                 | IEEE80211_C_SHSLOT            /* short slot time supported */
  410                 | IEEE80211_C_WME               /* WME/WMM supported */
  411                 | IEEE80211_C_BURST             /* xmit bursting supported */
  412                 | IEEE80211_C_WPA               /* capable of WPA1+WPA2 */
  413                 | IEEE80211_C_BGSCAN            /* capable of bg scanning */
  414                 | IEEE80211_C_TXFRAG            /* handle tx frags */
  415                 | IEEE80211_C_TXPMGT            /* capable of txpow mgt */
  416                 | IEEE80211_C_DFS               /* DFS supported */
  417                 ;
  418 
  419         ic->ic_htcaps =
  420                   IEEE80211_HTCAP_SMPS_ENA      /* SM PS mode enabled */
  421                 | IEEE80211_HTCAP_CHWIDTH40     /* 40MHz channel width */
  422                 | IEEE80211_HTCAP_SHORTGI20     /* short GI in 20MHz */
  423                 | IEEE80211_HTCAP_SHORTGI40     /* short GI in 40MHz */
  424                 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
  425 #if MWL_AGGR_SIZE == 7935
  426                 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
  427 #else
  428                 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
  429 #endif
  430 #if 0
  431                 | IEEE80211_HTCAP_PSMP          /* PSMP supported */
  432                 | IEEE80211_HTCAP_40INTOLERANT  /* 40MHz intolerant */
  433 #endif
  434                 /* s/w capabilities */
  435                 | IEEE80211_HTC_HT              /* HT operation */
  436                 | IEEE80211_HTC_AMPDU           /* tx A-MPDU */
  437                 | IEEE80211_HTC_AMSDU           /* tx A-MSDU */
  438                 | IEEE80211_HTC_SMPS            /* SMPS available */
  439                 ;
  440 
  441         /*
  442          * Mark h/w crypto support.
  443          * XXX no way to query h/w support.
  444          */
  445         ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
  446                           |  IEEE80211_CRYPTO_AES_CCM
  447                           |  IEEE80211_CRYPTO_TKIP
  448                           |  IEEE80211_CRYPTO_TKIPMIC
  449                           ;
  450         /*
  451          * Transmit requires space in the packet for a special
  452          * format transmit record and optional padding between
  453          * this record and the payload.  Ask the net80211 layer
  454          * to arrange this when encapsulating packets so we can
  455          * add it efficiently. 
  456          */
  457         ic->ic_headroom = sizeof(struct mwltxrec) -
  458                 sizeof(struct ieee80211_frame);
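              /*
               * With the standard net80211 frame layouts (a 24-byte
               * ieee80211_frame and a 30-byte ieee80211_frame_addr4) plus
               * the 2-byte fwlen, this works out to 8 bytes of headroom.
               */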
  459 
  460         IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);
  461 
  462         /* call MI attach routine. */
  463         ieee80211_ifattach(ic);
  464         ic->ic_setregdomain = mwl_setregdomain;
  465         ic->ic_getradiocaps = mwl_getradiocaps;
  466         /* override default methods */
  467         ic->ic_raw_xmit = mwl_raw_xmit;
  468         ic->ic_newassoc = mwl_newassoc;
  469         ic->ic_updateslot = mwl_updateslot;
  470         ic->ic_update_mcast = mwl_update_mcast;
  471         ic->ic_update_promisc = mwl_update_promisc;
  472         ic->ic_wme.wme_update = mwl_wme_update;
  473         ic->ic_transmit = mwl_transmit;
  474         ic->ic_ioctl = mwl_ioctl;
  475         ic->ic_parent = mwl_parent;
  476 
  477         ic->ic_node_alloc = mwl_node_alloc;
  478         sc->sc_node_cleanup = ic->ic_node_cleanup;
  479         ic->ic_node_cleanup = mwl_node_cleanup;
  480         sc->sc_node_drain = ic->ic_node_drain;
  481         ic->ic_node_drain = mwl_node_drain;
  482         ic->ic_node_getsignal = mwl_node_getsignal;
  483         ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
  484 
  485         ic->ic_scan_start = mwl_scan_start;
  486         ic->ic_scan_end = mwl_scan_end;
  487         ic->ic_set_channel = mwl_set_channel;
  488 
  489         sc->sc_recv_action = ic->ic_recv_action;
  490         ic->ic_recv_action = mwl_recv_action;
  491         sc->sc_addba_request = ic->ic_addba_request;
  492         ic->ic_addba_request = mwl_addba_request;
  493         sc->sc_addba_response = ic->ic_addba_response;
  494         ic->ic_addba_response = mwl_addba_response;
  495         sc->sc_addba_stop = ic->ic_addba_stop;
  496         ic->ic_addba_stop = mwl_addba_stop;
  497 
  498         ic->ic_vap_create = mwl_vap_create;
  499         ic->ic_vap_delete = mwl_vap_delete;
  500 
  501         ieee80211_radiotap_attach(ic,
  502             &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
  503                 MWL_TX_RADIOTAP_PRESENT,
  504             &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
  505                 MWL_RX_RADIOTAP_PRESENT);
  506         /*
  507          * Setup dynamic sysctl's now that country code and
  508          * regdomain are available from the hal.
  509          */
  510         mwl_sysctlattach(sc);
  511 
  512         if (bootverbose)
  513                 ieee80211_announce(ic);
  514         mwl_announce(sc);
  515         return 0;
  516 bad2:
  517         mwl_dma_cleanup(sc);
  518 bad1:
  519         mwl_hal_detach(mh);
  520 bad:
  521         MWL_RXFREE_DESTROY(sc);
  522         sc->sc_invalid = 1;
  523         return error;
  524 }
  525 
  526 int
  527 mwl_detach(struct mwl_softc *sc)
  528 {
  529         struct ieee80211com *ic = &sc->sc_ic;
  530 
  531         MWL_LOCK(sc);
  532         mwl_stop(sc);
  533         MWL_UNLOCK(sc);
  534         /*
  535          * NB: the order of these is important:
  536          * o call the 802.11 layer before detaching the hal to
  537          *   insure callbacks into the driver to delete global
  538          *   key cache entries can be handled
  539          * o reclaim the tx queue data structures after calling
  540          *   the 802.11 layer as we'll get called back to reclaim
  541          *   node state and potentially want to use them
  542          * o to cleanup the tx queues the hal is called, so detach
  543          *   it last
  544          * Other than that, it's straightforward...
  545          */
  546         ieee80211_ifdetach(ic);
  547         callout_drain(&sc->sc_watchdog);
  548         mwl_dma_cleanup(sc);
  549         MWL_RXFREE_DESTROY(sc);
  550         mwl_tx_cleanup(sc);
  551         mwl_hal_detach(sc->sc_mh);
  552         mbufq_drain(&sc->sc_snd);
  553 
  554         return 0;
  555 }
  556 
  557 /*
  558  * MAC address handling for multiple BSS on the same radio.
  559  * The first vap uses the MAC address from the EEPROM.  For
  560  * subsequent vap's we set the U/L bit (bit 1) in the MAC
  561  * address and use the next six bits as an index.
  562  */
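      /*
       * Concretely: assign_address() below picks the lowest clear bit i
       * in sc_bssidmask; for i != 0 it ORs (i << 2) | 0x02 into the
       * first octet (so index 1 turns a leading 0x00 into 0x06), and
       * reclaim_address() recovers the index as mac[0] >> 2.  Index 0
       * reuses the EEPROM address and is reference-counted in sc_nbssid0.
       */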
  563 static void
  564 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
  565 {
  566         int i;
  567 
  568         if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
  569                 /* NB: we only do this if h/w supports multiple bssid */
  570                 for (i = 0; i < 32; i++)
  571                         if ((sc->sc_bssidmask & (1<<i)) == 0)
  572                                 break;
  573                 if (i != 0)
  574                         mac[0] |= (i << 2)|0x2;
  575         } else
  576                 i = 0;
  577         sc->sc_bssidmask |= 1<<i;
  578         if (i == 0)
  579                 sc->sc_nbssid0++;
  580 }
  581 
  582 static void
  583 reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
  584 {
  585         int i = mac[0] >> 2;
  586         if (i != 0 || --sc->sc_nbssid0 == 0)
  587                 sc->sc_bssidmask &= ~(1<<i);
  588 }
  589 
  590 static struct ieee80211vap *
  591 mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
  592     enum ieee80211_opmode opmode, int flags,
  593     const uint8_t bssid[IEEE80211_ADDR_LEN],
  594     const uint8_t mac0[IEEE80211_ADDR_LEN])
  595 {
  596         struct mwl_softc *sc = ic->ic_softc;
  597         struct mwl_hal *mh = sc->sc_mh;
  598         struct ieee80211vap *vap, *apvap;
  599         struct mwl_hal_vap *hvap;
  600         struct mwl_vap *mvp;
  601         uint8_t mac[IEEE80211_ADDR_LEN];
  602 
  603         IEEE80211_ADDR_COPY(mac, mac0);
  604         switch (opmode) {
  605         case IEEE80211_M_HOSTAP:
  606         case IEEE80211_M_MBSS:
  607                 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
  608                         assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
  609                 hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
  610                 if (hvap == NULL) {
  611                         if ((flags & IEEE80211_CLONE_MACADDR) == 0)
  612                                 reclaim_address(sc, mac);
  613                         return NULL;
  614                 }
  615                 break;
  616         case IEEE80211_M_STA:
  617                 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
  618                         assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
  619                 hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
  620                 if (hvap == NULL) {
  621                         if ((flags & IEEE80211_CLONE_MACADDR) == 0)
  622                                 reclaim_address(sc, mac);
  623                         return NULL;
  624                 }
  625                 /* no h/w beacon miss support; always use s/w */
  626                 flags |= IEEE80211_CLONE_NOBEACONS;
  627                 break;
  628         case IEEE80211_M_WDS:
  629                 hvap = NULL;            /* NB: we use associated AP vap */
  630                 if (sc->sc_napvaps == 0)
  631                         return NULL;    /* no existing AP vap */
  632                 break;
  633         case IEEE80211_M_MONITOR:
  634                 hvap = NULL;
  635                 break;
  636         case IEEE80211_M_IBSS:
  637         case IEEE80211_M_AHDEMO:
  638         default:
  639                 return NULL;
  640         }
  641 
  642         mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
  643         mvp->mv_hvap = hvap;
  644         if (opmode == IEEE80211_M_WDS) {
  645                 /*
  646                  * WDS vaps must have an associated AP vap; find one.
  647                  * XXX not right.
  648                  */
  649                 TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
  650                         if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
  651                                 mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
  652                                 break;
  653                         }
  654                 KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
  655         }
  656         vap = &mvp->mv_vap;
  657         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
  658         /* override with driver methods */
  659         mvp->mv_newstate = vap->iv_newstate;
  660         vap->iv_newstate = mwl_newstate;
  661         vap->iv_max_keyix = 0;  /* XXX */
  662         vap->iv_key_alloc = mwl_key_alloc;
  663         vap->iv_key_delete = mwl_key_delete;
  664         vap->iv_key_set = mwl_key_set;
  665 #ifdef MWL_HOST_PS_SUPPORT
  666         if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
  667                 vap->iv_update_ps = mwl_update_ps;
  668                 mvp->mv_set_tim = vap->iv_set_tim;
  669                 vap->iv_set_tim = mwl_set_tim;
  670         }
  671 #endif
  672         vap->iv_reset = mwl_reset;
  673         vap->iv_update_beacon = mwl_beacon_update;
  674 
  675         /* override max aid so sta's cannot assoc when we're out of sta id's */
  676         vap->iv_max_aid = MWL_MAXSTAID;
  677         /* override default A-MPDU rx parameters */
  678         vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
  679         vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
  680 
  681         /* complete setup */
  682         ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
  683             mac);
  684 
  685         switch (vap->iv_opmode) {
  686         case IEEE80211_M_HOSTAP:
  687         case IEEE80211_M_MBSS:
  688         case IEEE80211_M_STA:
  689                 /*
  690                  * Setup sta db entry for local address.
  691                  */
  692                 mwl_localstadb(vap);
  693                 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
  694                     vap->iv_opmode == IEEE80211_M_MBSS)
  695                         sc->sc_napvaps++;
  696                 else
  697                         sc->sc_nstavaps++;
  698                 break;
  699         case IEEE80211_M_WDS:
  700                 sc->sc_nwdsvaps++;
  701                 break;
  702         default:
  703                 break;
  704         }
  705         /*
  706          * Setup overall operating mode.
  707          */
  708         if (sc->sc_napvaps)
  709                 ic->ic_opmode = IEEE80211_M_HOSTAP;
  710         else if (sc->sc_nstavaps)
  711                 ic->ic_opmode = IEEE80211_M_STA;
  712         else
  713                 ic->ic_opmode = opmode;
  714 
  715         return vap;
  716 }
  717 
  718 static void
  719 mwl_vap_delete(struct ieee80211vap *vap)
  720 {
  721         struct mwl_vap *mvp = MWL_VAP(vap);
  722         struct mwl_softc *sc = vap->iv_ic->ic_softc;
  723         struct mwl_hal *mh = sc->sc_mh;
  724         struct mwl_hal_vap *hvap = mvp->mv_hvap;
  725         enum ieee80211_opmode opmode = vap->iv_opmode;
  726 
  727         /* XXX disallow ap vap delete if WDS still present */
  728         if (sc->sc_running) {
  729                 /* quiesce h/w while we remove the vap */
  730                 mwl_hal_intrset(mh, 0);         /* disable interrupts */
  731         }
  732         ieee80211_vap_detach(vap);
  733         switch (opmode) {
  734         case IEEE80211_M_HOSTAP:
  735         case IEEE80211_M_MBSS:
  736         case IEEE80211_M_STA:
  737                 KASSERT(hvap != NULL, ("no hal vap handle"));
  738                 (void) mwl_hal_delstation(hvap, vap->iv_myaddr);
  739                 mwl_hal_delvap(hvap);
  740                 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
  741                         sc->sc_napvaps--;
  742                 else
  743                         sc->sc_nstavaps--;
  744                 /* XXX don't do it for IEEE80211_CLONE_MACADDR */
  745                 reclaim_address(sc, vap->iv_myaddr);
  746                 break;
  747         case IEEE80211_M_WDS:
  748                 sc->sc_nwdsvaps--;
  749                 break;
  750         default:
  751                 break;
  752         }
  753         mwl_cleartxq(sc, vap);
  754         free(mvp, M_80211_VAP);
  755         if (sc->sc_running)
  756                 mwl_hal_intrset(mh, sc->sc_imask);
  757 }
  758 
  759 void
  760 mwl_suspend(struct mwl_softc *sc)
  761 {
  762 
  763         MWL_LOCK(sc);
  764         mwl_stop(sc);
  765         MWL_UNLOCK(sc);
  766 }
  767 
  768 void
  769 mwl_resume(struct mwl_softc *sc)
  770 {
  771         int error = EDOOFUS;
  772 
  773         MWL_LOCK(sc);
  774         if (sc->sc_ic.ic_nrunning > 0)
  775                 error = mwl_init(sc);
  776         MWL_UNLOCK(sc);
  777 
  778         if (error == 0)
  779                 ieee80211_start_all(&sc->sc_ic);        /* start all vap's */
  780 }
  781 
  782 void
  783 mwl_shutdown(void *arg)
  784 {
  785         struct mwl_softc *sc = arg;
  786 
  787         MWL_LOCK(sc);
  788         mwl_stop(sc);
  789         MWL_UNLOCK(sc);
  790 }
  791 
  792 /*
  793  * Interrupt handler.  Most of the actual processing is deferred.
  794  */
  795 void
  796 mwl_intr(void *arg)
  797 {
  798         struct mwl_softc *sc = arg;
  799         struct mwl_hal *mh = sc->sc_mh;
  800         uint32_t status;
  801 
  802         if (sc->sc_invalid) {
  803                 /*
  804                  * The hardware is not ready/present, don't touch anything.
  805                  * Note this can happen early on if the IRQ is shared.
  806                  */
  807                 DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
  808                 return;
  809         }
  810         /*
  811          * Figure out the reason(s) for the interrupt.
  812          */
  813         mwl_hal_getisr(mh, &status);            /* NB: clears ISR too */
  814         if (status == 0)                        /* must be a shared irq */
  815                 return;
  816 
  817         DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
  818             __func__, status, sc->sc_imask);
  819         if (status & MACREG_A2HRIC_BIT_RX_RDY)
  820                 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
  821         if (status & MACREG_A2HRIC_BIT_TX_DONE)
  822                 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
  823         if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
  824                 taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
  825         if (status & MACREG_A2HRIC_BIT_OPC_DONE)
  826                 mwl_hal_cmddone(mh);
  827         if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
  828                 ;
  829         }
  830         if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
  831                 /* TKIP ICV error */
  832                 sc->sc_stats.mst_rx_badtkipicv++;
  833         }
  834         if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
  835                 /* 11n aggregation queue is empty, re-fill */
  836                 ;
  837         }
  838         if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
  839                 ;
  840         }
  841         if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
  842                 /* radar detected, process event */
  843                 taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
  844         }
  845         if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
  846                 /* DFS channel switch */
  847                 taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
  848         }
  849 }
  850 
  851 static void
  852 mwl_radar_proc(void *arg, int pending)
  853 {
  854         struct mwl_softc *sc = arg;
  855         struct ieee80211com *ic = &sc->sc_ic;
  856 
  857         DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
  858             __func__, pending);
  859 
  860         sc->sc_stats.mst_radardetect++;
  861         /* XXX stop h/w BA streams? */
  862 
  863         IEEE80211_LOCK(ic);
  864         ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
  865         IEEE80211_UNLOCK(ic);
  866 }
  867 
  868 static void
  869 mwl_chanswitch_proc(void *arg, int pending)
  870 {
  871         struct mwl_softc *sc = arg;
  872         struct ieee80211com *ic = &sc->sc_ic;
  873 
  874         DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
  875             __func__, pending);
  876 
  877         IEEE80211_LOCK(ic);
  878         sc->sc_csapending = 0;
  879         ieee80211_csa_completeswitch(ic);
  880         IEEE80211_UNLOCK(ic);
  881 }
  882 
  883 static void
  884 mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
  885 {
  886         struct ieee80211_node *ni = sp->data[0];
  887 
  888         /* send DELBA and drop the stream */
  889         ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
  890 }
  891 
  892 static void
  893 mwl_bawatchdog_proc(void *arg, int pending)
  894 {
  895         struct mwl_softc *sc = arg;
  896         struct mwl_hal *mh = sc->sc_mh;
  897         const MWL_HAL_BASTREAM *sp;
  898         uint8_t bitmap, n;
  899 
  900         sc->sc_stats.mst_bawatchdog++;
  901 
  902         if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
  903                 DPRINTF(sc, MWL_DEBUG_AMPDU,
  904                     "%s: could not get bitmap\n", __func__);
  905                 sc->sc_stats.mst_bawatchdog_failed++;
  906                 return;
  907         }
  908         DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
  909         if (bitmap == 0xff) {
  910                 n = 0;
  911                 /* disable all ba streams */
  912                 for (bitmap = 0; bitmap < 8; bitmap++) {
  913                         sp = mwl_hal_bastream_lookup(mh, bitmap);
  914                         if (sp != NULL) {
  915                                 mwl_bawatchdog(sp);
  916                                 n++;
  917                         }
  918                 }
  919                 if (n == 0) {
  920                         DPRINTF(sc, MWL_DEBUG_AMPDU,
  921                             "%s: no BA streams found\n", __func__);
  922                         sc->sc_stats.mst_bawatchdog_empty++;
  923                 }
  924         } else if (bitmap != 0xaa) {
  925                 /* disable a single ba stream */
  926                 sp = mwl_hal_bastream_lookup(mh, bitmap);
  927                 if (sp != NULL) {
  928                         mwl_bawatchdog(sp);
  929                 } else {
  930                         DPRINTF(sc, MWL_DEBUG_AMPDU,
  931                             "%s: no BA stream %d\n", __func__, bitmap);
  932                         sc->sc_stats.mst_bawatchdog_notfound++;
  933                 }
  934         }
  935 }
  936 
  937 /*
  938  * Convert net80211 channel to a HAL channel.
  939  */
  940 static void
  941 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
  942 {
  943         hc->channel = chan->ic_ieee;
  944 
  945         *(uint32_t *)&hc->channelFlags = 0;
  946         if (IEEE80211_IS_CHAN_2GHZ(chan))
  947                 hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
  948         else if (IEEE80211_IS_CHAN_5GHZ(chan))
  949                 hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
  950         if (IEEE80211_IS_CHAN_HT40(chan)) {
  951                 hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
  952                 if (IEEE80211_IS_CHAN_HT40U(chan))
  953                         hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
  954                 else
  955                         hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
  956         } else
  957                 hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
  958         /* XXX 10MHz channels */
  959 }
  960 
  961 /*
  962  * Inform firmware of our tx/rx dma setup.  The BAR 0
  963  * writes below are for compatibility with older firmware.
  964  * For current firmware we send this information with a
  965  * cmd block via mwl_hal_sethwdma.
  966  */
  967 static int
  968 mwl_setupdma(struct mwl_softc *sc)
  969 {
  970         int error, i;
  971 
  972         sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
  973         WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
  974         WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
  975 
  976         for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
  977                 struct mwl_txq *txq = &sc->sc_txq[i];
  978                 sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
  979                 WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
  980         }
  981         sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
  982         sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
  983 
  984         error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
  985         if (error != 0) {
  986                 device_printf(sc->sc_dev,
  987                     "unable to setup tx/rx dma; hal status %u\n", error);
  988                 /* XXX */
  989         }
  990         return error;
  991 }
  992 
  993 /*
  994  * Inform firmware of tx rate parameters.
  995  * Called after a channel change.
  996  */
  997 static int
  998 mwl_setcurchanrates(struct mwl_softc *sc)
  999 {
 1000         struct ieee80211com *ic = &sc->sc_ic;
 1001         const struct ieee80211_rateset *rs;
 1002         MWL_HAL_TXRATE rates;
 1003 
 1004         memset(&rates, 0, sizeof(rates));
 1005         rs = ieee80211_get_suprates(ic, ic->ic_curchan);
 1006         /* rate used to send management frames */
 1007         rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
 1008         /* rate used to send multicast frames */
 1009         rates.McastRate = rates.MgtRate;
 1010 
 1011         return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
 1012 }
 1013 
 1014 /*
 1015  * Inform firmware of tx rate parameters.  Called whenever
 1016  * user-settable params change and after a channel change.
 1017  */
 1018 static int
 1019 mwl_setrates(struct ieee80211vap *vap)
 1020 {
 1021         struct mwl_vap *mvp = MWL_VAP(vap);
 1022         struct ieee80211_node *ni = vap->iv_bss;
 1023         const struct ieee80211_txparam *tp = ni->ni_txparms;
 1024         MWL_HAL_TXRATE rates;
 1025 
 1026         KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
 1027 
 1028         /*
 1029          * Update the h/w rate map.
 1030          * NB: 0x80 for MCS is passed through unchanged
 1031          */
 1032         memset(&rates, 0, sizeof(rates));
 1033         /* rate used to send management frames */
 1034         rates.MgtRate = tp->mgmtrate;
 1035         /* rate used to send multicast frames */
 1036         rates.McastRate = tp->mcastrate;
 1037 
 1038         /* while here calculate EAPOL fixed rate cookie */
 1039         mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
 1040 
 1041         return mwl_hal_settxrate(mvp->mv_hvap,
 1042             tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
 1043                 RATE_FIXED : RATE_AUTO, &rates);
 1044 }
 1045 
 1046 /*
 1047  * Setup a fixed xmit rate cookie for EAPOL frames.
 1048  */
 1049 static void
 1050 mwl_seteapolformat(struct ieee80211vap *vap)
 1051 {
 1052         struct mwl_vap *mvp = MWL_VAP(vap);
 1053         struct ieee80211_node *ni = vap->iv_bss;
 1054         enum ieee80211_phymode mode;
 1055         uint8_t rate;
 1056 
 1057         KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
 1058 
 1059         mode = ieee80211_chan2mode(ni->ni_chan);
 1060         /*
 1061          * Use legacy rates when operating a mixed HT+non-HT bss.
 1062          * NB: this may violate POLA for sta and wds vap's.
 1063          */
 1064         if (mode == IEEE80211_MODE_11NA &&
 1065             (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
 1066                 rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
 1067         else if (mode == IEEE80211_MODE_11NG &&
 1068             (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
 1069                 rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
 1070         else
 1071                 rate = vap->iv_txparms[mode].mgmtrate;
 1072 
 1073         mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
 1074 }
 1075 
 1076 /*
 1077  * Map SKU+country code to region code for radar bin'ing.
 1078  */
 1079 static int
 1080 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
 1081 {
 1082         switch (rd->regdomain) {
 1083         case SKU_FCC:
 1084         case SKU_FCC3:
 1085                 return DOMAIN_CODE_FCC;
 1086         case SKU_CA:
 1087                 return DOMAIN_CODE_IC;
 1088         case SKU_ETSI:
 1089         case SKU_ETSI2:
 1090         case SKU_ETSI3:
 1091                 if (rd->country == CTRY_SPAIN)
 1092                         return DOMAIN_CODE_SPAIN;
 1093                 if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
 1094                         return DOMAIN_CODE_FRANCE;
 1095                 /* XXX force 1.3.1 radar type */
 1096                 return DOMAIN_CODE_ETSI_131;
 1097         case SKU_JAPAN:
 1098                 return DOMAIN_CODE_MKK;
 1099         case SKU_ROW:
 1100                 return DOMAIN_CODE_DGT; /* Taiwan */
 1101         case SKU_APAC:
 1102         case SKU_APAC2:
 1103         case SKU_APAC3:
 1104                 return DOMAIN_CODE_AUS; /* Australia */
 1105         }
 1106         /* XXX KOREA? */
 1107         return DOMAIN_CODE_FCC;                 /* XXX? */
 1108 }
 1109 
 1110 static int
 1111 mwl_hal_reset(struct mwl_softc *sc)
 1112 {
 1113         struct ieee80211com *ic = &sc->sc_ic;
 1114         struct mwl_hal *mh = sc->sc_mh;
 1115 
 1116         mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
 1117         mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
 1118         mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
 1119         mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
 1120         mwl_chan_set(sc, ic->ic_curchan);
 1121         /* NB: RF/RA performance tuned for indoor mode */
 1122         mwl_hal_setrateadaptmode(mh, 0);
 1123         mwl_hal_setoptimizationlevel(mh,
 1124             (ic->ic_flags & IEEE80211_F_BURST) != 0);
 1125 
 1126         mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
 1127 
 1128         mwl_hal_setaggampduratemode(mh, 1, 80);         /* XXX */
 1129         mwl_hal_setcfend(mh, 0);                        /* XXX */
 1130 
 1131         return 1;
 1132 }
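      /*
       * Note that mwl_hal_reset() reports success boolean-style (nonzero
       * on success), unlike most routines here that return errno values;
       * mwl_init() below tests it with "!".
       */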
 1133 
 1134 static int
 1135 mwl_init(struct mwl_softc *sc)
 1136 {
 1137         struct mwl_hal *mh = sc->sc_mh;
 1138         int error = 0;
 1139 
 1140         MWL_LOCK_ASSERT(sc);
 1141 
 1142         /*
 1143          * Stop anything previously setup.  This is safe
 1144          * whether this is the first time through or not.
 1145          */
 1146         mwl_stop(sc);
 1147 
 1148         /*
 1149          * Push vap-independent state to the firmware.
 1150          */
 1151         if (!mwl_hal_reset(sc)) {
 1152                 device_printf(sc->sc_dev, "unable to reset hardware\n");
 1153                 return EIO;
 1154         }
 1155 
 1156         /*
 1157          * Setup recv (once); transmit is already good to go.
 1158          */
 1159         error = mwl_startrecv(sc);
 1160         if (error != 0) {
 1161                 device_printf(sc->sc_dev, "unable to start recv logic\n");
 1162                 return error;
 1163         }
 1164 
 1165         /*
 1166          * Enable interrupts.
 1167          */
 1168         sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
 1169                      | MACREG_A2HRIC_BIT_TX_DONE
 1170                      | MACREG_A2HRIC_BIT_OPC_DONE
 1171 #if 0
 1172                      | MACREG_A2HRIC_BIT_MAC_EVENT
 1173 #endif
 1174                      | MACREG_A2HRIC_BIT_ICV_ERROR
 1175                      | MACREG_A2HRIC_BIT_RADAR_DETECT
 1176                      | MACREG_A2HRIC_BIT_CHAN_SWITCH
 1177 #if 0
 1178                      | MACREG_A2HRIC_BIT_QUEUE_EMPTY
 1179 #endif
 1180                      | MACREG_A2HRIC_BIT_BA_WATCHDOG
 1181                      | MACREQ_A2HRIC_BIT_TX_ACK
 1182                      ;
 1183 
 1184         sc->sc_running = 1;
 1185         mwl_hal_intrset(mh, sc->sc_imask);
 1186         callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
 1187 
 1188         return 0;
 1189 }
 1190 
 1191 static void
 1192 mwl_stop(struct mwl_softc *sc)
 1193 {
 1194 
 1195         MWL_LOCK_ASSERT(sc);
 1196         if (sc->sc_running) {
 1197                 /*
 1198                  * Shutdown the hardware and driver.
 1199                  */
 1200                 sc->sc_running = 0;
 1201                 callout_stop(&sc->sc_watchdog);
 1202                 sc->sc_tx_timer = 0;
 1203                 mwl_draintxq(sc);
 1204         }
 1205 }
 1206 
 1207 static int
 1208 mwl_reset_vap(struct ieee80211vap *vap, int state)
 1209 {
 1210         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 1211         struct ieee80211com *ic = vap->iv_ic;
 1212 
 1213         if (state == IEEE80211_S_RUN)
 1214                 mwl_setrates(vap);
 1215         /* XXX off by 1? */
 1216         mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
 1217         /* XXX auto? 20/40 split? */
 1218         mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
 1219             (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
 1220         mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
 1221             HTPROTECT_NONE : HTPROTECT_AUTO);
 1222         /* XXX txpower cap */
 1223 
 1224         /* re-setup beacons */
 1225         if (state == IEEE80211_S_RUN &&
 1226             (vap->iv_opmode == IEEE80211_M_HOSTAP ||
 1227              vap->iv_opmode == IEEE80211_M_MBSS ||
 1228              vap->iv_opmode == IEEE80211_M_IBSS)) {
 1229                 mwl_setapmode(vap, vap->iv_bss->ni_chan);
 1230                 mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
 1231                     ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
 1232                 return mwl_beacon_setup(vap);
 1233         }
 1234         return 0;
 1235 }
 1236 
 1237 /*
 1238  * Reset the hardware w/o losing operational state.
 1239  * Used to reset or reload hardware state for a vap.
 1240  */
 1241 static int
 1242 mwl_reset(struct ieee80211vap *vap, u_long cmd)
 1243 {
 1244         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 1245         int error = 0;
 1246 
 1247         if (hvap != NULL) {                     /* WDS, MONITOR, etc. */
 1248                 struct ieee80211com *ic = vap->iv_ic;
 1249                 struct mwl_softc *sc = ic->ic_softc;
 1250                 struct mwl_hal *mh = sc->sc_mh;
 1251 
 1252                 /* XXX handle DWDS sta vap change */
 1253                 /* XXX do we need to disable interrupts? */
 1254                 mwl_hal_intrset(mh, 0);         /* disable interrupts */
 1255                 error = mwl_reset_vap(vap, vap->iv_state);
 1256                 mwl_hal_intrset(mh, sc->sc_imask);
 1257         }
 1258         return error;
 1259 }
 1260 
 1261 /*
 1262  * Allocate a tx buffer for sending a frame.  The
 1263  * packet is assumed to have the WME AC stored so
 1264  * we can use it to select the appropriate h/w queue.
 1265  */
 1266 static struct mwl_txbuf *
 1267 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
 1268 {
 1269         struct mwl_txbuf *bf;
 1270 
 1271         /*
 1272          * Grab a TX buffer and associated resources.
 1273          */
 1274         MWL_TXQ_LOCK(txq);
 1275         bf = STAILQ_FIRST(&txq->free);
 1276         if (bf != NULL) {
 1277                 STAILQ_REMOVE_HEAD(&txq->free, bf_list);
 1278                 txq->nfree--;
 1279         }
 1280         MWL_TXQ_UNLOCK(txq);
 1281         if (bf == NULL)
 1282                 DPRINTF(sc, MWL_DEBUG_XMIT,
 1283                     "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
 1284         return bf;
 1285 }
 1286 
 1287 /*
 1288  * Return a tx buffer to the queue it came from.  Note there
 1289  * are two cases because we must preserve the order of buffers
 1290  * as it reflects the fixed order of descriptors in memory
 1291  * (the firmware pre-fetches descriptors so we cannot reorder).
 1292  */
 1293 static void
 1294 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
 1295 {
 1296         bf->bf_m = NULL;
 1297         bf->bf_node = NULL;
 1298         MWL_TXQ_LOCK(txq);
 1299         STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
 1300         txq->nfree++;
 1301         MWL_TXQ_UNLOCK(txq);
 1302 }
 1303 
 1304 static void
 1305 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
 1306 {
 1307         bf->bf_m = NULL;
 1308         bf->bf_node = NULL;
 1309         MWL_TXQ_LOCK(txq);
 1310         STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
 1311         txq->nfree++;
 1312         MWL_TXQ_UNLOCK(txq);
 1313 }
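      /*
       * A minimal usage sketch of the head/tail convention (illustrative
       * only, not a verbatim excerpt of the surrounding paths): a buffer
       * whose frame never reached the firmware goes back to the head so
       * the fixed descriptor order is preserved, while a buffer reclaimed
       * after the firmware has finished with it is recycled at the tail.
       */
      #if 0
              bf = mwl_gettxbuf(sc, txq);
              if (bf != NULL && mwl_tx_start(sc, ni, bf, m) != 0)
                      mwl_puttxbuf_head(txq, bf);     /* frame never queued to the h/w */
              /* and from tx done processing, once the descriptor is reaped: */
              mwl_puttxbuf_tail(txq, bf);
      #endif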
 1314 
 1315 static int
 1316 mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
 1317 {
 1318         struct mwl_softc *sc = ic->ic_softc;
 1319         int error;
 1320 
 1321         MWL_LOCK(sc);
 1322         if (!sc->sc_running) {
 1323                 MWL_UNLOCK(sc);
 1324                 return (ENXIO);
 1325         }
 1326         error = mbufq_enqueue(&sc->sc_snd, m);
 1327         if (error) {
 1328                 MWL_UNLOCK(sc);
 1329                 return (error);
 1330         }
 1331         mwl_start(sc);
 1332         MWL_UNLOCK(sc);
 1333         return (0);
 1334 }
 1335 
 1336 static void
 1337 mwl_start(struct mwl_softc *sc)
 1338 {
 1339         struct ieee80211_node *ni;
 1340         struct mwl_txbuf *bf;
 1341         struct mbuf *m;
 1342         struct mwl_txq *txq = NULL;     /* XXX silence gcc */
 1343         int nqueued;
 1344 
 1345         MWL_LOCK_ASSERT(sc);
 1346         if (!sc->sc_running || sc->sc_invalid)
 1347                 return;
 1348         nqueued = 0;
 1349         while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
 1350                 /*
 1351                  * Grab the node for the destination.
 1352                  */
 1353                 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
 1354                 KASSERT(ni != NULL, ("no node"));
 1355                 m->m_pkthdr.rcvif = NULL;       /* committed, clear ref */
 1356                 /*
 1357                  * Grab a TX buffer and associated resources.
 1358                  * We honor the classification by the 802.11 layer.
 1359                  */
 1360                 txq = sc->sc_ac2q[M_WME_GETAC(m)];
 1361                 bf = mwl_gettxbuf(sc, txq);
 1362                 if (bf == NULL) {
 1363                         m_freem(m);
 1364                         ieee80211_free_node(ni);
 1365 #ifdef MWL_TX_NODROP
 1366                         sc->sc_stats.mst_tx_qstop++;
 1367                         break;
 1368 #else
 1369                         DPRINTF(sc, MWL_DEBUG_XMIT,
 1370                             "%s: tail drop on q %d\n", __func__, txq->qnum);
 1371                         sc->sc_stats.mst_tx_qdrop++;
 1372                         continue;
 1373 #endif /* MWL_TX_NODROP */
 1374                 }
 1375 
 1376                 /*
 1377                  * Pass the frame to the h/w for transmission.
 1378                  */
 1379                 if (mwl_tx_start(sc, ni, bf, m)) {
 1380                         if_inc_counter(ni->ni_vap->iv_ifp,
 1381                             IFCOUNTER_OERRORS, 1);
 1382                         mwl_puttxbuf_head(txq, bf);
 1383                         ieee80211_free_node(ni);
 1384                         continue;
 1385                 }
 1386                 nqueued++;
 1387                 if (nqueued >= mwl_txcoalesce) {
 1388                         /*
 1389                          * Poke the firmware to process queued frames;
 1390                          * see below about (lack of) locking.
 1391                          */
 1392                         nqueued = 0;
 1393                         mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
 1394                 }
 1395         }
 1396         if (nqueued) {
 1397                 /*
 1398                  * NB: We don't need to lock against tx done because
 1399                  * this just prods the firmware to check the transmit
 1400                  * descriptors.  The firmware will also start fetching
 1401                  * descriptors by itself if it notices new ones are
 1402                  * present when it goes to deliver a tx done interrupt
 1403                  * to the host. So if we race with tx done processing
 1404                  * it's ok.  Delivering the kick here rather than in
 1405                  * mwl_tx_start is an optimization to avoid poking the
 1406                  * firmware for each packet.
 1407                  *
 1408                  * NB: the queue id isn't used so 0 is ok.
 1409                  */
 1410                 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
 1411         }
 1412 }
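      /*
       * A small numeric sketch of the coalescing above (the value 8 for
       * mwl_txcoalesce is only assumed here for illustration): with 19
       * frames on the send queue the firmware is poked inside the loop
       * after the 8th and 16th frames and once more after the loop for
       * the remaining 3.
       */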
 1413 
 1414 static int
 1415 mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
 1416         const struct ieee80211_bpf_params *params)
 1417 {
 1418         struct ieee80211com *ic = ni->ni_ic;
 1419         struct mwl_softc *sc = ic->ic_softc;
 1420         struct mwl_txbuf *bf;
 1421         struct mwl_txq *txq;
 1422 
 1423         if (!sc->sc_running || sc->sc_invalid) {
 1424                 m_freem(m);
 1425                 return ENETDOWN;
 1426         }
 1427         /*
 1428          * Grab a TX buffer and associated resources.
 1429          * Note that we depend on the classification
 1430          * by the 802.11 layer to get to the right h/w
 1431          * queue.  Management frames must ALWAYS go on
 1432          * queue 1 but we cannot just force that here
 1433          * because we may receive non-mgt frames.
 1434          */
 1435         txq = sc->sc_ac2q[M_WME_GETAC(m)];
 1436         bf = mwl_gettxbuf(sc, txq);
 1437         if (bf == NULL) {
 1438                 sc->sc_stats.mst_tx_qstop++;
 1439                 m_freem(m);
 1440                 return ENOBUFS;
 1441         }
 1442         /*
 1443          * Pass the frame to the h/w for transmission.
 1444          */
 1445         if (mwl_tx_start(sc, ni, bf, m)) {
 1446                 mwl_puttxbuf_head(txq, bf);
 1447 
 1448                 return EIO;             /* XXX */
 1449         }
 1450         /*
 1451          * NB: We don't need to lock against tx done because
 1452          * this just prods the firmware to check the transmit
 1453          * descriptors.  The firmware will also start fetching
 1454          * descriptors by itself if it notices new ones are
 1455          * present when it goes to deliver a tx done interrupt
 1456          * to the host. So if we race with tx done processing
 1457          * it's ok.  Delivering the kick here rather than in
 1458          * mwl_tx_start is an optimization to avoid poking the
 1459          * firmware for each packet.
 1460          *
 1461          * NB: the queue id isn't used so 0 is ok.
 1462          */
 1463         mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
 1464         return 0;
 1465 }
 1466 
 1467 static int
 1468 mwl_media_change(struct ifnet *ifp)
 1469 {
 1470         struct ieee80211vap *vap;
 1471         int error;
 1472 
 1473         /* NB: only the fixed rate can change and that doesn't need a reset */
 1474         error = ieee80211_media_change(ifp);
 1475         if (error != 0)
 1476                 return (error);
 1477 
 1478         vap = ifp->if_softc;
 1479         mwl_setrates(vap);
 1480         return (0);
 1481 }
 1482 
 1483 #ifdef MWL_DEBUG
 1484 static void
 1485 mwl_keyprint(struct mwl_softc *sc, const char *tag,
 1486         const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
 1487 {
 1488         static const char *ciphers[] = {
 1489                 "WEP",
 1490                 "TKIP",
 1491                 "AES-CCM",
 1492         };
 1493         int i, n;
 1494 
 1495         printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
 1496         for (i = 0, n = hk->keyLen; i < n; i++)
 1497                 printf(" %02x", hk->key.aes[i]);
 1498         printf(" mac %s", ether_sprintf(mac));
 1499         if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
 1500                 printf(" %s", "rxmic");
 1501                 for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
 1502                         printf(" %02x", hk->key.tkip.rxMic[i]);
 1503                 printf(" txmic");
 1504                 for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
 1505                         printf(" %02x", hk->key.tkip.txMic[i]);
 1506         }
 1507         printf(" flags 0x%x\n", hk->keyFlags);
 1508 }
 1509 #endif
 1510 
 1511 /*
 1512  * Allocate a key cache slot for a unicast key.  The
 1513  * firmware handles key allocation and every station is
 1514  * guaranteed key space so we are always successful.
 1515  */
 1516 static int
 1517 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
 1518         ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
 1519 {
 1520         struct mwl_softc *sc = vap->iv_ic->ic_softc;
 1521 
 1522         if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
 1523             (k->wk_flags & IEEE80211_KEY_GROUP)) {
 1524                 if (!(&vap->iv_nw_keys[0] <= k &&
 1525                       k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
 1526                         /* should not happen */
 1527                         DPRINTF(sc, MWL_DEBUG_KEYCACHE,
 1528                                 "%s: bogus group key\n", __func__);
 1529                         return 0;
 1530                 }
 1531                 /* give the caller what they requested */
 1532                 *keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
 1533         } else {
 1534                 /*
 1535                  * Firmware handles key allocation.
 1536                  */
 1537                 *keyix = *rxkeyix = 0;
 1538         }
 1539         return 1;
 1540 }
 1541 
 1542 /*
 1543  * Delete a key entry allocated by mwl_key_alloc.
 1544  */
 1545 static int
 1546 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
 1547 {
 1548         struct mwl_softc *sc = vap->iv_ic->ic_softc;
 1549         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 1550         MWL_HAL_KEYVAL hk;
 1551         const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
 1552             { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 1553 
 1554         if (hvap == NULL) {
 1555                 if (vap->iv_opmode != IEEE80211_M_WDS) {
 1556                         /* XXX monitor mode? */
 1557                         DPRINTF(sc, MWL_DEBUG_KEYCACHE,
 1558                             "%s: no hvap for opmode %d\n", __func__,
 1559                             vap->iv_opmode);
 1560                         return 0;
 1561                 }
 1562                 hvap = MWL_VAP(vap)->mv_ap_hvap;
 1563         }
 1564 
 1565         DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
 1566             __func__, k->wk_keyix);
 1567 
 1568         memset(&hk, 0, sizeof(hk));
 1569         hk.keyIndex = k->wk_keyix;
 1570         switch (k->wk_cipher->ic_cipher) {
 1571         case IEEE80211_CIPHER_WEP:
 1572                 hk.keyTypeId = KEY_TYPE_ID_WEP;
 1573                 break;
 1574         case IEEE80211_CIPHER_TKIP:
 1575                 hk.keyTypeId = KEY_TYPE_ID_TKIP;
 1576                 break;
 1577         case IEEE80211_CIPHER_AES_CCM:
 1578                 hk.keyTypeId = KEY_TYPE_ID_AES;
 1579                 break;
 1580         default:
 1581                 /* XXX should not happen */
 1582                 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
 1583                     __func__, k->wk_cipher->ic_cipher);
 1584                 return 0;
 1585         }
 1586         return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);   /*XXX*/
 1587 }
 1588 
 1589 static __inline int
 1590 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
 1591 {
 1592         if (k->wk_flags & IEEE80211_KEY_GROUP) {
 1593                 if (k->wk_flags & IEEE80211_KEY_XMIT)
 1594                         hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
 1595                 if (k->wk_flags & IEEE80211_KEY_RECV)
 1596                         hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
 1597                 return 1;
 1598         } else
 1599                 return 0;
 1600 }
 1601 
 1602 /*
 1603  * Set the key cache contents for the specified key.  Key cache
 1604  * slot(s) must already have been allocated by mwl_key_alloc.
 1605  */
 1606 static int
 1607 mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
 1608 {
 1609         return (_mwl_key_set(vap, k, k->wk_macaddr));
 1610 }
 1611 
 1612 static int
 1613 _mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
 1614         const uint8_t mac[IEEE80211_ADDR_LEN])
 1615 {
 1616 #define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
 1617 /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
 1618 #define IEEE80211_IS_STATICKEY(k) \
 1619         (((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
 1620          (GRPXMIT|IEEE80211_KEY_RECV))
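      /*
       * For example, with the usual net80211 key flag usage: a static WEP
       * key is marked GROUP|XMIT|RECV and so satisfies the test above,
       * while a group transient key (GTK) carries only one of XMIT or RECV
       * and does not.
       */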
 1621         struct mwl_softc *sc = vap->iv_ic->ic_softc;
 1622         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 1623         const struct ieee80211_cipher *cip = k->wk_cipher;
 1624         const uint8_t *macaddr;
 1625         MWL_HAL_KEYVAL hk;
 1626 
 1627         KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
 1628                 ("s/w crypto set?"));
 1629 
 1630         if (hvap == NULL) {
 1631                 if (vap->iv_opmode != IEEE80211_M_WDS) {
 1632                         /* XXX monitor mode? */
 1633                         DPRINTF(sc, MWL_DEBUG_KEYCACHE,
 1634                             "%s: no hvap for opmode %d\n", __func__,
 1635                             vap->iv_opmode);
 1636                         return 0;
 1637                 }
 1638                 hvap = MWL_VAP(vap)->mv_ap_hvap;
 1639         }
 1640         memset(&hk, 0, sizeof(hk));
 1641         hk.keyIndex = k->wk_keyix;
 1642         switch (cip->ic_cipher) {
 1643         case IEEE80211_CIPHER_WEP:
 1644                 hk.keyTypeId = KEY_TYPE_ID_WEP;
 1645                 hk.keyLen = k->wk_keylen;
 1646                 if (k->wk_keyix == vap->iv_def_txkey)
 1647                         hk.keyFlags = KEY_FLAG_WEP_TXKEY;
 1648                 if (!IEEE80211_IS_STATICKEY(k)) {
 1649                         /* NB: WEP is never used for the PTK */
 1650                         (void) addgroupflags(&hk, k);
 1651                 }
 1652                 break;
 1653         case IEEE80211_CIPHER_TKIP:
 1654                 hk.keyTypeId = KEY_TYPE_ID_TKIP;
 1655                 hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
 1656                 hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
 1657                 hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
 1658                 hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
 1659                 if (!addgroupflags(&hk, k))
 1660                         hk.keyFlags |= KEY_FLAG_PAIRWISE;
 1661                 break;
 1662         case IEEE80211_CIPHER_AES_CCM:
 1663                 hk.keyTypeId = KEY_TYPE_ID_AES;
 1664                 hk.keyLen = k->wk_keylen;
 1665                 if (!addgroupflags(&hk, k))
 1666                         hk.keyFlags |= KEY_FLAG_PAIRWISE;
 1667                 break;
 1668         default:
 1669                 /* XXX should not happen */
 1670                 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
 1671                     __func__, k->wk_cipher->ic_cipher);
 1672                 return 0;
 1673         }
 1674         /*
 1675          * NB: tkip mic keys get copied here too; the layout
 1676          *     just happens to match that in ieee80211_key.
 1677          */
 1678         memcpy(hk.key.aes, k->wk_key, hk.keyLen);
 1679 
 1680         /*
 1681          * Locate address of sta db entry for writing key;
 1682          * the convention unfortunately is somewhat different
 1683          * than how net80211, hostapd, and wpa_supplicant think.
 1684          */
 1685         if (vap->iv_opmode == IEEE80211_M_STA) {
 1686                 /*
 1687                  * NB: keys plumbed before the sta reaches AUTH state
 1688                  * will be discarded or written to the wrong sta db
 1689                  * entry because iv_bss is meaningless.  This is ok
 1690                  * (right now) because we handle deferred plumbing of
 1691                  * WEP keys when the sta reaches AUTH state.
 1692                  */
 1693                 macaddr = vap->iv_bss->ni_bssid;
 1694                 if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
 1695                         /* XXX plumb to local sta db too for static key wep */
 1696                         mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
 1697                 }
 1698         } else if (vap->iv_opmode == IEEE80211_M_WDS &&
 1699             vap->iv_state != IEEE80211_S_RUN) {
 1700                 /*
 1701                  * Prior to RUN state a WDS vap will not have its BSS node
 1702                  * set up, so we will plumb the key to the wrong mac
 1703                  * address (it'll be our local address).  Workaround
 1704                  * this for the moment by grabbing the correct address.
 1705                  */
 1706                 macaddr = vap->iv_des_bssid;
 1707         } else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
 1708                 macaddr = vap->iv_myaddr;
 1709         else
 1710                 macaddr = mac;
 1711         KEYPRINTF(sc, &hk, macaddr);
 1712         return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
 1713 #undef IEEE80211_IS_STATICKEY
 1714 #undef GRPXMIT
 1715 }
 1716 
 1717 /*
 1718  * Set the multicast filter contents into the hardware.
 1719  * XXX f/w has no support; just defer to the os.
 1720  */
 1721 static void
 1722 mwl_setmcastfilter(struct mwl_softc *sc)
 1723 {
 1724 #if 0
 1725         struct ether_multi *enm;
 1726         struct ether_multistep estep;
 1727         uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
 1728         uint8_t *mp;
 1729         int nmc;
 1730 
 1731         mp = macs;
 1732         nmc = 0;
 1733         ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
 1734         while (enm != NULL) {
 1735                 /* XXX Punt on ranges. */
 1736                 if (nmc == MWL_HAL_MCAST_MAX ||
 1737                     !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
 1738                         ifp->if_flags |= IFF_ALLMULTI;
 1739                         return;
 1740                 }
 1741                 IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
 1742                 mp += IEEE80211_ADDR_LEN, nmc++;
 1743                 ETHER_NEXT_MULTI(estep, enm);
 1744         }
 1745         ifp->if_flags &= ~IFF_ALLMULTI;
 1746         mwl_hal_setmcast(sc->sc_mh, nmc, macs);
 1747 #endif
 1748 }
 1749 
 1750 static int
 1751 mwl_mode_init(struct mwl_softc *sc)
 1752 {
 1753         struct ieee80211com *ic = &sc->sc_ic;
 1754         struct mwl_hal *mh = sc->sc_mh;
 1755 
 1756         mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
 1757         mwl_setmcastfilter(sc);
 1758 
 1759         return 0;
 1760 }
 1761 
 1762 /*
 1763  * Callback from the 802.11 layer after a multicast state change.
 1764  */
 1765 static void
 1766 mwl_update_mcast(struct ieee80211com *ic)
 1767 {
 1768         struct mwl_softc *sc = ic->ic_softc;
 1769 
 1770         mwl_setmcastfilter(sc);
 1771 }
 1772 
 1773 /*
 1774  * Callback from the 802.11 layer after a promiscuous mode change.
 1775  * Note this interface does not check the operating mode as this
 1776  * is an internal callback and we are expected to honor the current
 1777  * state (e.g. this is used for setting the interface in promiscuous
 1778  * mode when operating in hostap mode to do ACS).
 1779  */
 1780 static void
 1781 mwl_update_promisc(struct ieee80211com *ic)
 1782 {
 1783         struct mwl_softc *sc = ic->ic_softc;
 1784 
 1785         mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
 1786 }
 1787 
 1788 /*
 1789  * Callback from the 802.11 layer to update the slot time
 1790  * based on the current setting.  We use it to notify the
 1791  * firmware of ERP changes and the f/w takes care of things
 1792  * like slot time and preamble.
 1793  */
 1794 static void
 1795 mwl_updateslot(struct ieee80211com *ic)
 1796 {
 1797         struct mwl_softc *sc = ic->ic_softc;
 1798         struct mwl_hal *mh = sc->sc_mh;
 1799         int prot;
 1800 
 1801         /* NB: can be called early; suppress needless cmds */
 1802         if (!sc->sc_running)
 1803                 return;
 1804 
 1805         /*
 1806          * Calculate the ERP flags.  The firmware will use
 1807          * this to carry out the appropriate measures.
 1808          */
 1809         prot = 0;
 1810         if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
 1811                 if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
 1812                         prot |= IEEE80211_ERP_NON_ERP_PRESENT;
 1813                 if (ic->ic_flags & IEEE80211_F_USEPROT)
 1814                         prot |= IEEE80211_ERP_USE_PROTECTION;
 1815                 if (ic->ic_flags & IEEE80211_F_USEBARKER)
 1816                         prot |= IEEE80211_ERP_LONG_PREAMBLE;
 1817         }
 1818 
 1819         DPRINTF(sc, MWL_DEBUG_RESET,
 1820             "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
 1821             __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
 1822             ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
 1823             ic->ic_flags);
 1824 
 1825         mwl_hal_setgprot(mh, prot);
 1826 }
 1827 
 1828 /*
 1829  * Setup the beacon frame.
 1830  */
 1831 static int
 1832 mwl_beacon_setup(struct ieee80211vap *vap)
 1833 {
 1834         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 1835         struct ieee80211_node *ni = vap->iv_bss;
 1836         struct mbuf *m;
 1837 
 1838         m = ieee80211_beacon_alloc(ni);
 1839         if (m == NULL)
 1840                 return ENOBUFS;
 1841         mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
 1842         m_free(m);
 1843 
 1844         return 0;
 1845 }
 1846 
 1847 /*
 1848  * Update the beacon frame in response to a change.
 1849  */
 1850 static void
 1851 mwl_beacon_update(struct ieee80211vap *vap, int item)
 1852 {
 1853         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 1854         struct ieee80211com *ic = vap->iv_ic;
 1855 
 1856         KASSERT(hvap != NULL, ("no beacon"));
 1857         switch (item) {
 1858         case IEEE80211_BEACON_ERP:
 1859                 mwl_updateslot(ic);
 1860                 break;
 1861         case IEEE80211_BEACON_HTINFO:
 1862                 mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
 1863                     ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
 1864                 break;
 1865         case IEEE80211_BEACON_CAPS:
 1866         case IEEE80211_BEACON_WME:
 1867         case IEEE80211_BEACON_APPIE:
 1868         case IEEE80211_BEACON_CSA:
 1869                 break;
 1870         case IEEE80211_BEACON_TIM:
 1871                 /* NB: firmware always forms TIM */
 1872                 return;
 1873         }
 1874         /* XXX retain beacon frame and update */
 1875         mwl_beacon_setup(vap);
 1876 }
 1877 
 1878 static void
 1879 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1880 {
 1881         bus_addr_t *paddr = (bus_addr_t*) arg;
 1882         KASSERT(error == 0, ("error %u on bus_dma callback", error));
 1883         *paddr = segs->ds_addr;
 1884 }
 1885 
 1886 #ifdef MWL_HOST_PS_SUPPORT
 1887 /*
 1888  * Handle power save station occupancy changes.
 1889  */
 1890 static void
 1891 mwl_update_ps(struct ieee80211vap *vap, int nsta)
 1892 {
 1893         struct mwl_vap *mvp = MWL_VAP(vap);
 1894 
 1895         if (nsta == 0 || mvp->mv_last_ps_sta == 0)
 1896                 mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
 1897         mvp->mv_last_ps_sta = nsta;
 1898 }
 1899 
 1900 /*
 1901  * Handle associated station power save state changes.
 1902  */
 1903 static int
 1904 mwl_set_tim(struct ieee80211_node *ni, int set)
 1905 {
 1906         struct ieee80211vap *vap = ni->ni_vap;
 1907         struct mwl_vap *mvp = MWL_VAP(vap);
 1908 
 1909         if (mvp->mv_set_tim(ni, set)) {         /* NB: state change */
 1910                 mwl_hal_setpowersave_sta(mvp->mv_hvap,
 1911                     IEEE80211_AID(ni->ni_associd), set);
 1912                 return 1;
 1913         } else
 1914                 return 0;
 1915 }
 1916 #endif /* MWL_HOST_PS_SUPPORT */
 1917 
 1918 static int
 1919 mwl_desc_setup(struct mwl_softc *sc, const char *name,
 1920         struct mwl_descdma *dd,
 1921         int nbuf, size_t bufsize, int ndesc, size_t descsize)
 1922 {
 1923         uint8_t *ds;
 1924         int error;
 1925 
 1926         DPRINTF(sc, MWL_DEBUG_RESET,
 1927             "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
 1928             __func__, name, nbuf, (uintmax_t) bufsize,
 1929             ndesc, (uintmax_t) descsize);
 1930 
 1931         dd->dd_name = name;
 1932         dd->dd_desc_len = nbuf * ndesc * descsize;
 1933 
 1934         /*
 1935          * Setup DMA descriptor area.
 1936          */
 1937         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
 1938                        PAGE_SIZE, 0,            /* alignment, bounds */
 1939                        BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
 1940                        BUS_SPACE_MAXADDR,       /* highaddr */
 1941                        NULL, NULL,              /* filter, filterarg */
 1942                        dd->dd_desc_len,         /* maxsize */
 1943                        1,                       /* nsegments */
 1944                        dd->dd_desc_len,         /* maxsegsize */
 1945                        BUS_DMA_ALLOCNOW,        /* flags */
 1946                        NULL,                    /* lockfunc */
 1947                        NULL,                    /* lockarg */
 1948                        &dd->dd_dmat);
 1949         if (error != 0) {
 1950                 device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
 1951                 return error;
 1952         }
 1953 
 1954         /* allocate descriptors */
 1955         error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
 1956                                  BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 
 1957                                  &dd->dd_dmamap);
 1958         if (error != 0) {
 1959                 device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
 1960                         "error %u\n", nbuf * ndesc, dd->dd_name, error);
 1961                 goto fail1;
 1962         }
 1963 
 1964         error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
 1965                                 dd->dd_desc, dd->dd_desc_len,
 1966                                 mwl_load_cb, &dd->dd_desc_paddr,
 1967                                 BUS_DMA_NOWAIT);
 1968         if (error != 0) {
 1969                 device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
 1970                         dd->dd_name, error);
 1971                 goto fail2;
 1972         }
 1973 
 1974         ds = dd->dd_desc;
 1975         memset(ds, 0, dd->dd_desc_len);
 1976         DPRINTF(sc, MWL_DEBUG_RESET,
 1977             "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
 1978             __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
 1979             (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
 1980 
 1981         return 0;
 1982 fail2:
 1983         bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
 1984 fail1:
 1985         bus_dma_tag_destroy(dd->dd_dmat);
 1986         memset(dd, 0, sizeof(*dd));
 1987         return error;
 1988 #undef DS2PHYS
 1989 }
 1990 
 1991 static void
 1992 mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
 1993 {
 1994         bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
 1995         bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
 1996         bus_dma_tag_destroy(dd->dd_dmat);
 1997 
 1998         memset(dd, 0, sizeof(*dd));
 1999 }
 2000 
 2001 /* 
 2002  * Construct a tx q's free list.  The order of entries on
 2003  * the list must reflect the physical layout of tx descriptors
 2004  * because the firmware pre-fetches descriptors.
 2005  *
 2006  * XXX might be better to use indices into the buffer array.
 2007  */
 2008 static void
 2009 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
 2010 {
 2011         struct mwl_txbuf *bf;
 2012         int i;
 2013 
 2014         bf = txq->dma.dd_bufptr;
 2015         STAILQ_INIT(&txq->free);
 2016         for (i = 0; i < mwl_txbuf; i++, bf++)
 2017                 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
 2018         txq->nfree = i;
 2019 }
 2020 
 2021 #define DS2PHYS(_dd, _ds) \
 2022         ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
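      /*
       * Worked example of the DS2PHYS() arithmetic (the bus address below is
       * made up for illustration): if the descriptor block is mapped at
       * kernel virtual address dd_desc and was loaded at bus address
       * 0x1f400000 (dd_desc_paddr), then the third descriptor at
       * ds = dd_desc + 2*sizeof(struct mwl_txdesc) translates to
       * 0x1f400000 + 2*sizeof(struct mwl_txdesc), the physical address the
       * firmware must be given for that descriptor.
       */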
 2023 
 2024 static int
 2025 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
 2026 {
 2027         int error, bsize, i;
 2028         struct mwl_txbuf *bf;
 2029         struct mwl_txdesc *ds;
 2030 
 2031         error = mwl_desc_setup(sc, "tx", &txq->dma,
 2032                         mwl_txbuf, sizeof(struct mwl_txbuf),
 2033                         MWL_TXDESC, sizeof(struct mwl_txdesc));
 2034         if (error != 0)
 2035                 return error;
 2036 
 2037         /* allocate and setup tx buffers */
 2038         bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
 2039         bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
 2040         if (bf == NULL) {
 2041                 device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
 2042                         mwl_txbuf);
 2043                 return ENOMEM;
 2044         }
 2045         txq->dma.dd_bufptr = bf;
 2046 
 2047         ds = txq->dma.dd_desc;
 2048         for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
 2049                 bf->bf_desc = ds;
 2050                 bf->bf_daddr = DS2PHYS(&txq->dma, ds);
 2051                 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
 2052                                 &bf->bf_dmamap);
 2053                 if (error != 0) {
 2054                         device_printf(sc->sc_dev, "unable to create dmamap for tx "
 2055                                 "buffer %u, error %u\n", i, error);
 2056                         return error;
 2057                 }
 2058         }
 2059         mwl_txq_reset(sc, txq);
 2060         return 0;
 2061 }
 2062 
 2063 static void
 2064 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
 2065 {
 2066         struct mwl_txbuf *bf;
 2067         int i;
 2068 
 2069         bf = txq->dma.dd_bufptr;
 2070         for (i = 0; i < mwl_txbuf; i++, bf++) {
 2071                 KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
 2072                 KASSERT(bf->bf_node == NULL, ("node on free list"));
 2073                 if (bf->bf_dmamap != NULL)
 2074                         bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
 2075         }
 2076         STAILQ_INIT(&txq->free);
 2077         txq->nfree = 0;
 2078         if (txq->dma.dd_bufptr != NULL) {
 2079                 free(txq->dma.dd_bufptr, M_MWLDEV);
 2080                 txq->dma.dd_bufptr = NULL;
 2081         }
 2082         if (txq->dma.dd_desc_len != 0)
 2083                 mwl_desc_cleanup(sc, &txq->dma);
 2084 }
 2085 
 2086 static int
 2087 mwl_rxdma_setup(struct mwl_softc *sc)
 2088 {
 2089         int error, jumbosize, bsize, i;
 2090         struct mwl_rxbuf *bf;
 2091         struct mwl_jumbo *rbuf;
 2092         struct mwl_rxdesc *ds;
 2093         caddr_t data;
 2094 
 2095         error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
 2096                         mwl_rxdesc, sizeof(struct mwl_rxbuf),
 2097                         1, sizeof(struct mwl_rxdesc));
 2098         if (error != 0)
 2099                 return error;
 2100 
 2101         /*
 2102          * Receive is done to a private pool of jumbo buffers.
 2103          * This allows us to attach to mbuf's and avoid re-mapping
 2104          * memory on each rx we post.  We allocate a large chunk
 2105          * of memory and manage it in the driver.  The mbuf free
 2106          * callback method is used to reclaim frames after sending
 2107          * them up the stack.  By default we allocate 2x the number of
 2108          * rx descriptors configured so we have some slop to hold
 2109          * us while frames are processed.
 2110          */
 2111         if (mwl_rxbuf < 2*mwl_rxdesc) {
 2112                 device_printf(sc->sc_dev,
 2113                     "too few rx dma buffers (%d); increasing to %d\n",
 2114                     mwl_rxbuf, 2*mwl_rxdesc);
 2115                 mwl_rxbuf = 2*mwl_rxdesc;
 2116         }
 2117         jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
 2118         sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
 2119 
 2120         error = bus_dma_tag_create(sc->sc_dmat, /* parent */
 2121                        PAGE_SIZE, 0,            /* alignment, bounds */
 2122                        BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
 2123                        BUS_SPACE_MAXADDR,       /* highaddr */
 2124                        NULL, NULL,              /* filter, filterarg */
 2125                        sc->sc_rxmemsize,        /* maxsize */
 2126                        1,                       /* nsegments */
 2127                        sc->sc_rxmemsize,        /* maxsegsize */
 2128                        BUS_DMA_ALLOCNOW,        /* flags */
 2129                        NULL,                    /* lockfunc */
 2130                        NULL,                    /* lockarg */
 2131                        &sc->sc_rxdmat);
 2132         if (error != 0) {
 2133                 device_printf(sc->sc_dev, "could not create rx DMA tag\n");
 2134                 return error;
 2135         }
 2136 
 2137         error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
 2138                                  BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 
 2139                                  &sc->sc_rxmap);
 2140         if (error != 0) {
 2141                 device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
 2142                     (uintmax_t) sc->sc_rxmemsize);
 2143                 return error;
 2144         }
 2145 
 2146         error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
 2147                                 sc->sc_rxmem, sc->sc_rxmemsize,
 2148                                 mwl_load_cb, &sc->sc_rxmem_paddr,
 2149                                 BUS_DMA_NOWAIT);
 2150         if (error != 0) {
 2151                 device_printf(sc->sc_dev, "could not load rx DMA map\n");
 2152                 return error;
 2153         }
 2154 
 2155         /*
 2156          * Allocate rx buffers and set them up.
 2157          */
 2158         bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
 2159         bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
 2160         if (bf == NULL) {
 2161                 device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", mwl_rxdesc);
 2162                 return ENOMEM;
 2163         }
 2164         sc->sc_rxdma.dd_bufptr = bf;
 2165 
 2166         STAILQ_INIT(&sc->sc_rxbuf);
 2167         ds = sc->sc_rxdma.dd_desc;
 2168         for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
 2169                 bf->bf_desc = ds;
 2170                 bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
 2171                 /* pre-assign dma buffer */
 2172                 bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
 2173                 /* NB: tail is intentional to preserve descriptor order */
 2174                 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
 2175         }
 2176 
 2177         /*
 2178          * Place remainder of dma memory buffers on the free list.
 2179          */
 2180         SLIST_INIT(&sc->sc_rxfree);
 2181         for (; i < mwl_rxbuf; i++) {
 2182                 data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
 2183                 rbuf = MWL_JUMBO_DATA2BUF(data);
 2184                 SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
 2185                 sc->sc_nrxfree++;
 2186         }
 2187         return 0;
 2188 }
 2189 #undef DS2PHYS
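      /*
       * Buffer accounting sketch for the rx setup above (the 2x ratio is the
       * minimum enforced a few lines earlier): the first mwl_rxdesc jumbo
       * buffers, each roundup(MWL_AGGR_SIZE, PAGE_SIZE) bytes, are bound to
       * rx descriptors, and the remaining mwl_rxbuf - mwl_rxdesc buffers are
       * parked on sc_rxfree where mwl_getrxdma() hands them out as
       * replacements while received frames are still held by the stack.
       */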
 2190 
 2191 static void
 2192 mwl_rxdma_cleanup(struct mwl_softc *sc)
 2193 {
 2194         if (sc->sc_rxmem_paddr != 0) {
 2195                 bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
 2196                 sc->sc_rxmem_paddr = 0;
 2197         }
 2198         if (sc->sc_rxmem != NULL) {
 2199                 bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
 2200                 sc->sc_rxmem = NULL;
 2201         }
 2202         if (sc->sc_rxdma.dd_bufptr != NULL) {
 2203                 free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
 2204                 sc->sc_rxdma.dd_bufptr = NULL;
 2205         }
 2206         if (sc->sc_rxdma.dd_desc_len != 0)
 2207                 mwl_desc_cleanup(sc, &sc->sc_rxdma);
 2208 }
 2209 
 2210 static int
 2211 mwl_dma_setup(struct mwl_softc *sc)
 2212 {
 2213         int error, i;
 2214 
 2215         error = mwl_rxdma_setup(sc);
 2216         if (error != 0) {
 2217                 mwl_rxdma_cleanup(sc);
 2218                 return error;
 2219         }
 2220 
 2221         for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
 2222                 error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
 2223                 if (error != 0) {
 2224                         mwl_dma_cleanup(sc);
 2225                         return error;
 2226                 }
 2227         }
 2228         return 0;
 2229 }
 2230 
 2231 static void
 2232 mwl_dma_cleanup(struct mwl_softc *sc)
 2233 {
 2234         int i;
 2235 
 2236         for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
 2237                 mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
 2238         mwl_rxdma_cleanup(sc);
 2239 }
 2240 
 2241 static struct ieee80211_node *
 2242 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
 2243 {
 2244         struct ieee80211com *ic = vap->iv_ic;
 2245         struct mwl_softc *sc = ic->ic_softc;
 2246         const size_t space = sizeof(struct mwl_node);
 2247         struct mwl_node *mn;
 2248 
 2249         mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
 2250         if (mn == NULL) {
 2251                 /* XXX stat+msg */
 2252                 return NULL;
 2253         }
 2254         DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
 2255         return &mn->mn_node;
 2256 }
 2257 
 2258 static void
 2259 mwl_node_cleanup(struct ieee80211_node *ni)
 2260 {
 2261         struct ieee80211com *ic = ni->ni_ic;
 2262         struct mwl_softc *sc = ic->ic_softc;
 2263         struct mwl_node *mn = MWL_NODE(ni);
 2264 
 2265         DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
 2266             __func__, ni, ni->ni_ic, mn->mn_staid);
 2267 
 2268         if (mn->mn_staid != 0) {
 2269                 struct ieee80211vap *vap = ni->ni_vap;
 2270 
 2271                 if (mn->mn_hvap != NULL) {
 2272                         if (vap->iv_opmode == IEEE80211_M_STA)
 2273                                 mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
 2274                         else
 2275                                 mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
 2276                 }
 2277                 /*
 2278                  * NB: legacy WDS peer sta db entry is installed using
 2279                  * the associated ap's hvap; use it again to delete it.
 2280                  * XXX can vap be NULL?
 2281                  */
 2282                 else if (vap->iv_opmode == IEEE80211_M_WDS &&
 2283                     MWL_VAP(vap)->mv_ap_hvap != NULL)
 2284                         mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
 2285                             ni->ni_macaddr);
 2286                 delstaid(sc, mn->mn_staid);
 2287                 mn->mn_staid = 0;
 2288         }
 2289         sc->sc_node_cleanup(ni);
 2290 }
 2291 
 2292 /*
 2293  * Reclaim rx dma buffers from packets sitting on the ampdu
 2294  * reorder queue for a station.  We replace buffers with a
 2295  * system cluster (if available).
 2296  */
 2297 static void
 2298 mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
 2299 {
 2300 #if 0
 2301         int i, n, off;
 2302         struct mbuf *m;
 2303         void *cl;
 2304 
 2305         n = rap->rxa_qframes;
 2306         for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
 2307                 m = rap->rxa_m[i];
 2308                 if (m == NULL)
 2309                         continue;
 2310                 n--;
 2311                 /* our dma buffers have a well-known free routine */
 2312                 if ((m->m_flags & M_EXT) == 0 ||
 2313                     m->m_ext.ext_free != mwl_ext_free)
 2314                         continue;
 2315                 /*
 2316                  * Try to allocate a cluster and move the data.
 2317                  */
 2318                 off = m->m_data - m->m_ext.ext_buf;
 2319                 if (off + m->m_pkthdr.len > MCLBYTES) {
 2320                         /* XXX no AMSDU for now */
 2321                         continue;
 2322                 }
 2323                 cl = pool_cache_get_paddr(&mclpool_cache, 0,
 2324                     &m->m_ext.ext_paddr);
 2325                 if (cl != NULL) {
 2326                         /*
 2327                          * Copy the existing data to the cluster, remove
 2328                          * the rx dma buffer, and attach the cluster in
 2329                          * its place.  Note we preserve the offset to the
 2330                          * data so frames being bridged can still prepend
 2331                          * their headers without adding another mbuf.
 2332                          */
 2333                         memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
 2334                         MEXTREMOVE(m);
 2335                         MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
 2336                         /* setup mbuf like _MCLGET does */
 2337                         m->m_flags |= M_CLUSTER | M_EXT_RW;
 2338                         _MOWNERREF(m, M_EXT | M_CLUSTER);
 2339                         /* NB: m_data is clobbered by MEXTADDR, adjust */
 2340                         m->m_data += off;
 2341                 }
 2342         }
 2343 #endif
 2344 }
 2345 
 2346 /*
 2347  * Callback to reclaim resources.  We first let the
 2348  * net80211 layer do its thing, then if we are still
 2349  * blocked by a lack of rx dma buffers we walk the ampdu
 2350  * reorder q's to reclaim buffers by copying to a system
 2351  * cluster.
 2352  */
 2353 static void
 2354 mwl_node_drain(struct ieee80211_node *ni)
 2355 {
 2356         struct ieee80211com *ic = ni->ni_ic;
 2357         struct mwl_softc *sc = ic->ic_softc;
 2358         struct mwl_node *mn = MWL_NODE(ni);
 2359 
 2360         DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
 2361             __func__, ni, ni->ni_vap, mn->mn_staid);
 2362 
 2363         /* NB: call up first to age out ampdu q's */
 2364         sc->sc_node_drain(ni);
 2365 
 2366         /* XXX better to not check low water mark? */
 2367         if (sc->sc_rxblocked && mn->mn_staid != 0 &&
 2368             (ni->ni_flags & IEEE80211_NODE_HT)) {
 2369                 uint8_t tid;
 2370                 /*
 2371                  * Walk the reorder q and reclaim rx dma buffers by copying
 2372                  * the packet contents into clusters.
 2373                  */
 2374                 for (tid = 0; tid < WME_NUM_TID; tid++) {
 2375                         struct ieee80211_rx_ampdu *rap;
 2376 
 2377                         rap = &ni->ni_rx_ampdu[tid];
 2378                         if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
 2379                                 continue;
 2380                         if (rap->rxa_qframes)
 2381                                 mwl_ampdu_rxdma_reclaim(rap);
 2382                 }
 2383         }
 2384 }
 2385 
 2386 static void
 2387 mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
 2388 {
 2389         *rssi = ni->ni_ic->ic_node_getrssi(ni);
 2390 #ifdef MWL_ANT_INFO_SUPPORT
 2391 #if 0
 2392         /* XXX need to smooth data */
 2393         *noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
 2394 #else
 2395         *noise = -95;           /* XXX */
 2396 #endif
 2397 #else
 2398         *noise = -95;           /* XXX */
 2399 #endif
 2400 }
 2401 
 2402 /*
 2403  * Convert Hardware per-antenna rssi info to common format:
 2404  * Let a1, a2, a3 represent the amplitudes per chain
 2405  * Let amax represent max[a1, a2, a3]
 2406  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
 2407  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
 2408  * We store a table that holds 4*20*log10(idx) - the extra factor of 4
 2409  * preserves some extra precision.
 2410  *
 2411  * Values are stored in .5 db format capped at 127.
 2412  */
 2413 static void
 2414 mwl_node_getmimoinfo(const struct ieee80211_node *ni,
 2415         struct ieee80211_mimo_info *mi)
 2416 {
 2417 #define CVT(_dst, _src) do {                                            \
 2418         (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);   \
 2419         (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);                     \
 2420 } while (0)
 2421         static const int8_t logdbtbl[32] = {
 2422                0,   0,  24,  38,  48,  56,  62,  68, 
 2423               72,  76,  80,  83,  86,  89,  92,  94, 
 2424               96,  98, 100, 102, 104, 106, 107, 109, 
 2425              110, 112, 113, 115, 116, 117, 118, 119
 2426         };
 2427         const struct mwl_node *mn = MWL_NODE_CONST(ni);
 2428         uint8_t rssi = mn->mn_ai.rsvd1/2;               /* XXX */
 2429         uint32_t rssi_max;
 2430 
 2431         rssi_max = mn->mn_ai.rssi_a;
 2432         if (mn->mn_ai.rssi_b > rssi_max)
 2433                 rssi_max = mn->mn_ai.rssi_b;
 2434         if (mn->mn_ai.rssi_c > rssi_max)
 2435                 rssi_max = mn->mn_ai.rssi_c;
 2436 
 2437         CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
 2438         CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
 2439         CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);
 2440 
 2441         mi->ch[0].noise[0] = mn->mn_ai.nf_a;
 2442         mi->ch[1].noise[0] = mn->mn_ai.nf_b;
 2443         mi->ch[2].noise[0] = mn->mn_ai.nf_c;
 2444 #undef CVT
 2445 }
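      /*
       * Worked example of the CVT() conversion (antenna values assumed for
       * illustration): with a base rssi of 30, rssi_a = 20 (the maximum) and
       * rssi_b = 10, chain b yields
       *   30 + ((logdbtbl[10] - logdbtbl[20]) >> 2) = 30 + (80 - 104)/4 = 24,
       * i.e. about 6 dB below the strongest chain (20*log10(10/20) ~ -6 dB),
       * and is reported as 48 in .5 dB units; chain a reports 2*30 = 60.
       */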
 2446 
 2447 static __inline void *
 2448 mwl_getrxdma(struct mwl_softc *sc)
 2449 {
 2450         struct mwl_jumbo *buf;
 2451         void *data;
 2452 
 2453         /*
 2454          * Allocate from jumbo pool.
 2455          */
 2456         MWL_RXFREE_LOCK(sc);
 2457         buf = SLIST_FIRST(&sc->sc_rxfree);
 2458         if (buf == NULL) {
 2459                 DPRINTF(sc, MWL_DEBUG_ANY,
 2460                     "%s: out of rx dma buffers\n", __func__);
 2461                 sc->sc_stats.mst_rx_nodmabuf++;
 2462                 data = NULL;
 2463         } else {
 2464                 SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
 2465                 sc->sc_nrxfree--;
 2466                 data = MWL_JUMBO_BUF2DATA(buf);
 2467         }
 2468         MWL_RXFREE_UNLOCK(sc);
 2469         return data;
 2470 }
 2471 
 2472 static __inline void
 2473 mwl_putrxdma(struct mwl_softc *sc, void *data)
 2474 {
 2475         struct mwl_jumbo *buf;
 2476 
 2477         /* XXX bounds check data */
 2478         MWL_RXFREE_LOCK(sc);
 2479         buf = MWL_JUMBO_DATA2BUF(data);
 2480         SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
 2481         sc->sc_nrxfree++;
 2482         MWL_RXFREE_UNLOCK(sc);
 2483 }
 2484 
 2485 static int
 2486 mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
 2487 {
 2488         struct mwl_rxdesc *ds;
 2489 
 2490         ds = bf->bf_desc;
 2491         if (bf->bf_data == NULL) {
 2492                 bf->bf_data = mwl_getrxdma(sc);
 2493                 if (bf->bf_data == NULL) {
 2494                         /* mark descriptor to be skipped */
 2495                         ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
 2496                         /* NB: don't need PREREAD */
 2497                         MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
 2498                         sc->sc_stats.mst_rxbuf_failed++;
 2499                         return ENOMEM;
 2500                 }
 2501         }
 2502         /*
 2503          * NB: DMA buffer contents are known to be unmodified
 2504          *     so there's no need to flush the data cache.
 2505          */
 2506 
 2507         /*
 2508          * Setup descriptor.
 2509          */
 2510         ds->QosCtrl = 0;
 2511         ds->RSSI = 0;
 2512         ds->Status = EAGLE_RXD_STATUS_IDLE;
 2513         ds->Channel = 0;
 2514         ds->PktLen = htole16(MWL_AGGR_SIZE);
 2515         ds->SQ2 = 0;
 2516         ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
 2517         /* NB: don't touch pPhysNext, set once */
 2518         ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
 2519         MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2520 
 2521         return 0;
 2522 }
 2523 
 2524 static void
 2525 mwl_ext_free(struct mbuf *m)
 2526 {
 2527         struct mwl_softc *sc = m->m_ext.ext_arg1;
 2528 
 2529         /* XXX bounds check data */
 2530         mwl_putrxdma(sc, m->m_ext.ext_buf);
 2531         /*
 2532          * If we were previously blocked by a lack of rx dma buffers
 2533          * check if we now have enough to restart rx interrupt handling.
 2534          * NB: we know we are called at splvm which is above splnet.
 2535          */
 2536         if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
 2537                 sc->sc_rxblocked = 0;
 2538                 mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
 2539         }
 2540 }
 2541 
 2542 struct mwl_frame_bar {
 2543         u_int8_t        i_fc[2];
 2544         u_int8_t        i_dur[2];
 2545         u_int8_t        i_ra[IEEE80211_ADDR_LEN];
 2546         u_int8_t        i_ta[IEEE80211_ADDR_LEN];
 2547         /* ctl, seq, FCS */
 2548 } __packed;
 2549 
 2550 /*
 2551  * Like ieee80211_anyhdrsize, but handles BAR frames
 2552  * specially so the logic below to piece the 802.11
 2553  * header together works.
 2554  */
 2555 static __inline int
 2556 mwl_anyhdrsize(const void *data)
 2557 {
 2558         const struct ieee80211_frame *wh = data;
 2559 
 2560         if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
 2561                 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
 2562                 case IEEE80211_FC0_SUBTYPE_CTS:
 2563                 case IEEE80211_FC0_SUBTYPE_ACK:
 2564                         return sizeof(struct ieee80211_frame_ack);
 2565                 case IEEE80211_FC0_SUBTYPE_BAR:
 2566                         return sizeof(struct mwl_frame_bar);
 2567                 }
 2568                 return sizeof(struct ieee80211_frame_min);
 2569         } else
 2570                 return ieee80211_hdrsize(data);
 2571 }
 2572 
 2573 static void
 2574 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
 2575 {
 2576         const struct ieee80211_frame *wh;
 2577         struct ieee80211_node *ni;
 2578 
 2579         wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
 2580         ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
 2581         if (ni != NULL) {
 2582                 ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
 2583                 ieee80211_free_node(ni);
 2584         }
 2585 }
 2586 
 2587 /*
 2588  * Convert hardware signal strength to rssi.  The value
 2589  * provided by the device has the noise floor added in;
 2590  * we need to compensate for this but we don't have that
 2591  * so we use a fixed value.
 2592  *
 2593  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
 2594  * offset is already set as part of the initial gain.  This
 2595  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
 2596  */
 2597 static __inline int
 2598 cvtrssi(uint8_t ssi)
 2599 {
 2600         int rssi = (int) ssi + 8;
 2601         /* XXX hack guess until we have a real noise floor */
 2602         rssi = 2*(87 - rssi);   /* NB: .5 dBm units */
 2603         return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
 2604 }
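      /*
       * For example (numbers for illustration only): a hardware ssi of 50
       * gives 50 + 8 = 58, then 2*(87 - 58) = 58 in .5 dBm units, roughly
       * 29 dB above the assumed fixed noise floor; results are clamped to
       * the range [0, 127].
       */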
 2605 
 2606 static void
 2607 mwl_rx_proc(void *arg, int npending)
 2608 {
 2609         struct epoch_tracker et;
 2610         struct mwl_softc *sc = arg;
 2611         struct ieee80211com *ic = &sc->sc_ic;
 2612         struct mwl_rxbuf *bf;
 2613         struct mwl_rxdesc *ds;
 2614         struct mbuf *m;
 2615         struct ieee80211_qosframe *wh;
 2616         struct ieee80211_node *ni;
 2617         struct mwl_node *mn;
 2618         int off, len, hdrlen, pktlen, rssi, ntodo;
 2619         uint8_t *data, status;
 2620         void *newdata;
 2621         int16_t nf;
 2622 
 2623         DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
 2624             __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
 2625             RD4(sc, sc->sc_hwspecs.rxDescWrite));
 2626         nf = -96;                       /* XXX */
 2627         bf = sc->sc_rxnext;
 2628         for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
 2629                 if (bf == NULL)
 2630                         bf = STAILQ_FIRST(&sc->sc_rxbuf);
 2631                 ds = bf->bf_desc;
 2632                 data = bf->bf_data;
 2633                 if (data == NULL) {
 2634                         /*
 2635                          * If data allocation failed previously there
 2636                          * will be no buffer; try again to re-populate it.
 2637                          * Note the firmware will not advance to the next
 2638                          * descriptor with a dma buffer so we must mimic
 2639                          * this or we'll get out of sync.
 2640                          */ 
 2641                         DPRINTF(sc, MWL_DEBUG_ANY,
 2642                             "%s: rx buf w/o dma memory\n", __func__);
 2643                         (void) mwl_rxbuf_init(sc, bf);
 2644                         sc->sc_stats.mst_rx_dmabufmissing++;
 2645                         break;
 2646                 }
 2647                 MWL_RXDESC_SYNC(sc, ds,
 2648                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2649                 if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
 2650                         break;
 2651 #ifdef MWL_DEBUG
 2652                 if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
 2653                         mwl_printrxbuf(bf, 0);
 2654 #endif
 2655                 status = ds->Status;
 2656                 if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
 2657                         counter_u64_add(ic->ic_ierrors, 1);
 2658                         sc->sc_stats.mst_rx_crypto++;
 2659                         /*
 2660                          * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
 2661                          *     for backwards compatibility.
 2662                          */
 2663                         if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
 2664                             (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
 2665                                 /*
 2666                                  * MIC error, notify upper layers.
 2667                                  */
 2668                                 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
 2669                                     BUS_DMASYNC_POSTREAD);
 2670                                 mwl_handlemicerror(ic, data);
 2671                                 sc->sc_stats.mst_rx_tkipmic++;
 2672                         }
 2673                         /* XXX too painful to tap packets */
 2674                         goto rx_next;
 2675                 }
 2676                 /*
 2677                  * Sync the data buffer.
 2678                  */
 2679                 len = le16toh(ds->PktLen);
 2680                 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
 2681                 /*
 2682                  * The 802.11 header is provided all or in part at the front;
 2683                  * use it to calculate the true size of the header that we'll
 2684                  * construct below.  We use this to figure out where to copy
 2685                  * payload prior to constructing the header.
 2686                  */
 2687                 hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
 2688                 off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
 2689 
 2690                 /* calculate rssi early so we can re-use for each aggregate */
 2691                 rssi = cvtrssi(ds->RSSI);
 2692 
 2693                 pktlen = hdrlen + (len - off);
 2694                 /*
 2695                  * NB: we know our frame is at least as large as
 2696                  * IEEE80211_MIN_LEN because there is a 4-address
 2697                  * frame at the front.  Hence there's no need to
 2698                  * vet the packet length.  If the frame in fact
 2699                  * is too small it should be discarded at the
 2700                  * net80211 layer.
 2701                  */
 2702 
 2703                 /*
 2704                  * Attach dma buffer to an mbuf.  We tried
 2705                  * doing this based on the packet size (i.e.
 2706                  * copying small packets) but it turns out to
 2707                  * be a net loss.  The tradeoff might be system
 2708                  * dependent (cache architecture is important).
 2709                  */
 2710                 MGETHDR(m, M_NOWAIT, MT_DATA);
 2711                 if (m == NULL) {
 2712                         DPRINTF(sc, MWL_DEBUG_ANY,
 2713                             "%s: no rx mbuf\n", __func__);
 2714                         sc->sc_stats.mst_rx_nombuf++;
 2715                         goto rx_next;
 2716                 }
 2717                 /*
 2718                  * Acquire the replacement dma buffer before
 2719                  * processing the frame.  If we're out of dma
 2720                  * buffers we disable rx interrupts and wait
 2721                  * for the free pool to reach mwl_rxdmalow buffers
 2722                  * before starting to do work again.  If the firmware
 2723                  * runs out of descriptors then it will toss frames
 2724                  * which is better than our doing it as that can
 2725                  * starve our processing.  It is also important that
 2726                  * we always process rx'd frames in case they are
 2727                  * A-MPDU as otherwise the host's view of the BA
 2728                  * window may get out of sync with the firmware.
 2729                  */
 2730                 newdata = mwl_getrxdma(sc);
 2731                 if (newdata == NULL) {
 2732                         /* NB: stat+msg in mwl_getrxdma */
 2733                         m_free(m);
 2734                         /* disable RX interrupt and mark state */
 2735                         mwl_hal_intrset(sc->sc_mh,
 2736                             sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
 2737                         sc->sc_rxblocked = 1;
 2738                         ieee80211_drain(ic);
 2739                         /* XXX check rxblocked and immediately start again? */
 2740                         goto rx_stop;
 2741                 }
 2742                 bf->bf_data = newdata;
 2743                 /*
 2744                  * Attach the dma buffer to the mbuf;
 2745                  * mwl_rxbuf_init will re-setup the rx
 2746                  * descriptor using the replacement dma
 2747                  * buffer we just installed above.
 2748                  */
 2749                 m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
 2750                     EXT_NET_DRV);
 2751                 m->m_data += off - hdrlen;
 2752                 m->m_pkthdr.len = m->m_len = pktlen;
 2753                 /* NB: dma buffer assumed read-only */
 2754 
 2755                 /*
 2756                  * Piece 802.11 header together.
 2757                  */
 2758                 wh = mtod(m, struct ieee80211_qosframe *);
 2759                 /* NB: sometimes a no-op copy but it is simpler to always do it */
 2760                 /* XXX special case so we can memcpy after m_devget? */
 2761                 ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
 2762                 if (IEEE80211_QOS_HAS_SEQ(wh))
 2763                         *(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
 2764                 /*
 2765                  * The f/w strips WEP header but doesn't clear
 2766                  * the WEP bit; mark the packet with M_WEP so
 2767                  * net80211 will treat the data as decrypted.
 2768                  * While here also clear the PWR_MGT bit since
 2769                  * power save is handled by the firmware; passing
 2770                  * it up could cause the upper layer to put the
 2771                  * station into power save.  (The bit is left alone
 2772                  * when built with MWL_HOST_PS_SUPPORT.)
 2773                  */
 2774                 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
 2775                         m->m_flags |= M_WEP;
 2776 #ifdef MWL_HOST_PS_SUPPORT
 2777                 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
 2778 #else
 2779                 wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
 2780                     IEEE80211_FC1_PWR_MGT);
 2781 #endif
 2782 
 2783                 if (ieee80211_radiotap_active(ic)) {
 2784                         struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;
 2785 
 2786                         tap->wr_flags = 0;
 2787                         tap->wr_rate = ds->Rate;
 2788                         tap->wr_antsignal = rssi + nf;
 2789                         tap->wr_antnoise = nf;
 2790                 }
 2791                 if (IFF_DUMPPKTS_RECV(sc, wh)) {
 2792                         ieee80211_dump_pkt(ic, mtod(m, caddr_t),
 2793                             len, ds->Rate, rssi);
 2794                 }
 2795                 /* dispatch */
 2796                 ni = ieee80211_find_rxnode(ic,
 2797                     (const struct ieee80211_frame_min *) wh);
 2798 
 2799                 NET_EPOCH_ENTER(et);
 2800                 if (ni != NULL) {
 2801                         mn = MWL_NODE(ni);
 2802 #ifdef MWL_ANT_INFO_SUPPORT
 2803                         mn->mn_ai.rssi_a = ds->ai.rssi_a;
 2804                         mn->mn_ai.rssi_b = ds->ai.rssi_b;
 2805                         mn->mn_ai.rssi_c = ds->ai.rssi_c;
 2806                         mn->mn_ai.rsvd1 = rssi;
 2807 #endif
 2808                         /* tag AMPDU aggregates for reorder processing */
 2809                         if (ni->ni_flags & IEEE80211_NODE_HT)
 2810                                 m->m_flags |= M_AMPDU;
 2811                         (void) ieee80211_input(ni, m, rssi, nf);
 2812                         ieee80211_free_node(ni);
 2813                 } else
 2814                         (void) ieee80211_input_all(ic, m, rssi, nf);
 2815                 NET_EPOCH_EXIT(et);
 2816 rx_next:
 2817                 /* NB: ignore ENOMEM so we process more descriptors */
 2818                 (void) mwl_rxbuf_init(sc, bf);
 2819                 bf = STAILQ_NEXT(bf, bf_list);
 2820         }
 2821 rx_stop:
 2822         sc->sc_rxnext = bf;
 2823 
 2824         if (mbufq_first(&sc->sc_snd) != NULL) {
 2825                 /* NB: kick fw; the tx thread may have been preempted */
 2826                 mwl_hal_txstart(sc->sc_mh, 0);
 2827                 mwl_start(sc);
 2828         }
 2829 }
 2830 
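      /*
       * Initialize a tx queue: claim the buffers on the free list for
       * this queue and link their descriptors into a circular chain by
       * physical address (each pPhysNext points at the next buffer,
       * wrapping back to the first).
       */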
 2831 static void
 2832 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
 2833 {
 2834         struct mwl_txbuf *bf, *bn;
 2835         struct mwl_txdesc *ds;
 2836 
 2837         MWL_TXQ_LOCK_INIT(sc, txq);
 2838         txq->qnum = qnum;
 2839         txq->txpri = 0; /* XXX */
 2840 #if 0
 2841         /* NB: q setup by mwl_txdma_setup XXX */
 2842         STAILQ_INIT(&txq->free);
 2843 #endif
 2844         STAILQ_FOREACH(bf, &txq->free, bf_list) {
 2845                 bf->bf_txq = txq;
 2846 
 2847                 ds = bf->bf_desc;
 2848                 bn = STAILQ_NEXT(bf, bf_list);
 2849                 if (bn == NULL)
 2850                         bn = STAILQ_FIRST(&txq->free);
 2851                 ds->pPhysNext = htole32(bn->bf_daddr);
 2852         }
 2853         STAILQ_INIT(&txq->active);
 2854 }
 2855 
 2856 /*
 2857  * Setup a hardware data transmit queue for the specified
 2858  * access category (AC).  We record the mapping from ACs
 2859  * to h/w queues for use by mwl_tx_start.
 2860  */
 2861 static int
 2862 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
 2863 {
 2864         struct mwl_txq *txq;
 2865 
 2866         if (ac >= nitems(sc->sc_ac2q)) {
 2867                 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
 2868                         ac, nitems(sc->sc_ac2q));
 2869                 return 0;
 2870         }
 2871         if (mvtype >= MWL_NUM_TX_QUEUES) {
 2872                 device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
 2873                         mvtype, MWL_NUM_TX_QUEUES);
 2874                 return 0;
 2875         }
 2876         txq = &sc->sc_txq[mvtype];
 2877         mwl_txq_init(sc, txq, mvtype);
 2878         sc->sc_ac2q[ac] = txq;
 2879         return 1;
 2880 }
 2881 
 2882 /*
 2883  * Update WME parameters for a transmit queue.
 2884  */
 2885 static int
 2886 mwl_txq_update(struct mwl_softc *sc, int ac)
 2887 {
 2888 #define MWL_EXPONENT_TO_VALUE(v)        ((1<<v)-1)
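              /*
               * NB: MWL_EXPONENT_TO_VALUE converts the WME log2 contention
               *     window values into the linear values handed to
               *     mwl_hal_setedcaparams, e.g. logcwmin 4 -> (1<<4)-1 = 15
               *     and logcwmax 10 -> 1023.
               */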
 2889         struct ieee80211com *ic = &sc->sc_ic;
 2890         struct chanAccParams chp;
 2891         struct mwl_txq *txq = sc->sc_ac2q[ac];
 2892         struct wmeParams *wmep;
 2893         struct mwl_hal *mh = sc->sc_mh;
 2894         int aifs, cwmin, cwmax, txoplim;
 2895 
 2896         ieee80211_wme_ic_getparams(ic, &chp);
 2897         wmep = &chp.cap_wmeParams[ac];
 2898 
 2899         aifs = wmep->wmep_aifsn;
 2900         /* XXX in sta mode need to pass log values for cwmin/max */
 2901         cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
 2902         cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
 2903         txoplim = wmep->wmep_txopLimit;         /* NB: units of 32us */
 2904 
 2905         if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
 2906                 device_printf(sc->sc_dev, "unable to update hardware queue "
 2907                         "parameters for %s traffic!\n",
 2908                         ieee80211_wme_acnames[ac]);
 2909                 return 0;
 2910         }
 2911         return 1;
 2912 #undef MWL_EXPONENT_TO_VALUE
 2913 }
 2914 
 2915 /*
 2916  * Callback from the 802.11 layer to update WME parameters.
 2917  */
 2918 static int
 2919 mwl_wme_update(struct ieee80211com *ic)
 2920 {
 2921         struct mwl_softc *sc = ic->ic_softc;
 2922 
 2923         return !mwl_txq_update(sc, WME_AC_BE) ||
 2924             !mwl_txq_update(sc, WME_AC_BK) ||
 2925             !mwl_txq_update(sc, WME_AC_VI) ||
 2926             !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
 2927 }
 2928 
 2929 /*
 2930  * Reclaim resources for a setup queue.
 2931  */
 2932 static void
 2933 mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
 2934 {
 2935         /* XXX hal work? */
 2936         MWL_TXQ_LOCK_DESTROY(txq);
 2937 }
 2938 
 2939 /*
 2940  * Reclaim all tx queue resources.
 2941  */
 2942 static void
 2943 mwl_tx_cleanup(struct mwl_softc *sc)
 2944 {
 2945         int i;
 2946 
 2947         for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
 2948                 mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
 2949 }
 2950 
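      /*
       * Load an outbound mbuf chain for DMA.  If the chain requires more
       * than MWL_TXDESC segments it is linearized (m_collapse/m_defrag)
       * and re-loaded; on success bf->bf_segs/bf_nseg describe the frame
       * and bf->bf_m holds the (possibly replaced) mbuf chain.
       */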
 2951 static int
 2952 mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
 2953 {
 2954         struct mbuf *m;
 2955         int error;
 2956 
 2957         /*
 2958          * Load the DMA map so any coalescing is done.  This
 2959          * also calculates the number of descriptors we need.
 2960          */
 2961         error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
 2962                                      bf->bf_segs, &bf->bf_nseg,
 2963                                      BUS_DMA_NOWAIT);
 2964         if (error == EFBIG) {
 2965                 /* XXX packet requires too many descriptors */
 2966                 bf->bf_nseg = MWL_TXDESC+1;
 2967         } else if (error != 0) {
 2968                 sc->sc_stats.mst_tx_busdma++;
 2969                 m_freem(m0);
 2970                 return error;
 2971         }
 2972         /*
 2973          * Discard null packets and check for packets that
 2974          * require too many TX descriptors.  We try to convert
 2975          * the latter to a cluster.
 2976          */
 2977         if (error == EFBIG) {           /* too many desc's, linearize */
 2978                 sc->sc_stats.mst_tx_linear++;
 2979 #if MWL_TXDESC > 1
 2980                 m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
 2981 #else
 2982                 m = m_defrag(m0, M_NOWAIT);
 2983 #endif
 2984                 if (m == NULL) {
 2985                         m_freem(m0);
 2986                         sc->sc_stats.mst_tx_nombuf++;
 2987                         return ENOMEM;
 2988                 }
 2989                 m0 = m;
 2990                 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
 2991                                              bf->bf_segs, &bf->bf_nseg,
 2992                                              BUS_DMA_NOWAIT);
 2993                 if (error != 0) {
 2994                         sc->sc_stats.mst_tx_busdma++;
 2995                         m_freem(m0);
 2996                         return error;
 2997                 }
 2998                 KASSERT(bf->bf_nseg <= MWL_TXDESC,
 2999                     ("too many segments after defrag; nseg %u", bf->bf_nseg));
 3000         } else if (bf->bf_nseg == 0) {          /* null packet, discard */
 3001                 sc->sc_stats.mst_tx_nodata++;
 3002                 m_freem(m0);
 3003                 return EIO;
 3004         }
 3005         DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
 3006                 __func__, m0, m0->m_pkthdr.len);
 3007         bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
 3008         bf->bf_m = m0;
 3009 
 3010         return 0;
 3011 }
 3012 
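      /*
       * Map a legacy rate in net80211 form (units of 1/2 Mb/s, e.g.
       * 2 = 1 Mb/s, 22 = 11 Mb/s, 108 = 54 Mb/s) to the rate index the
       * firmware expects in the tx descriptor; mwl_cvtlegacyrix below
       * performs the inverse mapping.
       */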
 3013 static __inline int
 3014 mwl_cvtlegacyrate(int rate)
 3015 {
 3016         switch (rate) {
 3017         case 2:  return 0;
 3018         case 4:  return 1;
 3019         case 11: return 2;
 3020         case 22: return 3;
 3021         case 44: return 4;
 3022         case 12: return 5;
 3023         case 18: return 6;
 3024         case 24: return 7;
 3025         case 36: return 8;
 3026         case 48: return 9;
 3027         case 72: return 10;
 3028         case 96: return 11;
 3029         case 108:return 12;
 3030         }
 3031         return 0;
 3032 }
 3033 
 3034 /*
 3035  * Calculate fixed tx rate information per client state;
 3036  * this value is suitable for writing to the Format field
 3037  * of a tx descriptor.
 3038  */
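      /* NB: mwl_tx_processq decodes this same encoding on tx completion */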
 3039 static uint16_t
 3040 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
 3041 {
 3042         uint16_t fmt;
 3043 
 3044         fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
 3045             | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
 3046                 EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
 3047         if (rate & IEEE80211_RATE_MCS) {        /* HT MCS */
 3048                 fmt |= EAGLE_TXD_FORMAT_HT
 3049                     /* NB: 0x80 implicitly stripped from ucastrate */
 3050                     | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
 3051                 /* XXX short/long GI may be wrong; re-check */
 3052                 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
 3053                         fmt |= EAGLE_TXD_CHW_40
 3054                             | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
 3055                                 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
 3056                 } else {
 3057                         fmt |= EAGLE_TXD_CHW_20
 3058                             | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
 3059                                 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
 3060                 }
 3061         } else {                        /* legacy rate */
 3062                 fmt |= EAGLE_TXD_FORMAT_LEGACY
 3063                     | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
 3064                         EAGLE_TXD_RATE)
 3065                     | EAGLE_TXD_CHW_20
 3066                     /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
 3067                     | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
 3068                         EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
 3069         }
 3070         return fmt;
 3071 }
 3072 
 3073 static int
 3074 mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
 3075     struct mbuf *m0)
 3076 {
 3077         struct ieee80211com *ic = &sc->sc_ic;
 3078         struct ieee80211vap *vap = ni->ni_vap;
 3079         int error, iswep, ismcast;
 3080         int hdrlen, pktlen;
 3081         struct mwl_txdesc *ds;
 3082         struct mwl_txq *txq;
 3083         struct ieee80211_frame *wh;
 3084         struct mwltxrec *tr;
 3085         struct mwl_node *mn;
 3086         uint16_t qos;
 3087 #if MWL_TXDESC > 1
 3088         int i;
 3089 #endif
 3090 
 3091         wh = mtod(m0, struct ieee80211_frame *);
 3092         iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
 3093         ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
 3094         hdrlen = ieee80211_anyhdrsize(wh);
 3095         pktlen = m0->m_pkthdr.len;
 3096         if (IEEE80211_QOS_HAS_SEQ(wh)) {
 3097                 qos = *(uint16_t *)ieee80211_getqos(wh);
 3098         } else
 3099                 qos = 0;
 3100 
 3101         if (iswep) {
 3102                 const struct ieee80211_cipher *cip;
 3103                 struct ieee80211_key *k;
 3104 
 3105                 /*
 3106                  * Construct the 802.11 header+trailer for an encrypted
 3107                  * frame.  The only reason this can fail is an unknown
 3108                  * or unsupported cipher/key type.
 3109                  *
 3110                  * NB: we do this even though the firmware will ignore
 3111                  *     what we've done for WEP and TKIP as we need the
 3112                  *     ExtIV filled in for CCMP and this also adjusts
 3113                  *     the headers which simplifies our work below.
 3114                  */
 3115                 k = ieee80211_crypto_encap(ni, m0);
 3116                 if (k == NULL) {
 3117                         /*
 3118                          * This can happen when the key is yanked after the
 3119                          * frame was queued.  Just discard the frame; the
 3120                          * 802.11 layer counts failures and provides
 3121                          * debugging/diagnostics.
 3122                          */
 3123                         m_freem(m0);
 3124                         return EIO;
 3125                 }
 3126                 /*
 3127                  * Adjust the packet length for the crypto additions
 3128                  * done during encap and any other bits that the f/w
 3129                  * will add later on.
 3130                  */
 3131                 cip = k->wk_cipher;
 3132                 pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
 3133 
 3134                 /* packet header may have moved, reset our local pointer */
 3135                 wh = mtod(m0, struct ieee80211_frame *);
 3136         }
 3137 
 3138         if (ieee80211_radiotap_active_vap(vap)) {
 3139                 sc->sc_tx_th.wt_flags = 0;      /* XXX */
 3140                 if (iswep)
 3141                         sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
 3142 #if 0
 3143                 sc->sc_tx_th.wt_rate = ds->DataRate;
 3144 #endif
 3145                 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
 3146                 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
 3147 
 3148                 ieee80211_radiotap_tx(vap, m0);
 3149         }
 3150         /*
 3151          * Copy up/down the 802.11 header; the firmware requires
 3152          * we present a 2-byte payload length followed by a
 3153          * 4-address header (w/o QoS), followed (optionally) by
 3154          * any WEP/ExtIV header (but only filled in for CCMP).
 3155          * We are assured the mbuf has sufficient headroom to
 3156          * prepend in-place by the setup of ic_headroom in
 3157          * mwl_attach.
 3158          */
 3159         if (hdrlen < sizeof(struct mwltxrec)) {
 3160                 const int space = sizeof(struct mwltxrec) - hdrlen;
 3161                 if (M_LEADINGSPACE(m0) < space) {
 3162                         /* NB: should never happen */
 3163                         device_printf(sc->sc_dev,
 3164                             "not enough headroom, need %d found %zd, "
 3165                             "m_flags 0x%x m_len %d\n",
 3166                             space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
 3167                         ieee80211_dump_pkt(ic,
 3168                             mtod(m0, const uint8_t *), m0->m_len, 0, -1);
 3169                         m_freem(m0);
 3170                         sc->sc_stats.mst_tx_noheadroom++;
 3171                         return EIO;
 3172                 }
 3173                 M_PREPEND(m0, space, M_NOWAIT);
 3174         }
 3175         tr = mtod(m0, struct mwltxrec *);
 3176         if (wh != (struct ieee80211_frame *) &tr->wh)
 3177                 ovbcopy(wh, &tr->wh, hdrlen);
 3178         /*
 3179          * Note: the "firmware length" is actually the length
 3180          * of the fully formed "802.11 payload".  That is, it's
 3181          * everything except for the 802.11 header.  In particular
 3182          * this includes all crypto material including the MIC!
 3183          */
 3184         tr->fwlen = htole16(pktlen - hdrlen);
 3185 
 3186         /*
 3187          * Load the DMA map so any coalescing is done.  This
 3188          * also calculates the number of descriptors we need.
 3189          */
 3190         error = mwl_tx_dmasetup(sc, bf, m0);
 3191         if (error != 0) {
 3192                 /* NB: stat collected in mwl_tx_dmasetup */
 3193                 DPRINTF(sc, MWL_DEBUG_XMIT,
 3194                     "%s: unable to setup dma\n", __func__);
 3195                 return error;
 3196         }
 3197         bf->bf_node = ni;                       /* NB: held reference */
 3198         m0 = bf->bf_m;                          /* NB: may have changed */
 3199         tr = mtod(m0, struct mwltxrec *);
 3200         wh = (struct ieee80211_frame *)&tr->wh;
 3201 
 3202         /*
 3203          * Formulate tx descriptor.
 3204          */
 3205         ds = bf->bf_desc;
 3206         txq = bf->bf_txq;
 3207 
 3208         ds->QosCtrl = qos;                      /* NB: already little-endian */
 3209 #if MWL_TXDESC == 1
 3210         /*
 3211          * NB: multiframes should be zero because the descriptors
 3212          *     are initialized to zero.  This should handle the case
 3213          *     where the driver is built with MWL_TXDESC=1 but we are
 3214          *     using firmware with multi-segment support.
 3215          */
 3216         ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
 3217         ds->PktLen = htole16(bf->bf_segs[0].ds_len);
 3218 #else
 3219         ds->multiframes = htole32(bf->bf_nseg);
 3220         ds->PktLen = htole16(m0->m_pkthdr.len);
 3221         for (i = 0; i < bf->bf_nseg; i++) {
 3222                 ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
 3223                 ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
 3224         }
 3225 #endif
 3226         /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
 3227         ds->Format = 0;
 3228         ds->pad = 0;
 3229         ds->ack_wcb_addr = 0;
 3230 
 3231         mn = MWL_NODE(ni);
 3232         /*
 3233          * Select transmit rate and tx queue priority.
 3234          */
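              /*
               * NB: data frames that match an active BA stream below are
               *     directed to that stream's tx queue so the firmware can
               *     aggregate them.
               */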
 3235         switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
 3236         case IEEE80211_FC0_TYPE_MGT:
 3237                 sc->sc_stats.mst_tx_mgmt++;
 3238                 /* fall thru... */
 3239         case IEEE80211_FC0_TYPE_CTL:
 3240                 /* NB: assign to BE q to avoid bursting */
 3241                 ds->TxPriority = MWL_WME_AC_BE;
 3242                 break;
 3243         case IEEE80211_FC0_TYPE_DATA:
 3244                 if (!ismcast) {
 3245                         const struct ieee80211_txparam *tp = ni->ni_txparms;
 3246                         /*
 3247                          * EAPOL frames get forced to a fixed rate and w/o
 3248                          * aggregation; otherwise check for any fixed rate
 3249                          * for the client (may depend on association state).
 3250                          */
 3251                         if (m0->m_flags & M_EAPOL) {
 3252                                 const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
 3253                                 ds->Format = mvp->mv_eapolformat;
 3254                                 ds->pad = htole16(
 3255                                     EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
 3256                         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
 3257                                 /* XXX pre-calculate per node */
 3258                                 ds->Format = htole16(
 3259                                     mwl_calcformat(tp->ucastrate, ni));
 3260                                 ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
 3261                         }
 3262                         /* NB: EAPOL frames will never have qos set */
 3263                         if (qos == 0)
 3264                                 ds->TxPriority = txq->qnum;
 3265 #if MWL_MAXBA > 3
 3266                         else if (mwl_bastream_match(&mn->mn_ba[3], qos))
 3267                                 ds->TxPriority = mn->mn_ba[3].txq;
 3268 #endif
 3269 #if MWL_MAXBA > 2
 3270                         else if (mwl_bastream_match(&mn->mn_ba[2], qos))
 3271                                 ds->TxPriority = mn->mn_ba[2].txq;
 3272 #endif
 3273 #if MWL_MAXBA > 1
 3274                         else if (mwl_bastream_match(&mn->mn_ba[1], qos))
 3275                                 ds->TxPriority = mn->mn_ba[1].txq;
 3276 #endif
 3277 #if MWL_MAXBA > 0
 3278                         else if (mwl_bastream_match(&mn->mn_ba[0], qos))
 3279                                 ds->TxPriority = mn->mn_ba[0].txq;
 3280 #endif
 3281                         else
 3282                                 ds->TxPriority = txq->qnum;
 3283                 } else
 3284                         ds->TxPriority = txq->qnum;
 3285                 break;
 3286         default:
 3287                 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
 3288                         wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
 3289                 sc->sc_stats.mst_tx_badframetype++;
 3290                 m_freem(m0);
 3291                 return EIO;
 3292         }
 3293 
 3294         if (IFF_DUMPPKTS_XMIT(sc))
 3295                 ieee80211_dump_pkt(ic,
 3296                     mtod(m0, const uint8_t *)+sizeof(uint16_t),
 3297                     m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
 3298 
 3299         MWL_TXQ_LOCK(txq);
 3300         ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
 3301         STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
 3302         MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3303 
 3304         sc->sc_tx_timer = 5;
 3305         MWL_TXQ_UNLOCK(txq);
 3306 
 3307         return 0;
 3308 }
 3309 
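      /*
       * Inverse of mwl_cvtlegacyrate: map a firmware legacy rate index
       * back to a net80211 rate in 1/2 Mb/s units.
       */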
 3310 static __inline int
 3311 mwl_cvtlegacyrix(int rix)
 3312 {
 3313         static const int ieeerates[] =
 3314             { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
 3315         return (rix < nitems(ieeerates) ? ieeerates[rix] : 0);
 3316 }
 3317 
 3318 /*
 3319  * Process completed xmit descriptors from the specified queue.
 3320  */
 3321 static int
 3322 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
 3323 {
 3324 #define EAGLE_TXD_STATUS_MCAST \
 3325         (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
 3326         struct ieee80211com *ic = &sc->sc_ic;
 3327         struct mwl_txbuf *bf;
 3328         struct mwl_txdesc *ds;
 3329         struct ieee80211_node *ni;
 3330         int nreaped;
 3331         uint32_t status;
 3332 
 3333         DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
 3334         for (nreaped = 0;; nreaped++) {
 3335                 MWL_TXQ_LOCK(txq);
 3336                 bf = STAILQ_FIRST(&txq->active);
 3337                 if (bf == NULL) {
 3338                         MWL_TXQ_UNLOCK(txq);
 3339                         break;
 3340                 }
 3341                 ds = bf->bf_desc;
 3342                 MWL_TXDESC_SYNC(txq, ds,
 3343                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3344                 if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
 3345                         MWL_TXQ_UNLOCK(txq);
 3346                         break;
 3347                 }
 3348                 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
 3349                 MWL_TXQ_UNLOCK(txq);
 3350 
 3351 #ifdef MWL_DEBUG
 3352                 if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
 3353                         mwl_printtxbuf(bf, txq->qnum, nreaped);
 3354 #endif
 3355                 ni = bf->bf_node;
 3356                 if (ni != NULL) {
 3357                         status = le32toh(ds->Status);
 3358                         if (status & EAGLE_TXD_STATUS_OK) {
 3359                                 uint16_t Format = le16toh(ds->Format);
 3360                                 uint8_t txant = _IEEE80211_MASKSHIFT(Format,
 3361                                     EAGLE_TXD_ANTENNA);
 3362 
 3363                                 sc->sc_stats.mst_ant_tx[txant]++;
 3364                                 if (status & EAGLE_TXD_STATUS_OK_RETRY)
 3365                                         sc->sc_stats.mst_tx_retries++;
 3366                                 if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
 3367                                         sc->sc_stats.mst_tx_mretries++;
 3368                                 if (txq->qnum >= MWL_WME_AC_VO)
 3369                                         ic->ic_wme.wme_hipri_traffic++;
 3370                                 ni->ni_txrate = _IEEE80211_MASKSHIFT(Format,
 3371                                     EAGLE_TXD_RATE);
 3372                                 if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
 3373                                         ni->ni_txrate = mwl_cvtlegacyrix(
 3374                                             ni->ni_txrate);
 3375                                 } else
 3376                                         ni->ni_txrate |= IEEE80211_RATE_MCS;
 3377                                 sc->sc_stats.mst_tx_rate = ni->ni_txrate;
 3378                         } else {
 3379                                 if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
 3380                                         sc->sc_stats.mst_tx_linkerror++;
 3381                                 if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
 3382                                         sc->sc_stats.mst_tx_xretries++;
 3383                                 if (status & EAGLE_TXD_STATUS_FAILED_AGING)
 3384                                         sc->sc_stats.mst_tx_aging++;
 3385                                 if (bf->bf_m->m_flags & M_FF)
 3386                                         sc->sc_stats.mst_ff_txerr++;
 3387                         }
 3388                         if (bf->bf_m->m_flags & M_TXCB)
 3389                                 /* XXX strip fw len in case header inspected */
 3390                                 m_adj(bf->bf_m, sizeof(uint16_t));
 3391                         ieee80211_tx_complete(ni, bf->bf_m,
 3392                             (status & EAGLE_TXD_STATUS_OK) == 0);
 3393                 } else
 3394                         m_freem(bf->bf_m);
 3395                 ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
 3396 
 3397                 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
 3398                     BUS_DMASYNC_POSTWRITE);
 3399                 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
 3400 
 3401                 mwl_puttxbuf_tail(txq, bf);
 3402         }
 3403         return nreaped;
 3404 #undef EAGLE_TXD_STATUS_MCAST
 3405 }
 3406 
 3407 /*
 3408  * Deferred processing of transmit interrupt; special-cased
 3409  * for four hardware queues, 0-3.
 3410  */
 3411 static void
 3412 mwl_tx_proc(void *arg, int npending)
 3413 {
 3414         struct mwl_softc *sc = arg;
 3415         int nreaped;
 3416 
 3417         /*
 3418          * Process each active queue.
 3419          */
 3420         nreaped = 0;
 3421         if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
 3422                 nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
 3423         if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
 3424                 nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
 3425         if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
 3426                 nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
 3427         if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
 3428                 nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
 3429 
 3430         if (nreaped != 0) {
 3431                 sc->sc_tx_timer = 0;
 3432                 if (mbufq_first(&sc->sc_snd) != NULL) {
 3433                         /* NB: kick fw; the tx thread may have been preempted */
 3434                         mwl_hal_txstart(sc->sc_mh, 0);
 3435                         mwl_start(sc);
 3436                 }
 3437         }
 3438 }
 3439 
 3440 static void
 3441 mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
 3442 {
 3443         struct ieee80211_node *ni;
 3444         struct mwl_txbuf *bf;
 3445         u_int ix __unused;
 3446 
 3447         /*
 3448          * NB: this assumes output has been stopped and
 3449  *     we do not need to block mwl_tx_proc
 3450          */
 3451         for (ix = 0;; ix++) {
 3452                 MWL_TXQ_LOCK(txq);
 3453                 bf = STAILQ_FIRST(&txq->active);
 3454                 if (bf == NULL) {
 3455                         MWL_TXQ_UNLOCK(txq);
 3456                         break;
 3457                 }
 3458                 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
 3459                 MWL_TXQ_UNLOCK(txq);
 3460 #ifdef MWL_DEBUG
 3461                 if (sc->sc_debug & MWL_DEBUG_RESET) {
 3462                         struct ieee80211com *ic = &sc->sc_ic;
 3463                         const struct mwltxrec *tr =
 3464                             mtod(bf->bf_m, const struct mwltxrec *);
 3465                         mwl_printtxbuf(bf, txq->qnum, ix);
 3466                         ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
 3467                                 bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
 3468                 }
 3469 #endif /* MWL_DEBUG */
 3470                 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
 3471                 ni = bf->bf_node;
 3472                 if (ni != NULL) {
 3473                         /*
 3474                          * Reclaim node reference.
 3475                          */
 3476                         ieee80211_free_node(ni);
 3477                 }
 3478                 m_freem(bf->bf_m);
 3479 
 3480                 mwl_puttxbuf_tail(txq, bf);
 3481         }
 3482 }
 3483 
 3484 /*
 3485  * Drain the transmit queues and reclaim resources.
 3486  */
 3487 static void
 3488 mwl_draintxq(struct mwl_softc *sc)
 3489 {
 3490         int i;
 3491 
 3492         for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
 3493                 mwl_tx_draintxq(sc, &sc->sc_txq[i]);
 3494         sc->sc_tx_timer = 0;
 3495 }
 3496 
 3497 #ifdef MWL_DIAGAPI
 3498 /*
 3499  * Reset the transmit queues to a pristine state after a fw download.
 3500  */
 3501 static void
 3502 mwl_resettxq(struct mwl_softc *sc)
 3503 {
 3504         int i;
 3505 
 3506         for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
 3507                 mwl_txq_reset(sc, &sc->sc_txq[i]);
 3508 }
 3509 #endif /* MWL_DIAGAPI */
 3510 
 3511 /*
 3512  * Clear the transmit queues of any frames submitted for the
 3513  * specified vap.  This is done when the vap is deleted so we
 3514  * don't potentially reference the vap after it is gone.
 3515  * Note we cannot remove the frames; we only reclaim the node
 3516  * reference.
 3517  */
 3518 static void
 3519 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
 3520 {
 3521         struct mwl_txq *txq;
 3522         struct mwl_txbuf *bf;
 3523         int i;
 3524 
 3525         for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
 3526                 txq = &sc->sc_txq[i];
 3527                 MWL_TXQ_LOCK(txq);
 3528                 STAILQ_FOREACH(bf, &txq->active, bf_list) {
 3529                         struct ieee80211_node *ni = bf->bf_node;
 3530                         if (ni != NULL && ni->ni_vap == vap) {
 3531                                 bf->bf_node = NULL;
 3532                                 ieee80211_free_node(ni);
 3533                         }
 3534                 }
 3535                 MWL_TXQ_UNLOCK(txq);
 3536         }
 3537 }
 3538 
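      /*
       * Intercept HT MIMO power save action frames so the station's
       * SM PS state can be pushed to the firmware; all other action
       * frames are handed to the saved handler (sc_recv_action).
       */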
 3539 static int
 3540 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
 3541         const uint8_t *frm, const uint8_t *efrm)
 3542 {
 3543         struct mwl_softc *sc = ni->ni_ic->ic_softc;
 3544         const struct ieee80211_action *ia;
 3545 
 3546         ia = (const struct ieee80211_action *) frm;
 3547         if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
 3548             ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
 3549                 const struct ieee80211_action_ht_mimopowersave *mps =
 3550                     (const struct ieee80211_action_ht_mimopowersave *) ia;
 3551 
 3552                 mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
 3553                     mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
 3554                     _IEEE80211_MASKSHIFT(mps->am_control,
 3555                         IEEE80211_A_HT_MIMOPWRSAVE_MODE));
 3556                 return 0;
 3557         } else
 3558                 return sc->sc_recv_action(ni, wh, frm, efrm);
 3559 }
 3560 
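      /*
       * ADDBA request hook: allocate a firmware BA stream for the TID
       * (if one of the per-node slots is free) before the request is
       * sent; the stream is bound to a tx queue in mwl_addba_response.
       */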
 3561 static int
 3562 mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
 3563         int dialogtoken, int baparamset, int batimeout)
 3564 {
 3565         struct mwl_softc *sc = ni->ni_ic->ic_softc;
 3566         struct ieee80211vap *vap = ni->ni_vap;
 3567         struct mwl_node *mn = MWL_NODE(ni);
 3568         struct mwl_bastate *bas;
 3569 
 3570         bas = tap->txa_private;
 3571         if (bas == NULL) {
 3572                 const MWL_HAL_BASTREAM *sp;
 3573                 /*
 3574                  * Check for a free BA stream slot.
 3575                  */
 3576 #if MWL_MAXBA > 3
 3577                 if (mn->mn_ba[3].bastream == NULL)
 3578                         bas = &mn->mn_ba[3];
 3579                 else
 3580 #endif
 3581 #if MWL_MAXBA > 2
 3582                 if (mn->mn_ba[2].bastream == NULL)
 3583                         bas = &mn->mn_ba[2];
 3584                 else
 3585 #endif
 3586 #if MWL_MAXBA > 1
 3587                 if (mn->mn_ba[1].bastream == NULL)
 3588                         bas = &mn->mn_ba[1];
 3589                 else
 3590 #endif
 3591 #if MWL_MAXBA > 0
 3592                 if (mn->mn_ba[0].bastream == NULL)
 3593                         bas = &mn->mn_ba[0];
 3594                 else 
 3595 #endif
 3596                 {
 3597                         /* sta already has max BA streams */
 3598                         /* XXX assign BA stream to highest priority tid */
 3599                         DPRINTF(sc, MWL_DEBUG_AMPDU,
 3600                             "%s: already has max bastreams\n", __func__);
 3601                         sc->sc_stats.mst_ampdu_reject++;
 3602                         return 0;
 3603                 }
 3604                 /* NB: no held reference to ni */
 3605                 sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
 3606                     (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
 3607                     ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
 3608                     ni, tap);
 3609                 if (sp == NULL) {
 3610                         /*
 3611                          * No stream available; return 0 so that no
 3612                          * A-MPDU aggregation will be done.
 3613                          */
 3614                         DPRINTF(sc, MWL_DEBUG_AMPDU,
 3615                             "%s: no bastream available\n", __func__);
 3616                         sc->sc_stats.mst_ampdu_nostream++;
 3617                         return 0;
 3618                 }
 3619                 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
 3620                     __func__, sp);
 3621                 /* NB: qos is left zero so we won't match in mwl_tx_start */
 3622                 bas->bastream = sp;
 3623                 tap->txa_private = bas;
 3624         }
 3625         /* fetch the current seq# from the firmware, if available */
 3626         if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
 3627             vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
 3628             &tap->txa_start) != 0)
 3629                 tap->txa_start = 0;
 3630         return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
 3631 }
 3632 
 3633 static int
 3634 mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
 3635         int code, int baparamset, int batimeout)
 3636 {
 3637         struct mwl_softc *sc = ni->ni_ic->ic_softc;
 3638         struct mwl_bastate *bas;
 3639 
 3640         bas = tap->txa_private;
 3641         if (bas == NULL) {
 3642                 /* XXX should not happen */
 3643                 DPRINTF(sc, MWL_DEBUG_AMPDU,
 3644                     "%s: no BA stream allocated, TID %d\n",
 3645                     __func__, tap->txa_tid);
 3646                 sc->sc_stats.mst_addba_nostream++;
 3647                 return 0;
 3648         }
 3649         if (code == IEEE80211_STATUS_SUCCESS) {
 3650                 struct ieee80211vap *vap = ni->ni_vap;
 3651                 int bufsiz, error;
 3652 
 3653                 /*
 3654                  * Tell the firmware to setup the BA stream;
 3655                  * we know resources are available because we
 3656                  * pre-allocated one before forming the request.
 3657                  */
 3658                 bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
 3659                 if (bufsiz == 0)
 3660                         bufsiz = IEEE80211_AGGR_BAWMAX;
 3661                 error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
 3662                     bas->bastream, bufsiz, bufsiz, tap->txa_start);
 3663                 if (error != 0) {
 3664                         /*
 3665                          * Setup failed, return immediately so no a-mpdu
 3666                          * aggregation will be done.
 3667                          */
 3668                         mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
 3669                         mwl_bastream_free(bas);
 3670                         tap->txa_private = NULL;
 3671 
 3672                         DPRINTF(sc, MWL_DEBUG_AMPDU,
 3673                             "%s: create failed, error %d, bufsiz %d TID %d "
 3674                             "htparam 0x%x\n", __func__, error, bufsiz,
 3675                             tap->txa_tid, ni->ni_htparam);
 3676                         sc->sc_stats.mst_bacreate_failed++;
 3677                         return 0;
 3678                 }
 3679                 /* NB: cache txq to avoid ptr indirect */
 3680                 mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
 3681                 DPRINTF(sc, MWL_DEBUG_AMPDU,
 3682                     "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
 3683                     "htparam 0x%x\n", __func__, bas->bastream,
 3684                     bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
 3685         } else {
 3686                 /*
 3687                  * Other side NAK'd us; return the resources.
 3688                  */
 3689                 DPRINTF(sc, MWL_DEBUG_AMPDU,
 3690                     "%s: request failed with code %d, destroy bastream %p\n",
 3691                     __func__, code, bas->bastream);
 3692                 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
 3693                 mwl_bastream_free(bas);
 3694                 tap->txa_private = NULL;
 3695         }
 3696         /* NB: firmware sends BAR so we don't need to */
 3697         return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
 3698 }
 3699 
 3700 static void
 3701 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
 3702 {
 3703         struct mwl_softc *sc = ni->ni_ic->ic_softc;
 3704         struct mwl_bastate *bas;
 3705 
 3706         bas = tap->txa_private;
 3707         if (bas != NULL) {
 3708                 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
 3709                     __func__, bas->bastream);
 3710                 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
 3711                 mwl_bastream_free(bas);
 3712                 tap->txa_private = NULL;
 3713         }
 3714         sc->sc_addba_stop(ni, tap);
 3715 }
 3716 
 3717 /*
 3718  * Setup the rx data structures.  This should only be
 3719  * done once or we may get out of sync with the firmware.
 3720  */
 3721 static int
 3722 mwl_startrecv(struct mwl_softc *sc)
 3723 {
 3724         if (!sc->sc_recvsetup) {
 3725                 struct mwl_rxbuf *bf, *prev;
 3726                 struct mwl_rxdesc *ds;
 3727 
 3728                 prev = NULL;
 3729                 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
 3730                         int error = mwl_rxbuf_init(sc, bf);
 3731                         if (error != 0) {
 3732                                 DPRINTF(sc, MWL_DEBUG_RECV,
 3733                                         "%s: mwl_rxbuf_init failed %d\n",
 3734                                         __func__, error);
 3735                                 return error;
 3736                         }
 3737                         if (prev != NULL) {
 3738                                 ds = prev->bf_desc;
 3739                                 ds->pPhysNext = htole32(bf->bf_daddr);
 3740                         }
 3741                         prev = bf;
 3742                 }
 3743                 if (prev != NULL) {
 3744                         ds = prev->bf_desc;
 3745                         ds->pPhysNext =
 3746                             htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
 3747                 }
 3748                 sc->sc_recvsetup = 1;
 3749         }
 3750         mwl_mode_init(sc);              /* set filters, etc. */
 3751         return 0;
 3752 }
 3753 
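      /*
       * Map the vap configuration and operating channel to the closest
       * firmware AP operating mode (a/b/g/n combinations).
       */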
 3754 static MWL_HAL_APMODE
 3755 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
 3756 {
 3757         MWL_HAL_APMODE mode;
 3758 
 3759         if (IEEE80211_IS_CHAN_HT(chan)) {
 3760                 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
 3761                         mode = AP_MODE_N_ONLY;
 3762                 else if (IEEE80211_IS_CHAN_5GHZ(chan))
 3763                         mode = AP_MODE_AandN;
 3764                 else if (vap->iv_flags & IEEE80211_F_PUREG)
 3765                         mode = AP_MODE_GandN;
 3766                 else
 3767                         mode = AP_MODE_BandGandN;
 3768         } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
 3769                 if (vap->iv_flags & IEEE80211_F_PUREG)
 3770                         mode = AP_MODE_G_ONLY;
 3771                 else
 3772                         mode = AP_MODE_MIXED;
 3773         } else if (IEEE80211_IS_CHAN_B(chan))
 3774                 mode = AP_MODE_B_ONLY;
 3775         else if (IEEE80211_IS_CHAN_A(chan))
 3776                 mode = AP_MODE_A_ONLY;
 3777         else
 3778                 mode = AP_MODE_MIXED;           /* XXX should not happen? */
 3779         return mode;
 3780 }
 3781 
 3782 static int
 3783 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
 3784 {
 3785         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 3786         return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
 3787 }
 3788 
 3789 /*
 3790  * Set/change channels.
 3791  */
 3792 static int
 3793 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
 3794 {
 3795         struct mwl_hal *mh = sc->sc_mh;
 3796         struct ieee80211com *ic = &sc->sc_ic;
 3797         MWL_HAL_CHANNEL hchan;
 3798         int maxtxpow;
 3799 
 3800         DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
 3801             __func__, chan->ic_freq, chan->ic_flags);
 3802 
 3803         /*
 3804          * Convert to a HAL channel description with
 3805          * the flags constrained to reflect the current
 3806          * operating mode.
 3807          */
 3808         mwl_mapchan(&hchan, chan);
 3809         mwl_hal_intrset(mh, 0);         /* disable interrupts */
 3810 #if 0
 3811         mwl_draintxq(sc);               /* clear pending tx frames */
 3812 #endif
 3813         mwl_hal_setchannel(mh, &hchan);
 3814         /*
 3815          * Tx power is cap'd by the regulatory setting and
 3816          * possibly a user-set limit.  We pass the min of
 3817          * these to the hal to apply them to the cal data
 3818          * for this channel.
 3819          * XXX min bound?
 3820          */
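              /* NB: ic_maxregpower is in dBm, ic_txpowlimit in 1/2 dBm units */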
 3821         maxtxpow = 2*chan->ic_maxregpower;
 3822         if (maxtxpow > ic->ic_txpowlimit)
 3823                 maxtxpow = ic->ic_txpowlimit;
 3824         mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
 3825         /* NB: potentially change mcast/mgt rates */
 3826         mwl_setcurchanrates(sc);
 3827 
 3828         /*
 3829          * Update internal state.
 3830          */
 3831         sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
 3832         sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
 3833         if (IEEE80211_IS_CHAN_A(chan)) {
 3834                 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
 3835                 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
 3836         } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
 3837                 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
 3838                 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
 3839         } else {
 3840                 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
 3841                 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
 3842         }
 3843         sc->sc_curchan = hchan;
 3844         mwl_hal_intrset(mh, sc->sc_imask);
 3845 
 3846         return 0;
 3847 }
 3848 
 3849 static void
 3850 mwl_scan_start(struct ieee80211com *ic)
 3851 {
 3852         struct mwl_softc *sc = ic->ic_softc;
 3853 
 3854         DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
 3855 }
 3856 
 3857 static void
 3858 mwl_scan_end(struct ieee80211com *ic)
 3859 {
 3860         struct mwl_softc *sc = ic->ic_softc;
 3861 
 3862         DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
 3863 }
 3864 
 3865 static void
 3866 mwl_set_channel(struct ieee80211com *ic)
 3867 {
 3868         struct mwl_softc *sc = ic->ic_softc;
 3869 
 3870         (void) mwl_chan_set(sc, ic->ic_curchan);
 3871 }
 3872 
 3873 /* 
 3874  * Handle a channel switch request.  We inform the firmware
 3875  * and mark the global state to suppress various actions.
 3876  * NB: we issue only one request to the fw; we may be called
 3877  * multiple times if there are multiple vap's.
 3878  */
 3879 static void
 3880 mwl_startcsa(struct ieee80211vap *vap)
 3881 {
 3882         struct ieee80211com *ic = vap->iv_ic;
 3883         struct mwl_softc *sc = ic->ic_softc;
 3884         MWL_HAL_CHANNEL hchan;
 3885 
 3886         if (sc->sc_csapending)
 3887                 return;
 3888 
 3889         mwl_mapchan(&hchan, ic->ic_csa_newchan);
 3890         /* NB: 1 => quiet channel (stations refrain from tx until the switch) */
 3891         mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
 3892         sc->sc_csapending = 1;
 3893 }
 3894 
 3895 /*
 3896  * Plumb any static WEP key for the station.  This is
 3897  * necessary as we must propagate the key from the
 3898  * global key table of the vap to each sta db entry.
 3899  */
 3900 static void
 3901 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
 3902 {
 3903         if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
 3904                 IEEE80211_F_PRIVACY &&
 3905             vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
 3906             vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
 3907                 (void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
 3908                                     mac);
 3909 }
 3910 
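      /*
       * Create/update the firmware station db entry for a peer station.
       * Any static WEP key is (re)plumbed afterwards since the call to
       * mwl_hal_newstation clobbers the station's crypto state.
       */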
 3911 static int
 3912 mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
 3913 {
 3914 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
 3915         struct ieee80211vap *vap = ni->ni_vap;
 3916         struct mwl_hal_vap *hvap;
 3917         int error;
 3918 
 3919         if (vap->iv_opmode == IEEE80211_M_WDS) {
 3920                 /*
 3921                  * WDS vap's do not have a f/w vap; instead they piggyback
 3922                  * on an AP vap and we must install the sta db entry and
 3923                  * crypto state using that AP's handle (the WDS vap has none).
 3924                  */
 3925                 hvap = MWL_VAP(vap)->mv_ap_hvap;
 3926         } else
 3927                 hvap = MWL_VAP(vap)->mv_hvap;
 3928         error = mwl_hal_newstation(hvap, ni->ni_macaddr,
 3929             aid, staid, pi,
 3930             ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
 3931             ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
 3932         if (error == 0) {
 3933                 /*
 3934                  * Setup security for this station.  For sta mode this is
 3935                  * needed even though we do the same thing on transition to
 3936                  * AUTH state because the call to mwl_hal_newstation
 3937                  * clobbers the crypto state we set up.
 3938                  */
 3939                 mwl_setanywepkey(vap, ni->ni_macaddr);
 3940         }
 3941         return error;
 3942 #undef WME
 3943 }
 3944 
 3945 static void
 3946 mwl_setglobalkeys(struct ieee80211vap *vap)
 3947 {
 3948         struct ieee80211_key *wk;
 3949 
 3950         wk = &vap->iv_nw_keys[0];
 3951         for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
 3952                 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
 3953                         (void) _mwl_key_set(vap, wk, vap->iv_myaddr);
 3954 }
 3955 
 3956 /*
 3957  * Convert a legacy rate set to a firmware bitmask.
 3958  */
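      /*
       * For example a CCK-only rate set {2, 4, 11, 22} (net80211 1/2 Mb/s
       * units, i.e. 1/2/5.5/11 Mb/s) yields a bitmap of 0x00f.
       */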
 3959 static uint32_t
 3960 get_rate_bitmap(const struct ieee80211_rateset *rs)
 3961 {
 3962         uint32_t rates;
 3963         int i;
 3964 
 3965         rates = 0;
 3966         for (i = 0; i < rs->rs_nrates; i++)
 3967                 switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
 3968                 case 2:   rates |= 0x001; break;
 3969                 case 4:   rates |= 0x002; break;
 3970                 case 11:  rates |= 0x004; break;
 3971                 case 22:  rates |= 0x008; break;
 3972                 case 44:  rates |= 0x010; break;
 3973                 case 12:  rates |= 0x020; break;
 3974                 case 18:  rates |= 0x040; break;
 3975                 case 24:  rates |= 0x080; break;
 3976                 case 36:  rates |= 0x100; break;
 3977                 case 48:  rates |= 0x200; break;
 3978                 case 72:  rates |= 0x400; break;
 3979                 case 96:  rates |= 0x800; break;
 3980                 case 108: rates |= 0x1000; break;
 3981                 }
 3982         return rates;
 3983 }
 3984 
 3985 /*
 3986  * Construct an HT firmware bitmask from an HT rate set.
 3987  */
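      /*
       * For example MCS 0-7 yield a bitmap of 0x00ff; MCS indices of 16
       * or more are ignored.
       */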
 3988 static uint32_t
 3989 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
 3990 {
 3991         uint32_t rates;
 3992         int i;
 3993 
 3994         rates = 0;
 3995         for (i = 0; i < rs->rs_nrates; i++) {
 3996                 if (rs->rs_rates[i] < 16)
 3997                         rates |= 1<<rs->rs_rates[i];
 3998         }
 3999         return rates;
 4000 }
 4001 
 4002 /*
 4003  * Craft station database entry for station.
 4004  * NB: use host byte order here, the hal handles byte swapping.
 4005  */
 4006 static MWL_HAL_PEERINFO *
 4007 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
 4008 {
 4009         const struct ieee80211vap *vap = ni->ni_vap;
 4010 
 4011         memset(pi, 0, sizeof(*pi));
 4012         pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
 4013         pi->CapInfo = ni->ni_capinfo;
 4014         if (ni->ni_flags & IEEE80211_NODE_HT) {
 4015                 /* HT capabilities, etc */
 4016                 pi->HTCapabilitiesInfo = ni->ni_htcap;
 4017                 /* XXX pi.HTCapabilitiesInfo */
 4018                 pi->MacHTParamInfo = ni->ni_htparam;    
 4019                 pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
 4020                 pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
 4021                 pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
 4022                 pi->AddHtInfo.OpMode = ni->ni_htopmode;
 4023                 pi->AddHtInfo.stbc = ni->ni_htstbc;
 4024 
 4025                 /* constrain according to local configuration */
 4026                 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
 4027                         pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
 4028                 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
 4029                         pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
 4030                 if (ni->ni_chw != 40)
 4031                         pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
 4032         }
 4033         return pi;
 4034 }
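/*
 * Illustration (not part of the driver): the masking above keeps the
 * firmware from using HT features the local configuration has disabled.
 * Assuming the standard HT Capabilities Info bit values (CHWIDTH40 0x0002,
 * SHORTGI20 0x0020, SHORTGI40 0x0040), a peer advertising all three while
 * short-GI is disabled locally and the association is 20 MHz wide has
 * 0x0062 cleared from HTCapabilitiesInfo before it reaches the firmware.
 */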
 4035 
 4036 /*
 4037  * Re-create the local sta db entry for a vap to ensure
 4038  * up-to-date WME state is pushed to the firmware.  Because
 4039  * this resets crypto state, it must be followed by a
 4040  * reload of any keys in the global key table.
 4041  */
 4042 static int
 4043 mwl_localstadb(struct ieee80211vap *vap)
 4044 {
 4045 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
 4046         struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
 4047         struct ieee80211_node *bss;
 4048         MWL_HAL_PEERINFO pi;
 4049         int error;
 4050 
 4051         switch (vap->iv_opmode) {
 4052         case IEEE80211_M_STA:
 4053                 bss = vap->iv_bss;
 4054                 error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
 4055                     vap->iv_state == IEEE80211_S_RUN ?
 4056                         mkpeerinfo(&pi, bss) : NULL,
 4057                     (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
 4058                     bss->ni_ies.wme_ie != NULL ?
 4059                         WME(bss->ni_ies.wme_ie)->wme_info : 0);
 4060                 if (error == 0)
 4061                         mwl_setglobalkeys(vap);
 4062                 break;
 4063         case IEEE80211_M_HOSTAP:
 4064         case IEEE80211_M_MBSS:
 4065                 error = mwl_hal_newstation(hvap, vap->iv_myaddr,
 4066                     0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
 4067                 if (error == 0)
 4068                         mwl_setglobalkeys(vap);
 4069                 break;
 4070         default:
 4071                 error = 0;
 4072                 break;
 4073         }
 4074         return error;
 4075 #undef WME
 4076 }
 4077 
 4078 static int
 4079 mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
 4080 {
 4081         struct mwl_vap *mvp = MWL_VAP(vap);
 4082         struct mwl_hal_vap *hvap = mvp->mv_hvap;
 4083         struct ieee80211com *ic = vap->iv_ic;
 4084         struct ieee80211_node *ni = NULL;
 4085         struct mwl_softc *sc = ic->ic_softc;
 4086         struct mwl_hal *mh = sc->sc_mh;
 4087         enum ieee80211_state ostate = vap->iv_state;
 4088         int error;
 4089 
 4090         DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
 4091             vap->iv_ifp->if_xname, __func__,
 4092             ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
 4093 
 4094         callout_stop(&sc->sc_timer);
 4095         /*
 4096          * Clear current radar detection state.
 4097          */
 4098         if (ostate == IEEE80211_S_CAC) {
 4099                 /* stop quiet mode radar detection */
 4100                 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
 4101         } else if (sc->sc_radarena) {
 4102                 /* stop in-service radar detection */
 4103                 mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
 4104                 sc->sc_radarena = 0;
 4105         }
 4106         /*
 4107          * Carry out per-state actions before doing net80211 work.
 4108          */
 4109         if (nstate == IEEE80211_S_INIT) {
 4110                 /* NB: only ap+sta vaps have a fw entity */
 4111                 if (hvap != NULL)
 4112                         mwl_hal_stop(hvap);
 4113         } else if (nstate == IEEE80211_S_SCAN) {
 4114                 mwl_hal_start(hvap);
 4115                 /* NB: this disables beacon frames */
 4116                 mwl_hal_setinframode(hvap);
 4117         } else if (nstate == IEEE80211_S_AUTH) {
 4118                 /*
 4119                  * Must create a sta db entry in case a WEP key needs to
 4120                  * be plumbed.  This entry will be overwritten if we
 4121                  * associate; otherwise it will be reclaimed on node free.
 4122                  */
 4123                 ni = vap->iv_bss;
 4124                 MWL_NODE(ni)->mn_hvap = hvap;
 4125                 (void) mwl_peerstadb(ni, 0, 0, NULL);
 4126         } else if (nstate == IEEE80211_S_CSA) {
 4127                 /* XXX move to below? */
 4128                 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
 4129                     vap->iv_opmode == IEEE80211_M_MBSS)
 4130                         mwl_startcsa(vap);
 4131         } else if (nstate == IEEE80211_S_CAC) {
 4132                 /* XXX move to below? */
 4133                 /* stop ap xmit and enable quiet mode radar detection */
 4134                 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
 4135         }
 4136 
 4137         /*
 4138          * Invoke the parent method to do net80211 work.
 4139          */
 4140         error = mvp->mv_newstate(vap, nstate, arg);
 4141 
 4142         /*
 4143          * Carry out work that must be done after net80211 runs;
 4144          * this work requires up to date state (e.g. iv_bss).
 4145          */
 4146         if (error == 0 && nstate == IEEE80211_S_RUN) {
 4147                 /* NB: collect bss node again, it may have changed */
 4148                 ni = vap->iv_bss;
 4149 
 4150                 DPRINTF(sc, MWL_DEBUG_STATE,
 4151                     "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
 4152                     "capinfo 0x%04x chan %d\n",
 4153                     vap->iv_ifp->if_xname, __func__, vap->iv_flags,
 4154                     ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
 4155                     ieee80211_chan2ieee(ic, ic->ic_curchan));
 4156 
 4157                 /*
 4158                  * Recreate local sta db entry to update WME/HT state.
 4159                  */
 4160                 mwl_localstadb(vap);
 4161                 switch (vap->iv_opmode) {
 4162                 case IEEE80211_M_HOSTAP:
 4163                 case IEEE80211_M_MBSS:
 4164                         if (ostate == IEEE80211_S_CAC) {
 4165                                 /* enable in-service radar detection */
 4166                                 mwl_hal_setradardetection(mh,
 4167                                     DR_IN_SERVICE_MONITOR_START);
 4168                                 sc->sc_radarena = 1;
 4169                         }
 4170                         /*
 4171                          * Allocate and setup the beacon frame
 4172                          * (and related state).
 4173                          */
 4174                         error = mwl_reset_vap(vap, IEEE80211_S_RUN);
 4175                         if (error != 0) {
 4176                                 DPRINTF(sc, MWL_DEBUG_STATE,
 4177                                     "%s: beacon setup failed, error %d\n",
 4178                                     __func__, error);
 4179                                 goto bad;
 4180                         }
 4181                         /* NB: must be after setting up beacon */
 4182                         mwl_hal_start(hvap);
 4183                         break;
 4184                 case IEEE80211_M_STA:
 4185                         DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
 4186                             vap->iv_ifp->if_xname, __func__, ni->ni_associd);
 4187                         /*
 4188                          * Set state now that we're associated.
 4189                          */
 4190                         mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
 4191                         mwl_setrates(vap);
 4192                         mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
 4193                         if ((vap->iv_flags & IEEE80211_F_DWDS) &&
 4194                             sc->sc_ndwdsvaps++ == 0)
 4195                                 mwl_hal_setdwds(mh, 1);
 4196                         break;
 4197                 case IEEE80211_M_WDS:
 4198                         DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
 4199                             vap->iv_ifp->if_xname, __func__,
 4200                             ether_sprintf(ni->ni_bssid));
 4201                         mwl_seteapolformat(vap);
 4202                         break;
 4203                 default:
 4204                         break;
 4205                 }
 4206                 /*
 4207                  * Set CS mode according to operating channel;
 4208                  * this is mostly an optimization for 5GHz.
 4209                  *
 4210                  * NB: must follow mwl_hal_start which resets csmode
 4211                  */
 4212                 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
 4213                         mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
 4214                 else
 4215                         mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
 4216                 /*
 4217                  * Start timer to prod firmware.
 4218                  */
 4219                 if (sc->sc_ageinterval != 0)
 4220                         callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
 4221                             mwl_agestations, sc);
 4222         } else if (nstate == IEEE80211_S_SLEEP) {
 4223                 /* XXX set chip in power save */
 4224         } else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
 4225             --sc->sc_ndwdsvaps == 0)
 4226                 mwl_hal_setdwds(mh, 0);
 4227 bad:
 4228         return error;
 4229 }
 4230 
 4231 /*
 4232  * Manage station id's; these are separate from AID's
 4233  * as AID's may have values out of the range of possible
 4234  * station id's acceptable to the firmware.
 4235  */
 4236 static int
 4237 allocstaid(struct mwl_softc *sc, int aid)
 4238 {
 4239         int staid;
 4240 
 4241         if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
 4242                 /* NB: don't use 0 */
 4243                 for (staid = 1; staid < MWL_MAXSTAID; staid++)
 4244                         if (isclr(sc->sc_staid, staid))
 4245                                 break;
 4246         } else
 4247                 staid = aid;
 4248         setbit(sc->sc_staid, staid);
 4249         return staid;
 4250 }
 4251 
 4252 static void
 4253 delstaid(struct mwl_softc *sc, int staid)
 4254 {
 4255         clrbit(sc->sc_staid, staid);
 4256 }
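/*
 * Sketch (illustrative, not driver code): sc_staid is a bit vector indexed
 * by station id.  A minimal standalone model of the same policy -- use the
 * AID when it is in range and free, otherwise the first free non-zero
 * slot -- assuming a hypothetical 64-entry pool:
 */
#if 0
#include <stdint.h>

#define EX_MAXSTAID     64              /* hypothetical pool size */
static uint64_t ex_staid_map;           /* bit i set => station id i in use */

static int
ex_staid_alloc(int aid)
{
        int staid;

        /* NB: like the driver, assumes the pool never fills completely */
        if (!(0 < aid && aid < EX_MAXSTAID) ||
            (ex_staid_map & (1ULL << aid)) != 0) {
                for (staid = 1; staid < EX_MAXSTAID; staid++)
                        if ((ex_staid_map & (1ULL << staid)) == 0)
                                break;
        } else
                staid = aid;
        ex_staid_map |= 1ULL << staid;
        return staid;
}

static void
ex_staid_free(int staid)
{
        ex_staid_map &= ~(1ULL << staid);
}
#endif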
 4257 
 4258 /*
 4259  * Setup driver-specific state for a newly associated node.
 4260  * Note that we're also called on a re-associate; the isnew
 4261  * param tells us whether this is the first time or not.
 4262  */
 4263 static void
 4264 mwl_newassoc(struct ieee80211_node *ni, int isnew)
 4265 {
 4266         struct ieee80211vap *vap = ni->ni_vap;
 4267         struct mwl_softc *sc = vap->iv_ic->ic_softc;
 4268         struct mwl_node *mn = MWL_NODE(ni);
 4269         MWL_HAL_PEERINFO pi;
 4270         uint16_t aid;
 4271         int error;
 4272 
 4273         aid = IEEE80211_AID(ni->ni_associd);
 4274         if (isnew) {
 4275                 mn->mn_staid = allocstaid(sc, aid);
 4276                 mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
 4277         } else {
 4278                 mn = MWL_NODE(ni);
 4279                 /* XXX reset BA stream? */
 4280         }
 4281         DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
 4282             __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
 4283         error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
 4284         if (error != 0) {
 4285                 DPRINTF(sc, MWL_DEBUG_NODE,
 4286                     "%s: error %d creating sta db entry\n",
 4287                     __func__, error);
 4288                 /* XXX how to deal with error? */
 4289         }
 4290 }
 4291 
 4292 /*
 4293  * Periodically poke the firmware to age out station state
 4294  * (power save queues, pending tx aggregates).
 4295  */
 4296 static void
 4297 mwl_agestations(void *arg)
 4298 {
 4299         struct mwl_softc *sc = arg;
 4300 
 4301         mwl_hal_setkeepalive(sc->sc_mh);
 4302         if (sc->sc_ageinterval != 0)            /* NB: catch dynamic changes */
 4303                 callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
 4304 }
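/*
 * Sketch (illustrative, not driver code): the aging timer follows the
 * usual callout(9) self-rearming pattern -- callout_reset() arms it when a
 * vap reaches RUN, the handler re-arms itself with callout_schedule(), and
 * callout_stop() (see mwl_newstate) cancels it.  Generic shape, with
 * hypothetical names:
 */
#if 0
static void
ex_tick(void *arg)
{
        struct ex_softc *sc = arg;              /* hypothetical softc */

        ex_do_periodic_work(sc);                /* hypothetical work */
        if (sc->ex_interval != 0)               /* allow dynamic disable */
                callout_schedule(&sc->ex_timer, sc->ex_interval * hz);
}

static void
ex_start_timer(struct ex_softc *sc)
{
        callout_reset(&sc->ex_timer, sc->ex_interval * hz, ex_tick, sc);
}
#endif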
 4305 
 4306 static const struct mwl_hal_channel *
 4307 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
 4308 {
 4309         int i;
 4310 
 4311         for (i = 0; i < ci->nchannels; i++) {
 4312                 const struct mwl_hal_channel *hc = &ci->channels[i];
 4313                 if (hc->ieee == ieee)
 4314                         return hc;
 4315         }
 4316         return NULL;
 4317 }
 4318 
 4319 static int
 4320 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
 4321         int nchan, struct ieee80211_channel chans[])
 4322 {
 4323         struct mwl_softc *sc = ic->ic_softc;
 4324         struct mwl_hal *mh = sc->sc_mh;
 4325         const MWL_HAL_CHANNELINFO *ci;
 4326         int i;
 4327 
 4328         for (i = 0; i < nchan; i++) {
 4329                 struct ieee80211_channel *c = &chans[i];
 4330                 const struct mwl_hal_channel *hc;
 4331 
 4332                 if (IEEE80211_IS_CHAN_2GHZ(c)) {
 4333                         mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
 4334                             IEEE80211_IS_CHAN_HT40(c) ?
 4335                                 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
 4336                 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
 4337                         mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
 4338                             IEEE80211_IS_CHAN_HT40(c) ?
 4339                                 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
 4340                 } else {
 4341                         device_printf(sc->sc_dev,
 4342                             "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
 4343                             __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
 4344                         return EINVAL;
 4345                 }
 4346                 /* 
 4347                  * Verify channel has cal data and cap tx power.
 4348                  */
 4349                 hc = findhalchannel(ci, c->ic_ieee);
 4350                 if (hc != NULL) {
 4351                         if (c->ic_maxpower > 2*hc->maxTxPow)
 4352                                 c->ic_maxpower = 2*hc->maxTxPow;
 4353                         goto next;
 4354                 }
 4355                 if (IEEE80211_IS_CHAN_HT40(c)) {
 4356                         /*
 4357                          * Look for the extension channel since the
 4358                          * hal table only has the primary channel.
 4359                          */
 4360                         hc = findhalchannel(ci, c->ic_extieee);
 4361                         if (hc != NULL) {
 4362                                 if (c->ic_maxpower > 2*hc->maxTxPow)
 4363                                         c->ic_maxpower = 2*hc->maxTxPow;
 4364                                 goto next;
 4365                         }
 4366                 }
 4367                 device_printf(sc->sc_dev,
 4368                     "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
 4369                     __func__, c->ic_ieee, c->ic_extieee,
 4370                     c->ic_freq, c->ic_flags);
 4371                 return EINVAL;
 4372         next:
 4373                 ;
 4374         }
 4375         return 0;
 4376 }
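/*
 * Illustration (not part of the driver): net80211 keeps ic_maxpower in
 * half-dBm units while the hal's maxTxPow appears to be in dBm, hence the
 * "2*hc->maxTxPow" cap above; cal data reporting 17 dBm clamps
 * ic_maxpower to 34 (i.e. 17 dBm).
 */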
 4377 
 4378 #define IEEE80211_CHAN_HTG      (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
 4379 #define IEEE80211_CHAN_HTA      (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
 4380 
 4381 static void
 4382 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
 4383         const MWL_HAL_CHANNELINFO *ci, int flags)
 4384 {
 4385         int i, error;
 4386 
 4387         for (i = 0; i < ci->nchannels; i++) {
 4388                 const struct mwl_hal_channel *hc = &ci->channels[i];
 4389 
 4390                 error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
 4391                     hc->ieee, hc->maxTxPow, flags);
 4392                 if (error != 0 && error != ENOENT)
 4393                         break;
 4394         }
 4395 }
 4396 
 4397 static void
 4398 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
 4399         const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
 4400 {
 4401         int i, error;
 4402 
 4403         error = 0;
 4404         for (i = 0; i < ci->nchannels && error == 0; i++) {
 4405                 const struct mwl_hal_channel *hc = &ci->channels[i];
 4406 
 4407                 error = ieee80211_add_channel(chans, maxchans, nchans,
 4408                     hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
 4409         }
 4410 }
 4411 
 4412 static void
 4413 getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
 4414         struct ieee80211_channel chans[])
 4415 {
 4416         const MWL_HAL_CHANNELINFO *ci;
 4417         uint8_t bands[IEEE80211_MODE_BYTES];
 4418 
 4419         /*
 4420          * Use the channel info from the hal to craft the
 4421          * channel list.  Note that we pass back an unsorted
 4422          * list; the caller is required to sort it for us
 4423          * (if desired).
 4424          */
 4425         *nchans = 0;
 4426         if (mwl_hal_getchannelinfo(sc->sc_mh,
 4427             MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
 4428                 memset(bands, 0, sizeof(bands));
 4429                 setbit(bands, IEEE80211_MODE_11B);
 4430                 setbit(bands, IEEE80211_MODE_11G);
 4431                 setbit(bands, IEEE80211_MODE_11NG);
 4432                 addchannels(chans, maxchans, nchans, ci, bands);
 4433         }
 4434         if (mwl_hal_getchannelinfo(sc->sc_mh,
 4435             MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
 4436                 memset(bands, 0, sizeof(bands));
 4437                 setbit(bands, IEEE80211_MODE_11A);
 4438                 setbit(bands, IEEE80211_MODE_11NA);
 4439                 addchannels(chans, maxchans, nchans, ci, bands);
 4440         }
 4441         if (mwl_hal_getchannelinfo(sc->sc_mh,
 4442             MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
 4443                 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
 4444         if (mwl_hal_getchannelinfo(sc->sc_mh,
 4445             MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
 4446                 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
 4447 }
 4448 
 4449 static void
 4450 mwl_getradiocaps(struct ieee80211com *ic,
 4451         int maxchans, int *nchans, struct ieee80211_channel chans[])
 4452 {
 4453         struct mwl_softc *sc = ic->ic_softc;
 4454 
 4455         getchannels(sc, maxchans, nchans, chans);
 4456 }
 4457 
 4458 static int
 4459 mwl_getchannels(struct mwl_softc *sc)
 4460 {
 4461         struct ieee80211com *ic = &sc->sc_ic;
 4462 
 4463         /*
 4464          * Use the channel info from the hal to craft the
 4465          * channel list for net80211.  Note that we pass up
 4466          * an unsorted list; net80211 will sort it for us.
 4467          */
 4468         memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
 4469         ic->ic_nchans = 0;
 4470         getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
 4471 
 4472         ic->ic_regdomain.regdomain = SKU_DEBUG;
 4473         ic->ic_regdomain.country = CTRY_DEFAULT;
 4474         ic->ic_regdomain.location = 'I';
 4475         ic->ic_regdomain.isocc[0] = ' ';        /* XXX? */
 4476         ic->ic_regdomain.isocc[1] = ' ';
 4477         return (ic->ic_nchans == 0 ? EIO : 0);
 4478 }
 4479 #undef IEEE80211_CHAN_HTA
 4480 #undef IEEE80211_CHAN_HTG
 4481 
 4482 #ifdef MWL_DEBUG
 4483 static void
 4484 mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
 4485 {
 4486         const struct mwl_rxdesc *ds = bf->bf_desc;
 4487         uint32_t status = le32toh(ds->Status);
 4488 
 4489         printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
 4490                "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
 4491             ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
 4492             le32toh(ds->pPhysBuffData), ds->RxControl, 
 4493             ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
 4494                 "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
 4495             ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
 4496             ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
 4497 }
 4498 
 4499 static void
 4500 mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
 4501 {
 4502         const struct mwl_txdesc *ds = bf->bf_desc;
 4503         uint32_t status = le32toh(ds->Status);
 4504 
 4505         printf("Q%u[%3u]", qnum, ix);
 4506         printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
 4507         printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
 4508             le32toh(ds->pPhysNext),
 4509             le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
 4510             status & EAGLE_TXD_STATUS_USED ?
 4511                 "" : (status & 3) != 0 ? " *" : " !");
 4512         printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
 4513             ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
 4514             le32toh(ds->SapPktInfo), le16toh(ds->Format));
 4515 #if MWL_TXDESC > 1
 4516         printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
 4517             , le32toh(ds->multiframes)
 4518             , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
 4519             , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
 4520             , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
 4521         );
 4522         printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
 4523             , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
 4524             , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
 4525             , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
 4526         );
 4527 #endif
 4528 #if 0
 4529 { const uint8_t *cp = (const uint8_t *) ds;
 4530   int i;
 4531   for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
 4532         printf("%02x ", cp[i]);
 4533         if (((i+1) % 16) == 0)
 4534                 printf("\n");
 4535   }
 4536   printf("\n");
 4537 }
 4538 #endif
 4539 }
 4540 #endif /* MWL_DEBUG */
 4541 
 4542 #if 0
 4543 static void
 4544 mwl_txq_dump(struct mwl_txq *txq)
 4545 {
 4546         struct mwl_txbuf *bf;
 4547         int i = 0;
 4548 
 4549         MWL_TXQ_LOCK(txq);
 4550         STAILQ_FOREACH(bf, &txq->active, bf_list) {
 4551                 struct mwl_txdesc *ds = bf->bf_desc;
 4552                 MWL_TXDESC_SYNC(txq, ds,
 4553                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 4554 #ifdef MWL_DEBUG
 4555                 mwl_printtxbuf(bf, txq->qnum, i);
 4556 #endif
 4557                 i++;
 4558         }
 4559         MWL_TXQ_UNLOCK(txq);
 4560 }
 4561 #endif
 4562 
 4563 static void
 4564 mwl_watchdog(void *arg)
 4565 {
 4566         struct mwl_softc *sc = arg;
 4567 
 4568         callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
 4569         if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
 4570                 return;
 4571 
 4572         if (sc->sc_running && !sc->sc_invalid) {
 4573                 if (mwl_hal_setkeepalive(sc->sc_mh))
 4574                         device_printf(sc->sc_dev,
 4575                             "transmit timeout (firmware hung?)\n");
 4576                 else
 4577                         device_printf(sc->sc_dev,
 4578                             "transmit timeout\n");
 4579 #if 0
 4580                 mwl_reset(sc);
 4581 mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
 4582 #endif
 4583                 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
 4584                 sc->sc_stats.mst_watchdog++;
 4585         }
 4586 }
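/*
 * Illustration (not part of the driver): the watchdog re-arms itself every
 * second (hz ticks).  sc_tx_timer is presumably loaded on the transmit
 * path and cleared on tx completion, so the timeout path only runs once
 * the countdown hits zero; mwl_hal_setkeepalive() then doubles as a cheap
 * probe of whether the firmware still answers commands.
 */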
 4587 
 4588 #ifdef MWL_DIAGAPI
 4589 /*
 4590  * Diagnostic interface to the HAL.  This is used by various
 4591  * tools to do things like retrieve register contents for
 4592  * debugging.  The mechanism is intentionally opaque so that
 4593  * it can change frequently w/o concern for compatibility.
 4594  */
 4595 static int
 4596 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
 4597 {
 4598         struct mwl_hal *mh = sc->sc_mh;
 4599         u_int id = md->md_id & MWL_DIAG_ID;
 4600         void *indata = NULL;
 4601         void *outdata = NULL;
 4602         u_int32_t insize = md->md_in_size;
 4603         u_int32_t outsize = md->md_out_size;
 4604         int error = 0;
 4605 
 4606         if (md->md_id & MWL_DIAG_IN) {
 4607                 /*
 4608                  * Copy in data.
 4609                  */
 4610                 indata = malloc(insize, M_TEMP, M_NOWAIT);
 4611                 if (indata == NULL) {
 4612                         error = ENOMEM;
 4613                         goto bad;
 4614                 }
 4615                 error = copyin(md->md_in_data, indata, insize);
 4616                 if (error)
 4617                         goto bad;
 4618         }
 4619         if (md->md_id & MWL_DIAG_DYN) {
 4620                 /*
 4621                  * Allocate a buffer for the results (otherwise the HAL
 4622                  * returns a pointer to a buffer where we can read the
 4623                  * results).  Note that we depend on the HAL leaving this
 4624                  * pointer for us to use below in reclaiming the buffer;
 4625                  * may want to be more defensive.
 4626                  */
 4627                 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
 4628                 if (outdata == NULL) {
 4629                         error = ENOMEM;
 4630                         goto bad;
 4631                 }
 4632         }
 4633         if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
 4634                 if (outsize < md->md_out_size)
 4635                         md->md_out_size = outsize;
 4636                 if (outdata != NULL)
 4637                         error = copyout(outdata, md->md_out_data,
 4638                                         md->md_out_size);
 4639         } else {
 4640                 error = EINVAL;
 4641         }
 4642 bad:
 4643         if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
 4644                 free(indata, M_TEMP);
 4645         if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
 4646                 free(outdata, M_TEMP);
 4647         return error;
 4648 }
 4649 
 4650 static int
 4651 mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
 4652 {
 4653         struct mwl_hal *mh = sc->sc_mh;
 4654         int error;
 4655 
 4656         MWL_LOCK_ASSERT(sc);
 4657 
 4658         if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
 4659                 device_printf(sc->sc_dev, "unable to load firmware\n");
 4660                 return EIO;
 4661         }
 4662         if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
 4663                 device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
 4664                 return EIO;
 4665         }
 4666         error = mwl_setupdma(sc);
 4667         if (error != 0) {
 4668                 /* NB: mwl_setupdma prints a msg */
 4669                 return error;
 4670         }
 4671         /*
 4672          * Reset tx/rx data structures; after reload we must
 4673          * re-start the driver's notion of the next xmit/recv.
 4674          */
 4675         mwl_draintxq(sc);               /* clear pending frames */
 4676         mwl_resettxq(sc);               /* rebuild tx q lists */
 4677         sc->sc_rxnext = NULL;           /* force rx to start at the list head */
 4678         return 0;
 4679 }
 4680 #endif /* MWL_DIAGAPI */
 4681 
 4682 static void
 4683 mwl_parent(struct ieee80211com *ic)
 4684 {
 4685         struct mwl_softc *sc = ic->ic_softc;
 4686         int startall = 0;
 4687 
 4688         MWL_LOCK(sc);
 4689         if (ic->ic_nrunning > 0) {
 4690                 if (sc->sc_running) {
 4691                         /*
 4692                          * To avoid rescanning another access point,
 4693                          * do not call mwl_init() here.  Instead,
 4694                          * only reflect promisc mode settings.
 4695                          */
 4696                         mwl_mode_init(sc);
 4697                 } else {
 4698                         /*
 4699                          * Beware of being called during attach/detach
 4700                          * to reset promiscuous mode.  In that case we
 4701                          * will still be marked UP but not RUNNING.
 4702                          * However trying to re-init the interface
 4703                          * is the wrong thing to do as we've already
 4704                          * torn down much of our state.  There's
 4705                          * probably a better way to deal with this.
 4706                          */
 4707                         if (!sc->sc_invalid) {
 4708                                 mwl_init(sc);   /* XXX lose error */
 4709                                 startall = 1;
 4710                         }
 4711                 }
 4712         } else
 4713                 mwl_stop(sc);
 4714         MWL_UNLOCK(sc);
 4715         if (startall)
 4716                 ieee80211_start_all(ic);
 4717 }
 4718 
 4719 static int
 4720 mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
 4721 {
 4722         struct mwl_softc *sc = ic->ic_softc;
 4723         struct ifreq *ifr = data;
 4724         int error = 0;
 4725 
 4726         switch (cmd) {
 4727         case SIOCGMVSTATS:
 4728                 mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
 4729 #if 0
 4730                 /* NB: embed these numbers to get a consistent view */
 4731                 sc->sc_stats.mst_tx_packets =
 4732                     ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
 4733                 sc->sc_stats.mst_rx_packets =
 4734                     ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
 4735 #endif
 4736                 /*
 4737                  * NB: Drop the softc lock in case of a page fault;
 4738                  * we'll accept any potential inconsistency in the
 4739                  * statistics.  The alternative is to copy the data
 4740                  * to a local structure.
 4741                  */
 4742                 return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
 4743                     sizeof (sc->sc_stats)));
 4744 #ifdef MWL_DIAGAPI
 4745         case SIOCGMVDIAG:
 4746                 /* XXX check privs */
 4747                 return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
 4748         case SIOCGMVRESET:
 4749                 /* XXX check privs */
 4750                 MWL_LOCK(sc);
 4751                 error = mwl_ioctl_reset(sc, (struct mwl_diag *) ifr);
 4752                 MWL_UNLOCK(sc);
 4753                 break;
 4754 #endif /* MWL_DIAGAPI */
 4755         default:
 4756                 error = ENOTTY;
 4757                 break;
 4758         }
 4759         return (error);
 4760 }
 4761 
 4762 #ifdef  MWL_DEBUG
 4763 static int
 4764 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
 4765 {
 4766         struct mwl_softc *sc = arg1;
 4767         int debug, error;
 4768 
 4769         debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
 4770         error = sysctl_handle_int(oidp, &debug, 0, req);
 4771         if (error || !req->newptr)
 4772                 return error;
 4773         mwl_hal_setdebug(sc->sc_mh, debug >> 24);
 4774         sc->sc_debug = debug & 0x00ffffff;
 4775         return 0;
 4776 }
 4777 #endif /* MWL_DEBUG */
 4778 
 4779 static void
 4780 mwl_sysctlattach(struct mwl_softc *sc)
 4781 {
 4782 #ifdef  MWL_DEBUG
 4783         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
 4784         struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
 4785 
 4786         sc->sc_debug = mwl_debug;
 4787         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
 4788             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
 4789             mwl_sysctl_debug, "I", "control debugging printfs");
 4790 #endif
 4791 }
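/*
 * Illustration (not part of the driver): the debug sysctl packs two masks
 * into one int -- bits 31-24 are handed to the hal and bits 23-0 become
 * sc_debug.  Writing 0x02000004, for example, sets the hal mask to 0x02
 * and the driver mask to 0x4.  Since the knob hangs off the device sysctl
 * tree it should show up as dev.mwl.<unit>.debug, e.g.:
 *
 *      sysctl dev.mwl.0.debug=0x02000004
 */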
 4792 
 4793 /*
 4794  * Announce various information on device/driver attach.
 4795  */
 4796 static void
 4797 mwl_announce(struct mwl_softc *sc)
 4798 {
 4799 
 4800         device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
 4801                 sc->sc_hwspecs.hwVersion,
 4802                 (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
 4803                 (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
 4804                 (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
 4805                 (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
 4806                 sc->sc_hwspecs.regionCode);
 4807         sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
 4808 
 4809         if (bootverbose) {
 4810                 int i;
 4811                 for (i = 0; i <= WME_AC_VO; i++) {
 4812                         struct mwl_txq *txq = sc->sc_ac2q[i];
 4813                         device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
 4814                                 txq->qnum, ieee80211_wme_acnames[i]);
 4815                 }
 4816         }
 4817         if (bootverbose || mwl_rxdesc != MWL_RXDESC)
 4818                 device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
 4819         if (bootverbose || mwl_rxbuf != MWL_RXBUF)
 4820                 device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
 4821         if (bootverbose || mwl_txbuf != MWL_TXBUF)
 4822                 device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
 4823         if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
 4824                 device_printf(sc->sc_dev, "multi-bss support\n");
 4825 #ifdef MWL_TX_NODROP
 4826         if (bootverbose)
 4827                 device_printf(sc->sc_dev, "no tx drop\n");
 4828 #endif
 4829 }
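/*
 * Illustration (not part of the driver): fwReleaseNumber packs the
 * firmware version as four bytes, printed most-significant byte first;
 * a value of 0x09010302, for example, is announced as "v9.1.3.2".
 */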
