sys/dev/iwm/if_iwm.c
/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)	((t)((m)->m_data + (off)))
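/* e.g. "pkt = mtodoff(m, struct iwm_rx_packet *, off)" is mtod() plus a byte offset */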

const uint8_t iwm_nvm_channels[] = {
    /* 2.4 GHz */
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 5 GHz */
    36, 40, 44, 48, 52, 56, 60, 64,
    100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
    /* 2.4 GHz */
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 5 GHz */
    36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
    96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
    uint8_t rate;
    uint8_t plcp;
} iwm_rates[] = {
    {   2, IWM_RATE_1M_PLCP  },
    {   4, IWM_RATE_2M_PLCP  },
    {  11, IWM_RATE_5M_PLCP  },
    {  22, IWM_RATE_11M_PLCP },
    {  12, IWM_RATE_6M_PLCP  },
    {  18, IWM_RATE_9M_PLCP  },
    {  24, IWM_RATE_12M_PLCP },
    {  36, IWM_RATE_18M_PLCP },
    {  48, IWM_RATE_24M_PLCP },
    {  72, IWM_RATE_36M_PLCP },
    {  96, IWM_RATE_48M_PLCP },
    { 108, IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
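/*
 * Note: the 'rate' field is in units of 500 kb/s, so { 2, ... } is the
 * 1 Mb/s CCK rate and { 108, ... } is 54 Mb/s OFDM; entries below index
 * IWM_RIDX_OFDM (4) are the four CCK rates.
 */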

struct iwm_nvm_section {
    uint16_t length;
    uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
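/* hz ticks equal one second: wait up to 1s for ALIVE, 2s for calibration results */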

struct iwm_alive_data {
    int valid;
    uint32_t scd_base_addr;
};

static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int iwm_firmware_store_section(struct iwm_softc *,
    enum iwm_ucode_type,
    const uint8_t *, size_t);
static int iwm_set_default_calib(struct iwm_softc *, const void *);
static void iwm_fw_info_free(struct iwm_fw_info *);
static int iwm_read_firmware(struct iwm_softc *);
static int iwm_alloc_fwmem(struct iwm_softc *);
static int iwm_alloc_sched(struct iwm_softc *);
static int iwm_alloc_kw(struct iwm_softc *);
static int iwm_alloc_ict(struct iwm_softc *);
static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    int);
static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_enable_interrupts(struct iwm_softc *);
static void iwm_restore_interrupts(struct iwm_softc *);
static void iwm_disable_interrupts(struct iwm_softc *);
static void iwm_ict_reset(struct iwm_softc *);
static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void iwm_stop_device(struct iwm_softc *);
static void iwm_nic_config(struct iwm_softc *);
static int iwm_nic_rx_init(struct iwm_softc *);
static int iwm_nic_tx_init(struct iwm_softc *);
static int iwm_nic_init(struct iwm_softc *);
static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    uint16_t, uint8_t *, uint16_t *);
static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    uint16_t *, uint32_t);
static uint32_t iwm_eeprom_channel_flags(uint16_t);
static void iwm_add_channel_band(struct iwm_softc *,
    struct ieee80211_channel[], int, int *, int, size_t,
    const uint8_t[]);
static void iwm_init_channel_map(struct ieee80211com *, int, int *,
    struct ieee80211_channel[]);
static struct iwm_nvm_data *
    iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
        const uint16_t *, const uint16_t *,
        const uint16_t *, const uint16_t *,
        const uint16_t *);
static void iwm_free_nvm_data(struct iwm_nvm_data *);
static void iwm_set_hw_address_family_8000(struct iwm_softc *,
    struct iwm_nvm_data *,
    const uint16_t *,
    const uint16_t *);
static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
    const uint16_t *);
static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
    const uint16_t *);
static int iwm_get_n_hw_addrs(const struct iwm_softc *,
    const uint16_t *);
static void iwm_set_radio_cfg(const struct iwm_softc *,
    struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
    iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int iwm_nvm_init(struct iwm_softc *);
static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
    const struct iwm_fw_desc *);
static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
    bus_addr_t, uint32_t);
static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
    const struct iwm_fw_img *,
    int, int *);
static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
    const struct iwm_fw_img *,
    int, int *);
static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
    const struct iwm_fw_img *);
static int iwm_pcie_load_given_ucode(struct iwm_softc *,
    const struct iwm_fw_img *);
static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int iwm_load_ucode_wait_alive(struct iwm_softc *,
    enum iwm_ucode_type);
static int iwm_run_init_ucode(struct iwm_softc *, int);
static int iwm_config_ltr(struct iwm_softc *sc);
static int iwm_rx_addbuf(struct iwm_softc *, int, int);
static void iwm_rx_rx_phy_cmd(struct iwm_softc *,
    struct iwm_rx_packet *);
static int iwm_get_noise(struct iwm_softc *,
    const struct iwm_statistics_rx_non_phy *);
static void iwm_handle_rx_statistics(struct iwm_softc *,
    struct iwm_rx_packet *);
static bool iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
    uint32_t, bool);
static int iwm_rx_tx_cmd_single(struct iwm_softc *,
    struct iwm_rx_packet *,
    struct iwm_node *);
static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    uint16_t);
#endif
static const struct iwm_rate *
    iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
        struct mbuf *, struct iwm_tx_cmd *);
static int iwm_tx(struct iwm_softc *, struct mbuf *,
    struct ieee80211_node *, int);
static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
    const struct ieee80211_bpf_params *);
static int iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
    iwm_node_alloc(struct ieee80211vap *,
        const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t iwm_rate_from_ucode_rate(uint32_t);
static int iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwm_endscan_cb(void *, int);
static int iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int iwm_init_hw(struct iwm_softc *);
static void iwm_init(struct iwm_softc *);
static void iwm_start(struct iwm_softc *);
static void iwm_stop(struct iwm_softc *);
static void iwm_watchdog(void *);
static void iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
    iwm_desc_lookup(uint32_t);
static void iwm_nic_error(struct iwm_softc *);
static void iwm_nic_umac_error(struct iwm_softc *);
#endif
static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void iwm_notif_intr(struct iwm_softc *);
static void iwm_intr(void *);
static int iwm_attach(device_t);
static int iwm_is_valid_ether_addr(uint8_t *);
static void iwm_preinit(void *);
static int iwm_detach_local(struct iwm_softc *sc, int);
static void iwm_init_task(void *);
static void iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
    iwm_vap_create(struct ieee80211com *,
        const char [IFNAMSIZ], int,
        enum ieee80211_opmode, int,
        const uint8_t [IEEE80211_ADDR_LEN],
        const uint8_t [IEEE80211_ADDR_LEN]);
static void iwm_vap_delete(struct ieee80211vap *);
static void iwm_xmit_queue_drain(struct iwm_softc *);
static void iwm_scan_start(struct ieee80211com *);
static void iwm_scan_end(struct ieee80211com *);
static void iwm_update_mcast(struct ieee80211com *);
static void iwm_set_channel(struct ieee80211com *);
static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwm_scan_mindwell(struct ieee80211_scan_state *);
static int iwm_detach(device_t);

static int iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
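/* Tunable via the kernel environment, e.g. hw.iwm.lar.disable="1" in loader.conf(5). */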

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
    const struct iwm_fw_cscheme_list *l = (const void *)data;

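    /* Validate both the fixed header and the flexible cs[] array it declares. */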
    if (dlen < sizeof(*l) ||
        dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
        return EINVAL;

    /* we don't actually store anything for now, always use s/w crypto */

    return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
    struct iwm_fw_img *fws;
    struct iwm_fw_desc *fwone;

    if (type >= IWM_UCODE_TYPE_MAX)
        return EINVAL;
    if (dlen < sizeof(uint32_t))
        return EINVAL;

    fws = &sc->sc_fw.img[type];
    if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
        return EINVAL;

    fwone = &fws->sec[fws->fw_count];

    /* first 32bit are device load offset */
    memcpy(&fwone->offset, data, sizeof(uint32_t));

    /* rest is data */
    fwone->data = data + sizeof(uint32_t);
    fwone->len = dlen - sizeof(uint32_t);

    fws->fw_count++;

    return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
    uint32_t ucode_type;
    struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
    const struct iwm_tlv_calib_data *def_calib = data;
    uint32_t ucode_type = le32toh(def_calib->ucode_type);

    if (ucode_type >= IWM_UCODE_TYPE_MAX) {
        device_printf(sc->sc_dev,
            "Wrong ucode_type %u for default "
            "calibration.\n", ucode_type);
        return EINVAL;
    }

    sc->sc_default_calib[ucode_type].flow_trigger =
        def_calib->calib.flow_trigger;
    sc->sc_default_calib[ucode_type].event_trigger =
        def_calib->calib.event_trigger;

    return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
    const struct iwm_ucode_api *ucode_api = (const void *)data;
    uint32_t api_index = le32toh(ucode_api->api_index);
    uint32_t api_flags = le32toh(ucode_api->api_flags);
    int i;

    if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
        device_printf(sc->sc_dev,
            "api flags index %d larger than supported by driver\n",
            api_index);
        /* don't return an error so we can load FW that has more bits */
        return 0;
    }

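    /* Each TLV carries one 32-bit word of flags; word api_index covers bits 32*api_index..32*api_index+31 of the bitmap. */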
    for (i = 0; i < 32; i++) {
        if (api_flags & (1U << i))
            setbit(capa->enabled_api, i + 32 * api_index);
    }

    return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
    const struct iwm_ucode_capa *ucode_capa = (const void *)data;
    uint32_t api_index = le32toh(ucode_capa->api_index);
    uint32_t api_flags = le32toh(ucode_capa->api_capa);
    int i;

    if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
        device_printf(sc->sc_dev,
            "capa flags index %d larger than supported by driver\n",
            api_index);
        /* don't return an error so we can load FW that has more bits */
        return 0;
    }

    for (i = 0; i < 32; i++) {
        if (api_flags & (1U << i))
            setbit(capa->enabled_capa, i + 32 * api_index);
    }

    return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
    firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
    fw->fw_fp = NULL;
    memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
    struct iwm_fw_info *fw = &sc->sc_fw;
    const struct iwm_tlv_ucode_header *uhdr;
    const struct iwm_ucode_tlv *tlv;
    struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
    enum iwm_ucode_tlv_type tlv_type;
    const struct firmware *fwp;
    const uint8_t *data;
    uint32_t tlv_len;
    uint32_t usniffer_img;
    const uint8_t *tlv_data;
    uint32_t paging_mem_size;
    int num_of_cpus;
    int error = 0;
    size_t len;

    /*
     * Load firmware into driver memory.
     * fw_fp will be set.
     */
    fwp = firmware_get(sc->cfg->fw_name);
    if (fwp == NULL) {
        /* firmware_get() failed; make sure we fail the load */
        error = ENOENT;
        device_printf(sc->sc_dev,
            "could not read firmware %s (error %d)\n",
            sc->cfg->fw_name, error);
        goto out;
    }
    fw->fw_fp = fwp;

    /* (Re-)Initialize default values. */
    capa->flags = 0;
    capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
    capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
    memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
    memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
    memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

    /*
     * Parse firmware contents
     */

    uhdr = (const void *)fw->fw_fp->data;
    if (*(const uint32_t *)fw->fw_fp->data != 0
        || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
        device_printf(sc->sc_dev, "invalid firmware %s\n",
            sc->cfg->fw_name);
        error = EINVAL;
        goto out;
    }

    snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
        IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
        IWM_UCODE_MINOR(le32toh(uhdr->ver)),
        IWM_UCODE_API(le32toh(uhdr->ver)));
    data = uhdr->data;
    len = fw->fw_fp->datasize - sizeof(*uhdr);

    while (len >= sizeof(*tlv)) {
        len -= sizeof(*tlv);
        tlv = (const void *)data;

        tlv_len = le32toh(tlv->length);
        tlv_type = le32toh(tlv->type);
        tlv_data = tlv->data;

        if (len < tlv_len) {
            device_printf(sc->sc_dev,
                "firmware too short: %zu bytes\n",
                len);
            error = EINVAL;
            goto parse_out;
        }
        len -= roundup2(tlv_len, 4);
        data += sizeof(*tlv) + roundup2(tlv_len, 4);
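        /* TLV payloads are padded so that each header starts on a 4-byte boundary. */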

        switch ((int)tlv_type) {
        case IWM_UCODE_TLV_PROBE_MAX_LEN:
            if (tlv_len != sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            capa->max_probe_length =
                le32_to_cpup((const uint32_t *)tlv_data);
            /* limit it to something sensible */
            if (capa->max_probe_length >
                IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
                IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
                    "ridiculous\n", __func__);
                error = EINVAL;
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_PAN:
            if (tlv_len) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
            break;
        case IWM_UCODE_TLV_FLAGS:
            if (tlv_len < sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            if (tlv_len % sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            /*
             * Apparently there can be many flags, but Linux driver
             * parses only the first one, and so do we.
             *
             * XXX: why does this override IWM_UCODE_TLV_PAN?
             * Intentional or a bug?  Observations from
             * current firmware file:
             *  1) TLV_PAN is parsed first
             *  2) TLV_FLAGS contains TLV_FLAGS_PAN
             * ==> this resets TLV_PAN to itself... hnnnk
             */
            capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
            break;
        case IWM_UCODE_TLV_CSCHEME:
            if ((error = iwm_store_cscheme(sc,
                tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: iwm_store_cscheme(): returned %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_NUM_OF_CPU:
            if (tlv_len != sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
                    __func__, tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
            if (num_of_cpus == 2) {
                fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
                    TRUE;
                fw->img[IWM_UCODE_INIT].is_dual_cpus =
                    TRUE;
                fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
                    TRUE;
            } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
                device_printf(sc->sc_dev,
                    "%s: Driver supports only 1 or 2 CPUs\n",
                    __func__);
                error = EINVAL;
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_RT:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_INIT:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_WOWLAN:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_DEF_CALIB:
            if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
                    __func__, tlv_len,
                    sizeof(struct iwm_tlv_calib_data));
                error = EINVAL;
                goto parse_out;
            }
            if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: iwm_set_default_calib() failed: %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_PHY_SKU:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
                    __func__, tlv_len);
                goto parse_out;
            }
            sc->sc_fw.phy_config =
                le32_to_cpup((const uint32_t *)tlv_data);
            sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
                IWM_FW_PHY_CFG_TX_CHAIN) >>
                IWM_FW_PHY_CFG_TX_CHAIN_POS;
            sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
                IWM_FW_PHY_CFG_RX_CHAIN) >>
                IWM_FW_PHY_CFG_RX_CHAIN_POS;
            break;

        case IWM_UCODE_TLV_API_CHANGES_SET: {
            if (tlv_len != sizeof(struct iwm_ucode_api)) {
                error = EINVAL;
                goto parse_out;
            }
            if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
                error = EINVAL;
                goto parse_out;
            }
            break;
        }

        case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
            if (tlv_len != sizeof(struct iwm_ucode_capa)) {
                error = EINVAL;
                goto parse_out;
            }
            if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
                error = EINVAL;
                goto parse_out;
            }
            break;
        }

        case IWM_UCODE_TLV_CMD_VERSIONS:
        case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
        case IWM_UCODE_TLV_FW_GSCAN_CAPA:
            /* ignore, not used by current driver */
            break;

        case IWM_UCODE_TLV_SEC_RT_USNIFFER:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_REGULAR_USNIFFER, tlv_data,
                tlv_len)) != 0)
                goto parse_out;
            break;

        case IWM_UCODE_TLV_PAGING:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                goto parse_out;
            }
            paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

            IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                "%s: Paging: paging enabled (size = %u bytes)\n",
                __func__, paging_mem_size);
            if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
                device_printf(sc->sc_dev,
                    "%s: Paging: driver supports up to %u bytes for paging image\n",
                    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
                error = EINVAL;
                goto out;
            }
            if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
                device_printf(sc->sc_dev,
                    "%s: Paging: image size is not a multiple of %u\n",
                    __func__, IWM_FW_PAGING_SIZE);
                error = EINVAL;
                goto out;
            }

            sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
                paging_mem_size;
            usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
            sc->sc_fw.img[usniffer_img].paging_mem_size =
                paging_mem_size;
            break;

        case IWM_UCODE_TLV_N_SCAN_CHANNELS:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                goto parse_out;
            }
            capa->n_scan_channels =
                le32_to_cpup((const uint32_t *)tlv_data);
            break;

        case IWM_UCODE_TLV_FW_VERSION:
            if (tlv_len != sizeof(uint32_t) * 3) {
                error = EINVAL;
                goto parse_out;
            }
            snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
                "%u.%u.%u",
                le32toh(((const uint32_t *)tlv_data)[0]),
                le32toh(((const uint32_t *)tlv_data)[1]),
                le32toh(((const uint32_t *)tlv_data)[2]));
            break;

        case IWM_UCODE_TLV_FW_MEM_SEG:
            break;

        default:
            device_printf(sc->sc_dev,
                "%s: unknown firmware section %d, abort\n",
                __func__, tlv_type);
            error = EINVAL;
            goto parse_out;
        }
    }

    KASSERT(error == 0, ("unhandled error"));

parse_out:
    if (error) {
        device_printf(sc->sc_dev, "firmware parse error %d, "
            "section type %d\n", error, tlv_type);
    }

out:
    if (error) {
        if (fw->fw_fp != NULL)
            iwm_fw_info_free(fw);
    }

    return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
    /* Must be aligned on a 16-byte boundary. */
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
        IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
    /* TX scheduler rings must be aligned on a 1KB boundary. */
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
        nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
        IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    bus_size_t size;
    size_t descsz;
    int count, i, error;

    ring->cur = 0;
    if (sc->cfg->mqrx_supported) {
        count = IWM_RX_MQ_RING_COUNT;
        descsz = sizeof(uint64_t);
    } else {
        count = IWM_RX_LEGACY_RING_COUNT;
        descsz = sizeof(uint32_t);
    }

    /* Allocate RX descriptors (256-byte aligned). */
    size = count * descsz;
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
        256);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate RX ring DMA memory\n");
        goto fail;
    }
    ring->desc = ring->free_desc_dma.vaddr;

    /* Allocate RX status area (16-byte aligned). */
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
        sizeof(*ring->stat), 16);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate RX status DMA memory\n");
        goto fail;
    }
    ring->stat = ring->stat_dma.vaddr;

    if (sc->cfg->mqrx_supported) {
        size = count * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
            size, 256);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "could not allocate RX ring DMA memory\n");
            goto fail;
        }
    }

    /* Create RX buffer DMA tag. */
    error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: could not create RX buf DMA tag, error %d\n",
            __func__, error);
        goto fail;
    }

    /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
    error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: could not create RX buf DMA map, error %d\n",
            __func__, error);
        goto fail;
    }

    /*
     * Allocate and map RX buffers.
     */
    for (i = 0; i < count; i++) {
        struct iwm_rx_data *data = &ring->data[i];
        error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "%s: could not create RX buf DMA map, error %d\n",
                __func__, error);
            goto fail;
        }
        data->m = NULL;

        if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
            goto fail;
        }
    }
    return 0;

fail:	iwm_free_rx_ring(sc, ring);
    return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    /* Reset the ring state */
    ring->cur = 0;

    /*
     * The hw rx ring index in shared memory must also be cleared,
     * otherwise the discrepancy can cause reprocessing chaos.
     */
    if (sc->rxq.stat)
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    int count, i;

    iwm_dma_contig_free(&ring->free_desc_dma);
    iwm_dma_contig_free(&ring->stat_dma);
    iwm_dma_contig_free(&ring->used_desc_dma);

    count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
        IWM_RX_LEGACY_RING_COUNT;

    for (i = 0; i < count; i++) {
        struct iwm_rx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->map != NULL) {
            bus_dmamap_destroy(ring->data_dmat, data->map);
            data->map = NULL;
        }
    }
    if (ring->spare_map != NULL) {
        bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
        ring->spare_map = NULL;
    }
    if (ring->data_dmat != NULL) {
        bus_dma_tag_destroy(ring->data_dmat);
        ring->data_dmat = NULL;
    }
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
    bus_addr_t paddr;
    bus_size_t size;
    size_t maxsize;
    int nsegments;
    int i, error;

    ring->qid = qid;
    ring->queued = 0;
    ring->cur = 0;

    /* Allocate TX descriptors (256-byte aligned). */
    size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate TX ring DMA memory\n");
        goto fail;
    }
    ring->desc = ring->desc_dma.vaddr;

    /*
     * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
     * to allocate commands space for other rings.
     */
    if (qid > IWM_CMD_QUEUE)
        return 0;

    size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate TX cmd DMA memory\n");
        goto fail;
    }
    ring->cmd = ring->cmd_dma.vaddr;

    /* FW commands may require more mapped space than packets. */
    if (qid == IWM_CMD_QUEUE) {
        maxsize = IWM_RBUF_SIZE;
        nsegments = 1;
    } else {
        maxsize = MCLBYTES;
        nsegments = IWM_MAX_SCATTER - 2;
    }

    error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
        nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
    if (error != 0) {
        device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
        goto fail;
    }

    paddr = ring->cmd_dma.paddr;
    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        data->cmd_paddr = paddr;
        data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
            + offsetof(struct iwm_tx_cmd, scratch);
        paddr += sizeof(struct iwm_device_cmd);

        error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "could not create TX buf DMA map\n");
            goto fail;
        }
    }
    KASSERT(paddr == ring->cmd_dma.paddr + size,
        ("invalid physical address"));
    return 0;

fail:	iwm_free_tx_ring(sc, ring);
    return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
    int i;

    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
    }
    /* Clear TX descriptors. */
    memset(ring->desc, 0, ring->desc_dma.size);
    bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
        BUS_DMASYNC_PREWRITE);
    sc->qfullmsk &= ~(1 << ring->qid);
    ring->queued = 0;
    ring->cur = 0;

    if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
        iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
    int i;

    iwm_dma_contig_free(&ring->desc_dma);
    iwm_dma_contig_free(&ring->cmd_dma);

    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->map != NULL) {
            bus_dmamap_destroy(ring->data_dmat, data->map);
            data->map = NULL;
        }
    }
    if (ring->data_dmat != NULL) {
        bus_dma_tag_destroy(ring->data_dmat);
        ring->data_dmat = NULL;
    }
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
    sc->sc_intmask = IWM_CSR_INI_SET_MASK;
    IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
    IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
    /* disable interrupts */
    IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

    /* acknowledge all interrupts */
    IWM_WRITE(sc, IWM_CSR_INT, ~0);
    IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
    iwm_disable_interrupts(sc);

    /* Reset ICT table. */
    memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
    sc->ict_cur = 0;

    /* Set physical address of ICT table (4KB aligned). */
    IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
        IWM_CSR_DRAM_INT_TBL_ENABLE
        | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
        | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
        | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
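    /* The 4KB alignment guarantees the low IWM_ICT_PADDR_SHIFT bits are zero, so the shifted base address fits alongside the control bits above. */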

    /* Switch to ICT interrupt mode in driver. */
    sc->sc_flags |= IWM_FLAG_USE_ICT;

    /* Re-enable interrupts. */
    IWM_WRITE(sc, IWM_CSR_INT, ~0);
    iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
    int chnl, qid;
    uint32_t mask = 0;

    /* tell the device to stop sending interrupts */
    iwm_disable_interrupts(sc);

    /*
     * FreeBSD-local: mark the first vap as not-uploaded,
     * so the next transition through auth/assoc
     * will correctly populate the MAC context.
     */
    if (vap) {
        struct iwm_vap *iv = IWM_VAP(vap);
        iv->phy_ctxt = NULL;
        iv->is_uploaded = 0;
    }
    sc->sc_firmware_state = 0;
    sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

    /* device going down, Stop using ICT table */
    sc->sc_flags &= ~IWM_FLAG_USE_ICT;

    /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

    if (iwm_nic_lock(sc)) {
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Stop each Tx DMA channel */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
            IWM_WRITE(sc,
                IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
            mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
        }

        /* Wait for DMA channels to be idle */
        if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
            5000)) {
            device_printf(sc->sc_dev,
                "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
        }
        iwm_nic_unlock(sc);
    }
    iwm_pcie_rx_stop(sc);

    /* Stop RX ring. */
    iwm_reset_rx_ring(sc, &sc->rxq);

    /* Reset all TX rings. */
    for (qid = 0; qid < nitems(sc->txq); qid++)
        iwm_reset_tx_ring(sc, &sc->txq[qid]);

    if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
        /* Power-down device's busmaster DMA clocks */
        if (iwm_nic_lock(sc)) {
            iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
                IWM_APMG_CLK_VAL_DMA_CLK_RQT);
            iwm_nic_unlock(sc);
        }
        DELAY(5);
    }

    /* Make sure (redundant) we've released our request to stay awake */
    IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
        IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

    /* Stop the device, and put it in low power state */
    iwm_apm_stop(sc);

    /* stop and reset the on-board processor */
    IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
    DELAY(5000);

    /*
     * Upon stop, the APM issues an interrupt if HW RF kill is set.
     */
    iwm_disable_interrupts(sc);

    /*
     * Even if we stop the HW, we still want the RF kill
     * interrupt
     */
    iwm_enable_rfkill_int(sc);
    iwm_check_rfkill(sc);

    iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
    uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
    uint32_t reg_val = 0;
    uint32_t phy_config = iwm_get_phy_config(sc);

    radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
        IWM_FW_PHY_CFG_RADIO_TYPE_POS;
    radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
        IWM_FW_PHY_CFG_RADIO_STEP_POS;
    radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
        IWM_FW_PHY_CFG_RADIO_DASH_POS;

    /* SKU control */
    reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
        IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
    reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
        IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

    /* radio configuration */
    reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
    reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
    reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

    IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
        IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
        IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
        IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
        IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
        reg_val);

    IWM_DPRINTF(sc, IWM_DEBUG_RESET,
        "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
        radio_cfg_step, radio_cfg_dash);

    /*
     * W/A : NIC is stuck in a reset state after Early PCIe power off
     * (PCIe power is lost before PERST# is asserted), causing ME FW
     * to lose ownership and not being able to obtain it back.
     */
    if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
        iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
            IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
            ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
    }
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
    int enabled;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Stop RX DMA. */
    iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
    /* Disable RX used and free queue operation. */
    iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

    iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
        sc->rxq.free_desc_dma.paddr);
    iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
        sc->rxq.used_desc_dma.paddr);
    iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
        sc->rxq.stat_dma.paddr);
    iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
    iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
    iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

    /* We configure only queue 0 for now. */
    enabled = ((1 << 0) << 16) | (1 << 0);

    /* Enable RX DMA, 4KB buffer size. */
    iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
        IWM_RFH_DMA_EN_ENABLE_VAL |
        IWM_RFH_RXF_DMA_RB_SIZE_4K |
        IWM_RFH_RXF_DMA_MIN_RB_4_8 |
        IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
        IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
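    /* i.e. a 512-entry RB descriptor circular buffer with 4KB buffers; frames too large for an RB are dropped. */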

    /* Enable RX DMA snooping. */
    iwm_write_prph(sc, IWM_RFH_GEN_CFG,
        IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
        IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
        (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
        IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

    /* Enable the configured queue(s). */
    iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

    iwm_nic_unlock(sc);

    IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

    IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

    return (0);
}

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

    /* Stop Rx DMA */
    iwm_pcie_rx_stop(sc);

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* reset and flush pointers */
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

    /* Set physical address of RX ring (256-byte aligned). */
    IWM_WRITE(sc,
        IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
        sc->rxq.free_desc_dma.paddr >> 8);

    /* Set physical address of RX status (16-byte aligned). */
    IWM_WRITE(sc,
        IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

    /* Enable Rx DMA
     * XXX 5000 HW isn't supported by the iwm(4) driver.
     * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
     *      the credit mechanism in 5000 HW RX FIFO
     * Direct rx interrupts to hosts
     * Rx buffer size 4 or 8k or 12k
     * RB timeout 0x10
     * 256 RBDs
     */
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
        IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
        IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |  /* HW bug */
        IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
        IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
        (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
        IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

    IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

    /* W/A for interrupt coalescing bug in 7260 and 3160 */
    if (sc->cfg->host_interrupt_operation_mode)
        IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

    iwm_nic_unlock(sc);

    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

    return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
    if (sc->cfg->mqrx_supported)
        return iwm_nic_rx_mq_init(sc);
    else
        return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
    int qid;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Deactivate TX scheduler. */
    iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

    /* Set physical address of "keep warm" page (16-byte aligned). */
    IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

    /* Initialize TX rings. */
    for (qid = 0; qid < nitems(sc->txq); qid++) {
        struct iwm_tx_ring *txq = &sc->txq[qid];

        /* Set physical address of TX ring (256-byte aligned). */
        IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
            txq->desc_dma.paddr >> 8);
        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "%s: loading ring %d descriptors (%p) at %lx\n",
            __func__,
            qid, txq->desc,
            (unsigned long) (txq->desc_dma.paddr >> 8));
    }

    iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
        IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
        IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

    iwm_nic_unlock(sc);

    return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
    int error;

    iwm_apm_init(sc);
    if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
        iwm_set_pwr(sc);

    iwm_nic_config(sc);

    if ((error = iwm_nic_rx_init(sc)) != 0)
        return error;

    /*
     * Ditto for TX, from iwn
     */
    if ((error = iwm_nic_tx_init(sc)) != 0)
        return error;

    IWM_DPRINTF(sc, IWM_DEBUG_RESET,
        "%s: shadow registers enabled\n", __func__);
    IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

    return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
    int qmsk;

    qmsk = 1 << qid;

    if (!iwm_nic_lock(sc)) {
        device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
            __func__, qid);
        return EBUSY;
    }

    IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

    if (qid == IWM_CMD_QUEUE) {
        /* Disable the scheduler. */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

        /* Stop the TX queue prior to configuration. */
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

        iwm_nic_unlock(sc);

        /* Disable aggregations for this queue. */
        iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

        if (!iwm_nic_lock(sc)) {
            device_printf(sc->sc_dev,
                "%s: cannot enable txq %d\n", __func__, qid);
            return EBUSY;
        }
        iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
        iwm_nic_unlock(sc);

        iwm_write_mem32(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
        /* Set scheduler window size and frame limit. */
        iwm_write_mem32(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
            sizeof(uint32_t),
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
            IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
            IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

        if (!iwm_nic_lock(sc)) {
            device_printf(sc->sc_dev,
                "%s: cannot enable txq %d\n", __func__, qid);
            return EBUSY;
        }
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
            IWM_SCD_QUEUE_STTS_REG_MSK);

        /* Enable the scheduler for this queue. */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
    } else {
        struct iwm_scd_txq_cfg_cmd cmd;
        int error;

        iwm_nic_unlock(sc);

        memset(&cmd, 0, sizeof(cmd));
        cmd.scd_queue = qid;
        cmd.enable = 1;
        cmd.sta_id = sta_id;
        cmd.tx_fifo = fifo;
        cmd.aggregate = 0;
        cmd.window = IWM_FRAME_LIMIT;

        error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
            sizeof(cmd), &cmd);
        if (error) {
            device_printf(sc->sc_dev,
                "cannot enable txq %d\n", qid);
            return error;
        }

        if (!iwm_nic_lock(sc))
            return EBUSY;
    }

    iwm_nic_unlock(sc);

    IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
        __func__, qid, fifo);

    return 0;
}

static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
    int error, chnl;

    int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
        IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

    if (!iwm_nic_lock(sc))
        return EBUSY;

    iwm_ict_reset(sc);

    sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
    if (scd_base_addr != 0 &&
        scd_base_addr != sc->scd_base_addr) {
        device_printf(sc->sc_dev,
            "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
            __func__, sc->scd_base_addr, scd_base_addr);
    }

    iwm_nic_unlock(sc);

    /* reset context data, TX status and translation data */
    error = iwm_write_mem(sc,
        sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
        NULL, clear_dwords);
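    /* (a NULL buffer makes iwm_write_mem() zero-fill the range) */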
    if (error)
        return EBUSY;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Set physical address of TX scheduler rings (1KB aligned). */
    iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

    iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

    iwm_nic_unlock(sc);

    /* enable command channel */
    error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
    if (error)
        return error;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

    /* Enable DMA channels. */
    for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
    }

    IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
        IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

    iwm_nic_unlock(sc);

    /* Enable L1-Active */
    if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
        iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
            IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
    }

    return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE	1
#define IWM_NVM_READ_OPCODE	0

/* load nvm chunk response */
enum {
    IWM_READ_NVM_CHUNK_SUCCEED = 0,
    IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
    uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
    struct iwm_nvm_access_cmd nvm_access_cmd = {
        .offset = htole16(offset),
        .length = htole16(length),
        .type = htole16(section),
        .op_code = IWM_NVM_READ_OPCODE,
    };
    struct iwm_nvm_access_resp *nvm_resp;
    struct iwm_rx_packet *pkt;
    struct iwm_host_cmd cmd = {
        .id = IWM_NVM_ACCESS_CMD,
        .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
        .data = { &nvm_access_cmd, },
    };
    int ret, bytes_read, offset_read;
    uint8_t *resp_data;

    cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

    ret = iwm_send_cmd(sc, &cmd);
    if (ret) {
        device_printf(sc->sc_dev,
            "Could not send NVM_ACCESS command (error=%d)\n", ret);
        return ret;
    }

    pkt = cmd.resp_pkt;

    /* Extract NVM response */
    nvm_resp = (void *)pkt->data;
    ret = le16toh(nvm_resp->status);
    bytes_read = le16toh(nvm_resp->length);
    offset_read = le16toh(nvm_resp->offset);
    resp_data = nvm_resp->data;
    if (ret) {
        if ((offset != 0) &&
            (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1816 /*
1817 * Meaning of NOT_VALID_ADDRESS: the driver tried to read a
1818 * chunk from an address that is a multiple of 2K and got an
1819 * error because that address is empty.
1820 * Meaning of (offset != 0): the driver has already read
1821 * valid data from another chunk, so this case is not
1822 * an error.
1823 */
1824 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1826 offset);
1827 *len = 0;
1828 ret = 0;
1829 } else {
1830 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 "NVM access command failed with status %d\n", ret);
1832 ret = EIO;
1833 }
1834 goto exit;
1835 }
1836
1837 if (offset_read != offset) {
1838 device_printf(sc->sc_dev,
1839 "NVM ACCESS response with invalid offset %d\n",
1840 offset_read);
1841 ret = EINVAL;
1842 goto exit;
1843 }
1844
1845 if (bytes_read > length) {
1846 device_printf(sc->sc_dev,
1847 "NVM ACCESS response with too much data "
1848 "(%d bytes requested, %d bytes received)\n",
1849 length, bytes_read);
1850 ret = EINVAL;
1851 goto exit;
1852 }
1853
1854 /* Copy the chunk into the caller's buffer */
1855 memcpy(data + offset, resp_data, bytes_read);
1856 *len = bytes_read;
1857
1858 exit:
1859 iwm_free_resp(sc, &cmd);
1860 return ret;
1861 }
1862
1863 /*
1864 * Reads an NVM section completely.
1865 * NICs prior to the 7000 family don't have a real NVM, but just read
1866 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
1867 * by the uCode, we need to check manually in this case that we don't
1868 * overflow and try to read more than the EEPROM size.
1869 * For 7000 family NICs, we supply the maximal size we can read, and
1870 * the uCode fills the response with as much data as it can
1871 * without overflowing, so no check is needed.
1872 */
1873 static int
1874 iwm_nvm_read_section(struct iwm_softc *sc,
1875 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1876 {
1877 uint16_t seglen, length, offset = 0;
1878 int ret;
1879
1880 /* Set nvm section read length */
1881 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1882
1883 seglen = length;
1884
1885 /* Read the NVM until exhausted (reading less than requested) */
1886 while (seglen == length) {
1887 /* Make sure our size assumptions hold and we cannot overflow the buffer */
1888 if ((size_read + offset + length) >
1889 sc->cfg->eeprom_size) {
1890 device_printf(sc->sc_dev,
1891 "EEPROM size is too small for NVM\n");
1892 return ENOBUFS;
1893 }
1894
1895 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1896 if (ret) {
1897 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1898 "Cannot read NVM from section %d offset %d, length %d\n",
1899 section, offset, length);
1900 return ret;
1901 }
1902 offset += seglen;
1903 }
1904
1905 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1906 "NVM section %d read completed\n", section);
1907 *len = offset;
1908 return 0;
1909 }
1910
1911 /*
1912 * BEGIN IWM_NVM_PARSE
1913 */
1914
1915 /* iwlwifi/iwl-nvm-parse.c */
1916
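/*
 * All offsets below are indices into an array of 16-bit words within
 * the given NVM section; the SW-section channel offset and the
 * crystal-calibration offset are expressed relative to their section
 * bases (hence the subtractions below).
 */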
1917 /* NVM offsets (in words) definitions */
1918 enum iwm_nvm_offsets {
1919 /* NVM HW-Section offset (in words) definitions */
1920 IWM_HW_ADDR = 0x15,
1921
1922 /* NVM SW-Section offset (in words) definitions */
1923 IWM_NVM_SW_SECTION = 0x1C0,
1924 IWM_NVM_VERSION = 0,
1925 IWM_RADIO_CFG = 1,
1926 IWM_SKU = 2,
1927 IWM_N_HW_ADDRS = 3,
1928 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1929
1930 /* NVM calibration section offset (in words) definitions */
1931 IWM_NVM_CALIB_SECTION = 0x2B8,
1932 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1933 };
1934
1935 enum iwm_8000_nvm_offsets {
1936 /* NVM HW-Section offset (in words) definitions */
1937 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1938 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1939 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1940 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1941 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1942
1943 /* NVM SW-Section offset (in words) definitions */
1944 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1945 IWM_NVM_VERSION_8000 = 0,
1946 IWM_RADIO_CFG_8000 = 0,
1947 IWM_SKU_8000 = 2,
1948 IWM_N_HW_ADDRS_8000 = 3,
1949
1950 /* NVM REGULATORY -Section offset (in words) definitions */
1951 IWM_NVM_CHANNELS_8000 = 0,
1952 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1953 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1954 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1955
1956 /* NVM calibration section offset (in words) definitions */
1957 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1958 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1959 };
1960
1961 /* SKU Capabilities (actual values from NVM definition) */
1962 enum nvm_sku_bits {
1963 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1964 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1965 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1966 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1967 };
1968
1969 /* radio config bits (actual values from NVM definition) */
1970 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1971 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1972 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1973 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1974 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1975 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1976
1977 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1978 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1979 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1980 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1981 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1982 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
1983
1984 /**
1985 * enum iwm_nvm_channel_flags - channel flags in NVM
1986 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1987 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1988 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1989 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1990 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1991 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1992 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1993 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1994 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1995 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1996 */
1997 enum iwm_nvm_channel_flags {
1998 IWM_NVM_CHANNEL_VALID = (1 << 0),
1999 IWM_NVM_CHANNEL_IBSS = (1 << 1),
2000 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2001 IWM_NVM_CHANNEL_RADAR = (1 << 4),
2002 IWM_NVM_CHANNEL_DFS = (1 << 7),
2003 IWM_NVM_CHANNEL_WIDE = (1 << 8),
2004 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2005 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2006 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2007 };
2008
2009 /*
2010 * Translate EEPROM flags to net80211.
2011 */
2012 static uint32_t
2013 iwm_eeprom_channel_flags(uint16_t ch_flags)
2014 {
2015 uint32_t nflags;
2016
2017 nflags = 0;
2018 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2019 nflags |= IEEE80211_CHAN_PASSIVE;
2020 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2021 nflags |= IEEE80211_CHAN_NOADHOC;
2022 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2023 nflags |= IEEE80211_CHAN_DFS;
2024 /* Just in case. */
2025 nflags |= IEEE80211_CHAN_NOADHOC;
2026 }
2027
2028 return (nflags);
2029 }
2030
2031 static void
2032 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2033 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2034 const uint8_t bands[])
2035 {
2036 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2037 uint32_t nflags;
2038 uint16_t ch_flags;
2039 uint8_t ieee;
2040 int error;
2041
2042 for (; ch_idx < ch_num; ch_idx++) {
2043 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2044 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2045 ieee = iwm_nvm_channels[ch_idx];
2046 else
2047 ieee = iwm_nvm_channels_8000[ch_idx];
2048
2049 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2050 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2051 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2052 ieee, ch_flags,
2053 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2054 "5.2" : "2.4");
2055 continue;
2056 }
2057
2058 nflags = iwm_eeprom_channel_flags(ch_flags);
2059 error = ieee80211_add_channel(chans, maxchans, nchans,
2060 ieee, 0, 0, nflags, bands);
2061 if (error != 0)
2062 break;
2063
2064 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2065 "Ch. %d Flags %x [%sGHz] - Added\n",
2066 ieee, ch_flags,
2067 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2068 "5.2" : "2.4");
2069 }
2070 }
2071
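/*
 * Build the net80211 channel list from the NVM channel flags: channels
 * 1-13 are added as 11b/11g, channel 14 as 11b only, and, if the SKU
 * enables the 5 GHz band, the remaining NVM entries are added as 11a.
 */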
2072 static void
2073 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2074 struct ieee80211_channel chans[])
2075 {
2076 struct iwm_softc *sc = ic->ic_softc;
2077 struct iwm_nvm_data *data = sc->nvm_data;
2078 uint8_t bands[IEEE80211_MODE_BYTES];
2079 size_t ch_num;
2080
2081 memset(bands, 0, sizeof(bands));
2082 /* 1-13: 11b/g channels. */
2083 setbit(bands, IEEE80211_MODE_11B);
2084 setbit(bands, IEEE80211_MODE_11G);
2085 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2086 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2087
2088 /* 14: 11b channel only. */
2089 clrbit(bands, IEEE80211_MODE_11G);
2090 iwm_add_channel_band(sc, chans, maxchans, nchans,
2091 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2092
2093 if (data->sku_cap_band_52GHz_enable) {
2094 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2095 ch_num = nitems(iwm_nvm_channels);
2096 else
2097 ch_num = nitems(iwm_nvm_channels_8000);
2098 memset(bands, 0, sizeof(bands));
2099 setbit(bands, IEEE80211_MODE_11A);
2100 iwm_add_channel_band(sc, chans, maxchans, nchans,
2101 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2102 }
2103 }
2104
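/*
 * On family 8000 the MAC address can come from two places: the MAC
 * address override (MAO) NVM section, which stores the address as-is,
 * or, failing that, the WFMP_MAC_ADDR prph registers, whose 32-bit
 * words are unscrambled byte-by-byte below. A reserved, broadcast,
 * multicast or otherwise invalid MAO address forces the prph fallback.
 */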
2105 static void
2106 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2107 const uint16_t *mac_override, const uint16_t *nvm_hw)
2108 {
2109 const uint8_t *hw_addr;
2110
2111 if (mac_override) {
2112 static const uint8_t reserved_mac[] = {
2113 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2114 };
2115
2116 hw_addr = (const uint8_t *)(mac_override +
2117 IWM_MAC_ADDRESS_OVERRIDE_8000);
2118
2119 /*
2120 * Store the MAC address from MAO section.
2121 * No byte swapping is required in MAO section
2122 */
2123 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2124
2125 /*
2126 * Force the use of the OTP MAC address in case of reserved MAC
2127 * address in the NVM, or if address is given but invalid.
2128 */
2129 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2130 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2131 iwm_is_valid_ether_addr(data->hw_addr) &&
2132 !IEEE80211_IS_MULTICAST(data->hw_addr))
2133 return;
2134
2135 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2136 "%s: mac address from nvm override section invalid\n",
2137 __func__);
2138 }
2139
2140 if (nvm_hw) {
2141 /* read the mac address from WFMP registers */
2142 uint32_t mac_addr0 =
2143 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2144 uint32_t mac_addr1 =
2145 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2146
2147 hw_addr = (const uint8_t *)&mac_addr0;
2148 data->hw_addr[0] = hw_addr[3];
2149 data->hw_addr[1] = hw_addr[2];
2150 data->hw_addr[2] = hw_addr[1];
2151 data->hw_addr[3] = hw_addr[0];
2152
2153 hw_addr = (const uint8_t *)&mac_addr1;
2154 data->hw_addr[4] = hw_addr[1];
2155 data->hw_addr[5] = hw_addr[0];
2156
2157 return;
2158 }
2159
2160 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2161 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2162 }
2163
2164 static int
2165 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2166 const uint16_t *phy_sku)
2167 {
2168 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2169 return le16_to_cpup(nvm_sw + IWM_SKU);
2170
2171 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2172 }
2173
2174 static int
2175 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2176 {
2177 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2178 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2179 else
2180 return le32_to_cpup((const uint32_t *)(nvm_sw +
2181 IWM_NVM_VERSION_8000));
2182 }
2183
2184 static int
2185 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2186 const uint16_t *phy_sku)
2187 {
2188 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2189 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2190
2191 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2192 }
2193
2194 static int
2195 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2196 {
2197 int n_hw_addr;
2198
2199 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2200 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2201
2202 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2203
2204 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2205 }
2206
2207 static void
2208 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2209 uint32_t radio_cfg)
2210 {
2211 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2212 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2213 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2214 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2215 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2216 return;
2217 }
2218
2219 /* set the radio configuration for family 8000 */
2220 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2221 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2222 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2223 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2224 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2225 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2226 }
2227
2228 static int
2229 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2230 const uint16_t *nvm_hw, const uint16_t *mac_override)
2231 {
2232 #ifdef notyet /* for FAMILY 9000 */
2233 if (cfg->mac_addr_from_csr) {
2234 iwm_set_hw_address_from_csr(sc, data);
2235 } else
2236 #endif
2237 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2238 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2239
2240 /* Stored as little-endian 16-bit words, so bytes read back in 2-1-4-3-6-5 order */
2241 data->hw_addr[0] = hw_addr[1];
2242 data->hw_addr[1] = hw_addr[0];
2243 data->hw_addr[2] = hw_addr[3];
2244 data->hw_addr[3] = hw_addr[2];
2245 data->hw_addr[4] = hw_addr[5];
2246 data->hw_addr[5] = hw_addr[4];
2247 } else {
2248 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2249 }
2250
2251 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2252 device_printf(sc->sc_dev, "no valid mac address was found\n");
2253 return EINVAL;
2254 }
2255
2256 return 0;
2257 }
2258
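/*
 * The returned iwm_nvm_data is allocated with the per-channel flag
 * words appended directly after the structure (IWM_NUM_CHANNELS or
 * IWM_NUM_CHANNELS_8000 uint16_t entries, depending on the family).
 */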
2259 static struct iwm_nvm_data *
2260 iwm_parse_nvm_data(struct iwm_softc *sc,
2261 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2262 const uint16_t *nvm_calib, const uint16_t *mac_override,
2263 const uint16_t *phy_sku, const uint16_t *regulatory)
2264 {
2265 struct iwm_nvm_data *data;
2266 uint32_t sku, radio_cfg;
2267 uint16_t lar_config;
2268
2269 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2270 data = malloc(sizeof(*data) +
2271 IWM_NUM_CHANNELS * sizeof(uint16_t),
2272 M_DEVBUF, M_NOWAIT | M_ZERO);
2273 } else {
2274 data = malloc(sizeof(*data) +
2275 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2276 M_DEVBUF, M_NOWAIT | M_ZERO);
2277 }
2278 if (!data)
2279 return NULL;
2280
2281 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2282
2283 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2284 iwm_set_radio_cfg(sc, data, radio_cfg);
2285
2286 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2287 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2288 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2289 data->sku_cap_11n_enable = 0;
2290
2291 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2292
2293 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2294 /* TODO: use IWL_NVM_EXT */
2295 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2296 IWM_NVM_LAR_OFFSET_8000_OLD :
2297 IWM_NVM_LAR_OFFSET_8000;
2298
2299 lar_config = le16_to_cpup(regulatory + lar_offset);
2300 data->lar_enabled = !!(lar_config &
2301 IWM_NVM_LAR_ENABLED_8000);
2302 }
2303
2304 /* If no valid mac address was found - bail out */
2305 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2306 free(data, M_DEVBUF);
2307 return NULL;
2308 }
2309
2310 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2311 memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2312 &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2313 IWM_NUM_CHANNELS * sizeof(uint16_t));
2314 } else {
2315 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2316 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2317 }
2318
2319 return data;
2320 }
2321
2322 static void
2323 iwm_free_nvm_data(struct iwm_nvm_data *data)
2324 {
2325 if (data != NULL)
2326 free(data, M_DEVBUF);
2327 }
2328
2329 static struct iwm_nvm_data *
2330 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2331 {
2332 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2333
2334 /* Checking for required sections */
2335 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2336 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2337 !sections[sc->cfg->nvm_hw_section_num].data) {
2338 device_printf(sc->sc_dev,
2339 "Can't parse empty OTP/NVM sections\n");
2340 return NULL;
2341 }
2342 } else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2343 /* SW and REGULATORY sections are mandatory */
2344 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2345 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2346 device_printf(sc->sc_dev,
2347 "Can't parse empty OTP/NVM sections\n");
2348 return NULL;
2349 }
2350 /* MAC_OVERRIDE or at least HW section must exist */
2351 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2352 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2353 device_printf(sc->sc_dev,
2354 "Can't parse mac_address, empty sections\n");
2355 return NULL;
2356 }
2357
2358 /* PHY_SKU section is mandatory in B0 */
2359 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2360 device_printf(sc->sc_dev,
2361 "Can't parse phy_sku in B0, empty sections\n");
2362 return NULL;
2363 }
2364 } else {
2365 panic("unknown device family %d\n", sc->cfg->device_family);
2366 }
2367
2368 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2369 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2370 calib = (const uint16_t *)
2371 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2372 regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2373 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2374 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2375 mac_override = (const uint16_t *)
2376 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2377 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2378
2379 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2380 phy_sku, regulatory);
2381 }
2382
2383 static int
2384 iwm_nvm_init(struct iwm_softc *sc)
2385 {
2386 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2387 int i, ret, section;
2388 uint32_t size_read = 0;
2389 uint8_t *nvm_buffer, *temp;
2390 uint16_t len;
2391
2392 memset(nvm_sections, 0, sizeof(nvm_sections));
2393
2394 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2395 return EINVAL;
2396
2397 /* Load NVM values from the NIC via the firmware. */
2399 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2400
2401 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2402 if (!nvm_buffer)
2403 return ENOMEM;
2404 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2405 /* we override the constness for initial read */
2406 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2407 &len, size_read);
2408 if (ret)
2409 continue;
2410 size_read += len;
2411 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2412 if (!temp) {
2413 ret = ENOMEM;
2414 break;
2415 }
2416 memcpy(temp, nvm_buffer, len);
2417
2418 nvm_sections[section].data = temp;
2419 nvm_sections[section].length = len;
2420 }
2421 if (!size_read)
2422 device_printf(sc->sc_dev, "OTP is blank\n");
2423 free(nvm_buffer, M_DEVBUF);
2424
2425 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2426 if (!sc->nvm_data)
2427 return EINVAL;
2428 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2429 "nvm version = %x\n", sc->nvm_data->nvm_version);
2430
2431 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2432 if (nvm_sections[i].data != NULL)
2433 free(nvm_sections[i].data, M_DEVBUF);
2434 }
2435
2436 return 0;
2437 }
2438
2439 static int
2440 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2441 const struct iwm_fw_desc *section)
2442 {
2443 struct iwm_dma_info *dma = &sc->fw_dma;
2444 uint8_t *v_addr;
2445 bus_addr_t p_addr;
2446 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2447 int ret = 0;
2448
2449 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2450 "%s: [%d] uCode section being loaded...\n",
2451 __func__, section_num);
2452
2453 v_addr = dma->vaddr;
2454 p_addr = dma->paddr;
2455
2456 for (offset = 0; offset < section->len; offset += chunk_sz) {
2457 uint32_t copy_size, dst_addr;
2458 int extended_addr = FALSE;
2459
2460 copy_size = MIN(chunk_sz, section->len - offset);
2461 dst_addr = section->offset + offset;
2462
2463 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2464 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2465 extended_addr = TRUE;
2466
2467 if (extended_addr)
2468 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2469 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2470
2471 memcpy(v_addr, (const uint8_t *)section->data + offset,
2472 copy_size);
2473 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2474 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2475 copy_size);
2476
2477 if (extended_addr)
2478 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2479 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2480
2481 if (ret) {
2482 device_printf(sc->sc_dev,
2483 "%s: Could not load the [%d] uCode section\n",
2484 __func__, section_num);
2485 break;
2486 }
2487 }
2488
2489 return ret;
2490 }
2491
2492 /*
2493 * ucode
2494 */
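/*
 * Push one firmware chunk to device SRAM over the FH service channel:
 * pause the channel, program the SRAM destination address, the DRAM
 * source address and the byte count, mark the single TFD buffer valid,
 * then re-enable the channel and sleep until sc_fw_chunk_done is set
 * (from the FH_TX interrupt enabled for firmware load) or the 5 second
 * timeout expires.
 */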
2495 static int
2496 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2497 bus_addr_t phy_addr, uint32_t byte_cnt)
2498 {
2499 sc->sc_fw_chunk_done = 0;
2500
2501 if (!iwm_nic_lock(sc))
2502 return EBUSY;
2503
2504 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2505 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2506
2507 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2508 dst_addr);
2509
2510 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2511 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2512
2513 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2514 (iwm_get_dma_hi_addr(phy_addr)
2515 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2516
2517 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2518 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2519 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2520 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2521
2522 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2523 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2524 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2525 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2526
2527 iwm_nic_unlock(sc);
2528
2529 /* wait up to 5s for this segment to load */
2530 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2531
2532 if (!sc->sc_fw_chunk_done) {
2533 device_printf(sc->sc_dev,
2534 "fw chunk addr 0x%x len %d failed to load\n",
2535 dst_addr, byte_cnt);
2536 return ETIMEDOUT;
2537 }
2538
2539 return 0;
2540 }
2541
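/*
 * Family 8000 secure-boot section loading. After each section the
 * driver acknowledges progress in IWM_FH_UCODE_LOAD_STATUS by widening
 * a bitmask, sec_num = (sec_num << 1) | 1 (i.e. 0x1, 0x3, 0x7, ...),
 * shifted left by 0 for CPU1 or by 16 for CPU2, and finishes by
 * writing 0xFFFF (CPU1) or 0xFFFFFFFF (both CPUs).
 */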
2542 static int
2543 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2544 const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2545 {
2546 int shift_param;
2547 int i, ret = 0, sec_num = 0x1;
2548 uint32_t val, last_read_idx = 0;
2549
2550 if (cpu == 1) {
2551 shift_param = 0;
2552 *first_ucode_section = 0;
2553 } else {
2554 shift_param = 16;
2555 (*first_ucode_section)++;
2556 }
2557
2558 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2559 last_read_idx = i;
2560
2561 /*
2562 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2563 * CPU1 to CPU2.
2564 * PAGING_SEPARATOR_SECTION delimiter - separate between
2565 * CPU2 non paged to CPU2 paging sec.
2566 */
2567 if (!image->sec[i].data ||
2568 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2569 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2570 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2571 "Break since Data not valid or Empty section, sec = %d\n",
2572 i);
2573 break;
2574 }
2575 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2576 if (ret)
2577 return ret;
2578
2579 /* Notify the ucode of the loaded section number and status */
2580 if (iwm_nic_lock(sc)) {
2581 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2582 val = val | (sec_num << shift_param);
2583 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2584 sec_num = (sec_num << 1) | 0x1;
2585 iwm_nic_unlock(sc);
2586 }
2587 }
2588
2589 *first_ucode_section = last_read_idx;
2590
2591 iwm_enable_interrupts(sc);
2592
2593 if (iwm_nic_lock(sc)) {
2594 if (cpu == 1)
2595 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2596 else
2597 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2598 iwm_nic_unlock(sc);
2599 }
2600
2601 return 0;
2602 }
2603
2604 static int
2605 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2606 const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2607 {
2608 int shift_param;
2609 int i, ret = 0;
2610 uint32_t last_read_idx = 0;
2611
2612 if (cpu == 1) {
2613 shift_param = 0;
2614 *first_ucode_section = 0;
2615 } else {
2616 shift_param = 16;
2617 (*first_ucode_section)++;
2618 }
2619
2620 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2621 last_read_idx = i;
2622
2623 /*
2624 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2625 * CPU1 to CPU2.
2626 * PAGING_SEPARATOR_SECTION delimiter - separate between
2627 * CPU2 non paged to CPU2 paging sec.
2628 */
2629 if (!image->sec[i].data ||
2630 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2631 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2632 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2633 "Break since Data not valid or Empty section, sec = %d\n",
2634 i);
2635 break;
2636 }
2637
2638 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2639 if (ret)
2640 return ret;
2641 }
2642
2643 *first_ucode_section = last_read_idx;
2644
2645 return 0;
2647 }
2648
2649 static int
2650 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2651 {
2652 int ret = 0;
2653 int first_ucode_section;
2654
2655 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2656 image->is_dual_cpus ? "Dual" : "Single");
2657
2658 /* load to FW the binary non secured sections of CPU1 */
2659 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2660 if (ret)
2661 return ret;
2662
2663 if (image->is_dual_cpus) {
2664 /* set CPU2 header address */
2665 if (iwm_nic_lock(sc)) {
2666 iwm_write_prph(sc,
2667 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2668 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2669 iwm_nic_unlock(sc);
2670 }
2671
2672 /* load to FW the binary sections of CPU2 */
2673 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2674 &first_ucode_section);
2675 if (ret)
2676 return ret;
2677 }
2678
2679 iwm_enable_interrupts(sc);
2680
2681 /* release CPU reset */
2682 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2683
2684 return 0;
2685 }
2686
2687 int
2688 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2689 const struct iwm_fw_img *image)
2690 {
2691 int ret = 0;
2692 int first_ucode_section;
2693
2694 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2695 image->is_dual_cpus ? "Dual" : "Single");
2696
2697 /* configure the ucode to be ready to get the secured image */
2698 /* release CPU reset */
2699 if (iwm_nic_lock(sc)) {
2700 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2701 IWM_RELEASE_CPU_RESET_BIT);
2702 iwm_nic_unlock(sc);
2703 }
2704
2705 /* load to FW the binary Secured sections of CPU1 */
2706 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2707 &first_ucode_section);
2708 if (ret)
2709 return ret;
2710
2711 /* load to FW the binary sections of CPU2 */
2712 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2713 &first_ucode_section);
2714 }
2715
2716 /* XXX Get rid of this definition */
2717 static inline void
2718 iwm_enable_fw_load_int(struct iwm_softc *sc)
2719 {
2720 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2721 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2722 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2723 }
2724
2725 /* XXX Add proper rfkill support code */
2726 static int
2727 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2728 {
2729 int ret;
2730
2731 /* This may fail if AMT took ownership of the device */
2732 if (iwm_prepare_card_hw(sc)) {
2733 device_printf(sc->sc_dev,
2734 "%s: Exit HW not ready\n", __func__);
2735 ret = EIO;
2736 goto out;
2737 }
2738
2739 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2740
2741 iwm_disable_interrupts(sc);
2742
2743 /* make sure rfkill handshake bits are cleared */
2744 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2745 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2746 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2747
2748 /* clear (again), then enable host interrupts */
2749 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2750
2751 ret = iwm_nic_init(sc);
2752 if (ret) {
2753 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2754 goto out;
2755 }
2756
2757 /*
2758 * Now, we load the firmware and don't want to be interrupted, even
2759 * by the RF-Kill interrupt (hence mask all interrupts besides the
2760 * FH_TX interrupt which is needed to load the firmware). If the
2761 * RF-Kill switch is toggled, we will find out after having loaded
2762 * the firmware and return the proper value to the caller.
2763 */
2764 iwm_enable_fw_load_int(sc);
2765
2766 /* really make sure rfkill handshake bits are cleared */
2767 /* maybe we should write a few times more? just to make sure */
2768 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2769 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2770
2771 /* Load the given image to the HW */
2772 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2773 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2774 else
2775 ret = iwm_pcie_load_given_ucode(sc, fw);
2776
2777 /* XXX re-check RF-Kill state */
2778
2779 out:
2780 return ret;
2781 }
2782
2783 static int
2784 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2785 {
2786 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2787 .valid = htole32(valid_tx_ant),
2788 };
2789
2790 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2791 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2792 }
2793
2794 /* iwlwifi: mvm/fw.c */
2795 static int
2796 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2797 {
2798 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2799 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2800
2801 /* Set parameters */
2802 phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2803 phy_cfg_cmd.calib_control.event_trigger =
2804 sc->sc_default_calib[ucode_type].event_trigger;
2805 phy_cfg_cmd.calib_control.flow_trigger =
2806 sc->sc_default_calib[ucode_type].flow_trigger;
2807
2808 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2809 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2810 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2811 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2812 }
2813
2814 static int
2815 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2816 {
2817 struct iwm_alive_data *alive_data = data;
2818 struct iwm_alive_resp_v3 *palive3;
2819 struct iwm_alive_resp *palive;
2820 struct iwm_umac_alive *umac;
2821 struct iwm_lmac_alive *lmac1;
2822 struct iwm_lmac_alive *lmac2 = NULL;
2823 uint16_t status;
2824
2825 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2826 palive = (void *)pkt->data;
2827 umac = &palive->umac_data;
2828 lmac1 = &palive->lmac_data[0];
2829 lmac2 = &palive->lmac_data[1];
2830 status = le16toh(palive->status);
2831 } else {
2832 palive3 = (void *)pkt->data;
2833 umac = &palive3->umac_data;
2834 lmac1 = &palive3->lmac_data;
2835 status = le16toh(palive3->status);
2836 }
2837
2838 sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2839 if (lmac2)
2840 sc->error_event_table[1] =
2841 le32toh(lmac2->error_event_table_ptr);
2842 sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2843 sc->umac_error_event_table = le32toh(umac->error_info_addr);
2844 alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2845 alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2846 if (sc->umac_error_event_table)
2847 sc->support_umac_log = TRUE;
2848
2849 IWM_DPRINTF(sc, IWM_DEBUG_FW,
2850 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2851 status, lmac1->ver_type, lmac1->ver_subtype);
2852
2853 if (lmac2)
2854 IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2855
2856 IWM_DPRINTF(sc, IWM_DEBUG_FW,
2857 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2858 le32toh(umac->umac_major),
2859 le32toh(umac->umac_minor));
2860
2861 return TRUE;
2862 }
2863
2864 static int
2865 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2866 struct iwm_rx_packet *pkt, void *data)
2867 {
2868 struct iwm_phy_db *phy_db = data;
2869
2870 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2871 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2872 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2873 __func__, pkt->hdr.code);
2874 }
2875 return TRUE;
2876 }
2877
2878 if (iwm_phy_db_set_section(phy_db, pkt)) {
2879 device_printf(sc->sc_dev,
2880 "%s: iwm_phy_db_set_section failed\n", __func__);
2881 }
2882
2883 return FALSE;
2884 }
2885
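/*
 * Load a ucode image and block until its IWM_ALIVE notification has
 * been processed by iwm_alive_fn(). On success the scheduler base
 * address from the alive data is handed to iwm_trans_pcie_fw_alive(),
 * and the firmware paging images are saved and configured if the image
 * uses paging. On any failure sc->cur_ucode is restored to the old type.
 */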
2886 static int
2887 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2888 enum iwm_ucode_type ucode_type)
2889 {
2890 struct iwm_notification_wait alive_wait;
2891 struct iwm_alive_data alive_data;
2892 const struct iwm_fw_img *fw;
2893 enum iwm_ucode_type old_type = sc->cur_ucode;
2894 int error;
2895 static const uint16_t alive_cmd[] = { IWM_ALIVE };
2896
2897 fw = &sc->sc_fw.img[ucode_type];
2898 sc->cur_ucode = ucode_type;
2899 sc->ucode_loaded = FALSE;
2900
2901 memset(&alive_data, 0, sizeof(alive_data));
2902 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2903 alive_cmd, nitems(alive_cmd),
2904 iwm_alive_fn, &alive_data);
2905
2906 error = iwm_start_fw(sc, fw);
2907 if (error) {
2908 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2909 sc->cur_ucode = old_type;
2910 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2911 return error;
2912 }
2913
2914 /*
2915 * Some things may run in the background now, but we
2916 * just wait for the ALIVE notification here.
2917 */
2918 IWM_UNLOCK(sc);
2919 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2920 IWM_UCODE_ALIVE_TIMEOUT);
2921 IWM_LOCK(sc);
2922 if (error) {
2923 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2924 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2925 if (iwm_nic_lock(sc)) {
2926 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2927 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2928 iwm_nic_unlock(sc);
2929 }
2930 device_printf(sc->sc_dev,
2931 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2932 a, b);
2933 }
2934 sc->cur_ucode = old_type;
2935 return error;
2936 }
2937
2938 if (!alive_data.valid) {
2939 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2940 __func__);
2941 sc->cur_ucode = old_type;
2942 return EIO;
2943 }
2944
2945 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2946
2947 /*
2948 * Configure and operate the fw paging mechanism.
2949 * The driver configures the paging flow only once; the CPU2 paging
2950 * image is included in the IWM_UCODE_INIT image.
2951 */
2952 if (fw->paging_mem_size) {
2953 error = iwm_save_fw_paging(sc, fw);
2954 if (error) {
2955 device_printf(sc->sc_dev,
2956 "%s: failed to save the FW paging image\n",
2957 __func__);
2958 return error;
2959 }
2960
2961 error = iwm_send_paging_cmd(sc, fw);
2962 if (error) {
2963 device_printf(sc->sc_dev,
2964 "%s: failed to send the paging cmd\n", __func__);
2965 iwm_free_fw_paging(sc);
2966 return error;
2967 }
2968 }
2969
2970 if (!error)
2971 sc->ucode_loaded = TRUE;
2972 return error;
2973 }
2974
2975 /*
2976 * mvm misc bits
2977 */
2978
2979 /*
2980 * follows iwlwifi/fw.c
2981 */
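/*
 * Run the INIT ucode: load it and wait for "alive", send the BT coex
 * configuration on pre-8000 parts, then either just read the NVM and
 * copy out the MAC address (justnvm != 0), or send the valid TX
 * antennas and phy configuration and wait for the calibration results
 * to be fed into the phy database by iwm_wait_phy_db_entry().
 */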
2982 static int
2983 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2984 {
2985 struct iwm_notification_wait calib_wait;
2986 static const uint16_t init_complete[] = {
2987 IWM_INIT_COMPLETE_NOTIF,
2988 IWM_CALIB_RES_NOTIF_PHY_DB
2989 };
2990 int ret;
2991
2992 /* do not operate with rfkill switch turned on */
2993 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2994 device_printf(sc->sc_dev,
2995 "radio is disabled by hardware switch\n");
2996 return EPERM;
2997 }
2998
2999 iwm_init_notification_wait(sc->sc_notif_wait,
3000 &calib_wait,
3001 init_complete,
3002 nitems(init_complete),
3003 iwm_wait_phy_db_entry,
3004 sc->sc_phy_db);
3005
3006 /* Will also start the device */
3007 ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3008 if (ret) {
3009 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3010 ret);
3011 goto error;
3012 }
3013
3014 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3015 ret = iwm_send_bt_init_conf(sc);
3016 if (ret) {
3017 device_printf(sc->sc_dev,
3018 "failed to send bt coex configuration: %d\n", ret);
3019 goto error;
3020 }
3021 }
3022
3023 if (justnvm) {
3024 /* Read nvm */
3025 ret = iwm_nvm_init(sc);
3026 if (ret) {
3027 device_printf(sc->sc_dev, "failed to read nvm\n");
3028 goto error;
3029 }
3030 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3031 goto error;
3032 }
3033
3034 /* Send TX valid antennas before triggering calibrations */
3035 ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3036 if (ret) {
3037 device_printf(sc->sc_dev,
3038 "failed to send antennas before calibration: %d\n", ret);
3039 goto error;
3040 }
3041
3042 /*
3043 * Send the phy configuration command to the init uCode
3044 * to start the internal calibrations of the 16.0 init image.
3045 */
3046 ret = iwm_send_phy_cfg_cmd(sc);
3047 if (ret) {
3048 device_printf(sc->sc_dev,
3049 "%s: Failed to run INIT calibrations: %d\n",
3050 __func__, ret);
3051 goto error;
3052 }
3053
3054 /*
3055 * Nothing to do but wait for the init complete notification
3056 * from the firmware.
3057 */
3058 IWM_UNLOCK(sc);
3059 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3060 IWM_UCODE_CALIB_TIMEOUT);
3061 IWM_LOCK(sc);
3062
3064 goto out;
3065
3066 error:
3067 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3068 out:
3069 return ret;
3070 }
3071
3072 static int
3073 iwm_config_ltr(struct iwm_softc *sc)
3074 {
3075 struct iwm_ltr_config_cmd cmd = {
3076 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3077 };
3078
3079 if (!sc->sc_ltr_enabled)
3080 return 0;
3081
3082 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3083 }
3084
3085 /*
3086 * receive side
3087 */
3088
3089 /* (re)stock rx ring, called at init-time and at runtime */
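/*
 * A new mbuf is always loaded into the ring's spare DMA map first; only
 * when the load succeeds is the spare map swapped with the slot's map,
 * so a bus_dmamap_load failure leaves the previous buffer in the ring
 * intact. Note the hardware wants 256-byte-aligned addresses (hence the
 * KASSERT), and legacy non-MQ devices take the address shifted right by
 * 8 bits.
 */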
3090 static int
3091 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3092 {
3093 struct iwm_rx_ring *ring = &sc->rxq;
3094 struct iwm_rx_data *data = &ring->data[idx];
3095 struct mbuf *m;
3096 bus_dmamap_t dmamap;
3097 bus_dma_segment_t seg;
3098 int nsegs, error;
3099
3100 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3101 if (m == NULL)
3102 return ENOBUFS;
3103
3104 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3105 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3106 &seg, &nsegs, BUS_DMA_NOWAIT);
3107 if (error != 0) {
3108 device_printf(sc->sc_dev,
3109 "%s: can't map mbuf, error %d\n", __func__, error);
3110 m_freem(m);
3111 return error;
3112 }
3113
3114 if (data->m != NULL)
3115 bus_dmamap_unload(ring->data_dmat, data->map);
3116
3117 /* Swap ring->spare_map with data->map */
3118 dmamap = data->map;
3119 data->map = ring->spare_map;
3120 ring->spare_map = dmamap;
3121
3122 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3123 data->m = m;
3124
3125 /* Update RX descriptor. */
3126 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3127 if (sc->cfg->mqrx_supported)
3128 ((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3129 else
3130 ((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3131 bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3132 BUS_DMASYNC_PREWRITE);
3133
3134 return 0;
3135 }
3136
3137 static void
3138 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3139 {
3140 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3141
3142 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3143
3144 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3145 }
3146
3147 /*
3148 * Retrieve the average noise (in dBm) among receivers.
3149 */
3150 static int
3151 iwm_get_noise(struct iwm_softc *sc,
3152 const struct iwm_statistics_rx_non_phy *stats)
3153 {
3154 int i, total, nbant, noise;
3155
3156 total = nbant = noise = 0;
3157 for (i = 0; i < 3; i++) {
3158 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3159 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3160 __func__,
3161 i,
3162 noise);
3163
3164 if (noise) {
3165 total += noise;
3166 nbant++;
3167 }
3168 }
3169
3170 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3171 __func__, nbant, total);
3172 #if 0
3173 /* There should be at least one antenna but check anyway. */
3174 return (nbant == 0) ? -127 : (total / nbant) - 107;
3175 #else
3176 /* For now, just hard-code it to -96 to be safe */
3177 return (-96);
3178 #endif
3179 }
3180
3181 static void
3182 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3183 {
3184 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3185
3186 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3187 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3188 }
3189
3190 /* iwlwifi: mvm/rx.c */
3191 /*
3192 * iwm_rx_get_signal_strength - use new rx PHY INFO API
3193 * values are reported by the fw as positive values - we need to negate
3194 * them to obtain their dBm. Account for missing antennas by replacing 0
3195 * values by -256 dBm: practically 0 power and an infeasible 8-bit value.
3196 */
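/*
 * For example, an energy_a field of 40 becomes -40 dBm, while a zero
 * field (missing antenna) becomes -256 dBm and thus never wins the
 * MAX() comparison against a real antenna reading.
 */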
3197 static int
3198 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3199 struct iwm_rx_phy_info *phy_info)
3200 {
3201 int energy_a, energy_b, energy_c, max_energy;
3202 uint32_t val;
3203
3204 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3205 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3206 IWM_RX_INFO_ENERGY_ANT_A_POS;
3207 energy_a = energy_a ? -energy_a : -256;
3208 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3209 IWM_RX_INFO_ENERGY_ANT_B_POS;
3210 energy_b = energy_b ? -energy_b : -256;
3211 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3212 IWM_RX_INFO_ENERGY_ANT_C_POS;
3213 energy_c = energy_c ? -energy_c : -256;
3214 max_energy = MAX(energy_a, energy_b);
3215 max_energy = MAX(max_energy, energy_c);
3216
3217 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218 "energy In A %d B %d C %d , and max %d\n",
3219 energy_a, energy_b, energy_c, max_energy);
3220
3221 return max_energy;
3222 }
3223
3224 static int
3225 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3226 struct iwm_rx_mpdu_desc *desc)
3227 {
3228 int energy_a, energy_b;
3229
3230 energy_a = desc->v1.energy_a;
3231 energy_b = desc->v1.energy_b;
3232 energy_a = energy_a ? -energy_a : -256;
3233 energy_b = energy_b ? -energy_b : -256;
3234 return MAX(energy_a, energy_b);
3235 }
3236
3237 /*
3238 * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3239 *
3240 * Handles the actual data of the Rx packet from the fw
3241 */
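/*
 * Packet layout, as consumed below: an iwm_rx_mpdu_res_start header,
 * followed by the 802.11 frame itself, followed by a 32-bit
 * rx_pkt_status word. The PHY info was delivered separately in a
 * preceding IWM_RX_PHY_CMD and stashed in sc->sc_last_phy_info.
 */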
3242 static bool
3243 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3244 bool stolen)
3245 {
3246 struct ieee80211com *ic = &sc->sc_ic;
3247 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3248 struct ieee80211_frame *wh;
3249 struct ieee80211_rx_stats rxs;
3250 struct iwm_rx_phy_info *phy_info;
3251 struct iwm_rx_mpdu_res_start *rx_res;
3252 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3253 uint32_t len;
3254 uint32_t rx_pkt_status;
3255 int rssi;
3256
3257 phy_info = &sc->sc_last_phy_info;
3258 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3259 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3260 len = le16toh(rx_res->byte_count);
3261 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3262
3263 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3264 device_printf(sc->sc_dev,
3265 "dsp size out of range [0,20]: %d\n",
3266 phy_info->cfg_phy_cnt);
3267 return false;
3268 }
3269
3270 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3271 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3272 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3273 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3274 return false;
3275 }
3276
3277 rssi = iwm_rx_get_signal_strength(sc, phy_info);
3278
3279 /* Map it to relative value */
3280 rssi = rssi - sc->sc_noise;
3281
3282 /* replenish ring for the buffer we're going to feed to the sharks */
3283 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3284 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3285 __func__);
3286 return false;
3287 }
3288
3289 m->m_data = pkt->data + sizeof(*rx_res);
3290 m->m_pkthdr.len = m->m_len = len;
3291
3292 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3293 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3294
3295 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3296 "%s: phy_info: channel=%d, flags=0x%08x\n",
3297 __func__,
3298 le16toh(phy_info->channel),
3299 le16toh(phy_info->phy_flags));
3300
3301 /*
3302 * Populate an RX state struct with the provided information.
3303 */
3304 bzero(&rxs, sizeof(rxs));
3305 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3306 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3307 rxs.c_ieee = le16toh(phy_info->channel);
3308 if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3309 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3310 } else {
3311 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3312 }
3313
3314 /* rssi is in 1/2db units */
3315 rxs.c_rssi = rssi * 2;
3316 rxs.c_nf = sc->sc_noise;
3317 if (ieee80211_add_rx_params(m, &rxs) == 0)
3318 return false;
3319
3320 if (ieee80211_radiotap_active_vap(vap)) {
3321 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3322
3323 tap->wr_flags = 0;
3324 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3325 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3326 tap->wr_chan_freq = htole16(rxs.c_freq);
3327 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3328 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3329 tap->wr_dbm_antsignal = (int8_t)rssi;
3330 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3331 tap->wr_tsft = phy_info->system_timestamp;
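/*
 * phy_info->rate holds the PLCP signal value; wr_rate is in
 * 500 kb/s units, so e.g. CCK PLCP 10 (1 Mb/s) maps to 2 and
 * OFDM PLCP 0xd (6 Mb/s) maps to 12.
 */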
3332 switch (phy_info->rate) {
3333 /* CCK rates. */
3334 case 10: tap->wr_rate = 2; break;
3335 case 20: tap->wr_rate = 4; break;
3336 case 55: tap->wr_rate = 11; break;
3337 case 110: tap->wr_rate = 22; break;
3338 /* OFDM rates. */
3339 case 0xd: tap->wr_rate = 12; break;
3340 case 0xf: tap->wr_rate = 18; break;
3341 case 0x5: tap->wr_rate = 24; break;
3342 case 0x7: tap->wr_rate = 36; break;
3343 case 0x9: tap->wr_rate = 48; break;
3344 case 0xb: tap->wr_rate = 72; break;
3345 case 0x1: tap->wr_rate = 96; break;
3346 case 0x3: tap->wr_rate = 108; break;
3347 /* Unknown rate: should not happen. */
3348 default: tap->wr_rate = 0;
3349 }
3350 }
3351
3352 return true;
3353 }
3354
3355 static bool
3356 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3357 bool stolen)
3358 {
3359 struct ieee80211com *ic = &sc->sc_ic;
3360 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3361 struct ieee80211_frame *wh;
3362 struct ieee80211_rx_stats rxs;
3363 struct iwm_rx_mpdu_desc *desc;
3364 struct iwm_rx_packet *pkt;
3365 int rssi;
3366 uint32_t hdrlen, len, rate_n_flags;
3367 uint16_t phy_info;
3368 uint8_t channel;
3369
3370 pkt = mtodo(m, offset);
3371 desc = (void *)pkt->data;
3372
3373 if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3374 !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3375 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3376 "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3377 return false;
3378 }
3379
3380 channel = desc->v1.channel;
3381 len = le16toh(desc->mpdu_len);
3382 phy_info = le16toh(desc->phy_info);
3383 rate_n_flags = desc->v1.rate_n_flags;
3384
3385 wh = mtodo(m, sizeof(*desc));
3386 m->m_data = pkt->data + sizeof(*desc);
3387 m->m_pkthdr.len = m->m_len = len;
3389
3390 /* Account for padding following the frame header. */
3391 if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3392 hdrlen = ieee80211_anyhdrsize(wh);
3393 memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3394 m->m_data = mtodo(m, 2);
3395 wh = mtod(m, struct ieee80211_frame *);
3396 }
3397
3398 /* Map it to relative value */
3399 rssi = iwm_rxmq_get_signal_strength(sc, desc);
3400 rssi = rssi - sc->sc_noise;
3401
3402 /* replenish ring for the buffer we're going to feed to the sharks */
3403 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3404 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3405 __func__);
3406 return false;
3407 }
3408
3409 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3410 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3411
3412 /*
3413 * Populate an RX state struct with the provided information.
3414 */
3415 bzero(&rxs, sizeof(rxs));
3416 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3417 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3418 rxs.c_ieee = channel;
3419 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3420 channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3421
3422 /* rssi is in 1/2db units */
3423 rxs.c_rssi = rssi * 2;
3424 rxs.c_nf = sc->sc_noise;
3425 if (ieee80211_add_rx_params(m, &rxs) == 0)
3426 return false;
3427
3428 if (ieee80211_radiotap_active_vap(vap)) {
3429 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3430
3431 tap->wr_flags = 0;
3432 if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3433 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3434 tap->wr_chan_freq = htole16(rxs.c_freq);
3435 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3436 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3437 tap->wr_dbm_antsignal = (int8_t)rssi;
3438 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3439 tap->wr_tsft = desc->v1.gp2_on_air_rise;
3440 switch ((rate_n_flags & 0xff)) {
3441 /* CCK rates. */
3442 case 10: tap->wr_rate = 2; break;
3443 case 20: tap->wr_rate = 4; break;
3444 case 55: tap->wr_rate = 11; break;
3445 case 110: tap->wr_rate = 22; break;
3446 /* OFDM rates. */
3447 case 0xd: tap->wr_rate = 12; break;
3448 case 0xf: tap->wr_rate = 18; break;
3449 case 0x5: tap->wr_rate = 24; break;
3450 case 0x7: tap->wr_rate = 36; break;
3451 case 0x9: tap->wr_rate = 48; break;
3452 case 0xb: tap->wr_rate = 72; break;
3453 case 0x1: tap->wr_rate = 96; break;
3454 case 0x3: tap->wr_rate = 108; break;
3455 /* Unknown rate: should not happen. */
3456 default: tap->wr_rate = 0;
3457 }
3458 }
3459
3460 return true;
3461 }
3462
3463 static bool
3464 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3465 bool stolen)
3466 {
3467 struct ieee80211com *ic;
3468 struct ieee80211_frame *wh;
3469 struct ieee80211_node *ni;
3470 bool ret;
3471
3472 ic = &sc->sc_ic;
3473
3474 ret = sc->cfg->mqrx_supported ?
3475 iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3476 iwm_rx_rx_mpdu(sc, m, offset, stolen);
3477 if (!ret) {
3478 counter_u64_add(ic->ic_ierrors, 1);
3479 return (ret);
3480 }
3481
3482 wh = mtod(m, struct ieee80211_frame *);
3483 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3484
3485 IWM_UNLOCK(sc);
3486 if (ni != NULL) {
3487 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3488 ieee80211_input_mimo(ni, m);
3489 ieee80211_free_node(ni);
3490 } else {
3491 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3492 ieee80211_input_mimo_all(ic, m);
3493 }
3494 IWM_LOCK(sc);
3495
3496 return true;
3497 }
3498
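/*
 * Per-frame TX completion for single-frame responses: translate the
 * firmware status into an ieee80211_ratectl_tx_status, but only feed
 * the rate control module when the frame actually went out at the
 * current ni_txrate; if rate control then picks a new rate, push an
 * updated link quality command to the firmware.
 */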
3499 static int
3500 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3501 struct iwm_node *in)
3502 {
3503 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3504 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3505 struct ieee80211_node *ni = &in->in_ni;
3506 struct ieee80211vap *vap = ni->ni_vap;
3507 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3508 int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3509 boolean_t rate_matched;
3510 uint8_t tx_resp_rate;
3511
3512 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3513
3514 /* Update rate control statistics. */
3515 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3516 __func__,
3517 (int) le16toh(tx_resp->status.status),
3518 (int) le16toh(tx_resp->status.sequence),
3519 tx_resp->frame_count,
3520 tx_resp->bt_kill_count,
3521 tx_resp->failure_rts,
3522 tx_resp->failure_frame,
3523 le32toh(tx_resp->initial_rate),
3524 (int) le16toh(tx_resp->wireless_media_time));
3525
3526 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3527
3528 /* For rate control, ignore frames sent at different initial rate */
3529 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3530
3531 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3532 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3533 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3534 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3535 }
3536
3537 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3538 IEEE80211_RATECTL_STATUS_LONG_RETRY;
3539 txs->short_retries = tx_resp->failure_rts;
3540 txs->long_retries = tx_resp->failure_frame;
3541 if (status != IWM_TX_STATUS_SUCCESS &&
3542 status != IWM_TX_STATUS_DIRECT_DONE) {
3543 switch (status) {
3544 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3545 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3546 break;
3547 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3548 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3549 break;
3550 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3551 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3552 break;
3553 default:
3554 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3555 break;
3556 }
3557 } else {
3558 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3559 }
3560
3561 if (rate_matched) {
3562 ieee80211_ratectl_tx_complete(ni, txs);
3563
3564 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3565 new_rate = vap->iv_bss->ni_txrate;
3566 if (new_rate != 0 && new_rate != cur_rate) {
3567 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3568 iwm_setrates(sc, in, rix);
3569 iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3570 }
3571 }
3572
3573 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3574 }
3575
3576 static void
3577 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3578 {
3579 struct iwm_cmd_header *cmd_hdr;
3580 struct iwm_tx_ring *ring;
3581 struct iwm_tx_data *txd;
3582 struct iwm_node *in;
3583 struct mbuf *m;
3584 int idx, qid, qmsk, status;
3585
3586 cmd_hdr = &pkt->hdr;
3587 idx = cmd_hdr->idx;
3588 qid = cmd_hdr->qid;
3589
3590 ring = &sc->txq[qid];
3591 txd = &ring->data[idx];
3592 in = txd->in;
3593 m = txd->m;
3594
3595 KASSERT(txd->done == 0, ("txd not done"));
3596 KASSERT(txd->in != NULL, ("txd without node"));
3597 KASSERT(txd->m != NULL, ("txd without mbuf"));
3598
3599 sc->sc_tx_timer = 0;
3600
3601 status = iwm_rx_tx_cmd_single(sc, pkt, in);
3602
3603 /* Unmap and free mbuf. */
3604 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3605 bus_dmamap_unload(ring->data_dmat, txd->map);
3606
3607 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3608 "free txd %p, in %p\n", txd, txd->in);
3609 txd->done = 1;
3610 txd->m = NULL;
3611 txd->in = NULL;
3612
3613 ieee80211_tx_complete(&in->in_ni, m, status);
3614
3615 qmsk = 1 << qid;
3616 if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3617 sc->qfullmsk &= ~qmsk;
3618 if (sc->qfullmsk == 0)
3619 iwm_start(sc);
3620 }
3621 }
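
/*
 * Flow-control note (added, illustrative): iwm_tx() sets this ring's bit
 * in sc->qfullmsk once ring->queued exceeds IWM_TX_RING_HIMARK, which
 * stalls iwm_start(); the completion path above clears the bit again once
 * the ring drains below IWM_TX_RING_LOMARK, and restarts transmission
 * when no ring is full any more.
 */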
3622
3623 /*
3624 * transmit side
3625 */
3626
3627 /*
3628 * Process a "command done" firmware notification. This is where we wake
3629 * up processes waiting for a synchronous command completion.
3630 * (Adapted from if_iwn.)
3631 */
3632 static void
3633 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3634 {
3635 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3636 struct iwm_tx_data *data;
3637
3638 if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3639 return; /* Not a command ack. */
3640 }
3641
3642 /* XXX wide commands? */
3643 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3644 "cmd notification type 0x%x qid %d idx %d\n",
3645 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3646
3647 data = &ring->data[pkt->hdr.idx];
3648
3649 /* If the command was mapped in an mbuf, free it. */
3650 if (data->m != NULL) {
3651 bus_dmamap_sync(ring->data_dmat, data->map,
3652 BUS_DMASYNC_POSTWRITE);
3653 bus_dmamap_unload(ring->data_dmat, data->map);
3654 m_freem(data->m);
3655 data->m = NULL;
3656 }
3657 wakeup(&ring->desc[pkt->hdr.idx]);
3658
3659 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3660 device_printf(sc->sc_dev,
3661 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3662 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3663 /* XXX call iwm_force_nmi() */
3664 }
3665
3666 KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3667 ring->queued--;
3668 if (ring->queued == 0)
3669 iwm_pcie_clear_cmd_in_flight(sc);
3670 }
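
/*
 * Ring-geometry sketch (added, illustrative; not driver code): commands
 * are posted at 'cur', which then advances, so with 'queued' commands
 * outstanding the oldest one lives at (cur - queued) mod
 * IWM_TX_RING_COUNT.  Completions should arrive oldest-first, hence the
 * in-order check above.  E.g. with cur = 10 and queued = 3 the expected
 * completion index is 7, since (7 + 3) % IWM_TX_RING_COUNT == 10.
 */
#if 0
static int
iwm_cmd_ack_in_order(int idx, int queued, int cur)
{
	/* Nonzero when the acked slot is the oldest outstanding command. */
	return (((idx + queued) % IWM_TX_RING_COUNT) == cur);
}
#endif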
3671
3672 #if 0
3673 /*
3674 * necessary only for block ack mode
3675 */
3676 void
3677 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3678 uint16_t len)
3679 {
3680 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3681 uint16_t w_val;
3682
3683 scd_bc_tbl = sc->sched_dma.vaddr;
3684
3685 len += 8; /* magic numbers came naturally from paris */
3686 len = roundup(len, 4) / 4;
3687
3688 w_val = htole16(sta_id << 12 | len);
3689
3690 /* Update TX scheduler. */
3691 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3692 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3693 BUS_DMASYNC_PREWRITE);
3694
3695 /* I really wonder what this is ?!? */
3696 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3697 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3698 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3699 BUS_DMASYNC_PREWRITE);
3700 }
3701 }
3702 #endif
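
/*
 * Encoding sketch (added, illustrative; mirrors the disabled code above):
 * a byte-count table entry packs the station id into the top four bits
 * and the frame length, in dwords including an 8-byte overhead, into the
 * low bits.  E.g. a 100-byte frame rounds up to 108 bytes == 27 dwords,
 * so for sta_id 2 the entry is (2 << 12) | 27 == 0x201b.
 */
#if 0
static uint16_t
iwm_scd_bc_entry(uint8_t sta_id, uint16_t len)
{
	len = roundup(len + 8, 4) / 4;	/* dwords, incl. overhead */
	return (htole16(sta_id << 12 | len));
}
#endif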
3703
3704 static int
3705 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3706 {
3707 int i;
3708
3709 for (i = 0; i < nitems(iwm_rates); i++) {
3710 if (iwm_rates[i].rate == rate)
3711 return (i);
3712 }
3713 /* XXX error? */
3714 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3715 "%s: couldn't find an entry for rate=%d\n",
3716 __func__,
3717 rate);
3718 return (0);
3719 }
3720
3721 /*
3722 * Fill in the rate related information for a transmit command.
3723 */
3724 static const struct iwm_rate *
3725 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3726 struct mbuf *m, struct iwm_tx_cmd *tx)
3727 {
3728 struct ieee80211_node *ni = &in->in_ni;
3729 struct ieee80211_frame *wh;
3730 const struct ieee80211_txparam *tp = ni->ni_txparms;
3731 const struct iwm_rate *rinfo;
3732 int type;
3733 int ridx, rate_flags;
3734
3735 wh = mtod(m, struct ieee80211_frame *);
3736 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3737
3738 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3739 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3740
3741 if (type == IEEE80211_FC0_TYPE_MGT ||
3742 type == IEEE80211_FC0_TYPE_CTL ||
3743 (m->m_flags & M_EAPOL) != 0) {
3744 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3745 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3746 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3747 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3748 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3749 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3750 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3751 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3752 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3753 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3754 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3755 } else {
3756 /* for data frames, use RS table */
3757 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3758 ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3759 if (ridx == -1)
3760 ridx = 0;
3761
3762 /* This is the index into the programmed table */
3763 tx->initial_rate_index = 0;
3764 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3765 }
3766
3767 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3768 "%s: frame type=%d txrate %d\n",
3769 __func__, type, iwm_rates[ridx].rate);
3770
3771 rinfo = &iwm_rates[ridx];
3772
3773 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3774 __func__, ridx,
3775 rinfo->rate,
3776 !! (IWM_RIDX_IS_CCK(ridx))
3777 );
3778
3779 /* XXX TODO: hard-coded TX antenna? */
3780 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3781 rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3782 else
3783 rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3784 if (IWM_RIDX_IS_CCK(ridx))
3785 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3786 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3787
3788 return rinfo;
3789 }
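
/*
 * Packing sketch (added, illustrative; not driver code): rate_n_flags
 * carries the PLCP code in the low byte, OR'ed with antenna and
 * modulation flag bits, exactly as assembled at the end of
 * iwm_tx_fill_cmd() above.
 */
#if 0
static uint32_t
iwm_build_rate_n_flags(const struct iwm_rate *rinfo, int ridx, int ant_b)
{
	uint32_t rate_flags;

	rate_flags = ant_b ? IWM_RATE_MCS_ANT_B_MSK : IWM_RATE_MCS_ANT_A_MSK;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	return (htole32(rate_flags | rinfo->plcp));
}
#endif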
3790
3791 #define TB0_SIZE 16
3792 static int
3793 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3794 {
3795 struct ieee80211com *ic = &sc->sc_ic;
3796 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3797 struct iwm_node *in = IWM_NODE(ni);
3798 struct iwm_tx_ring *ring;
3799 struct iwm_tx_data *data;
3800 struct iwm_tfd *desc;
3801 struct iwm_device_cmd *cmd;
3802 struct iwm_tx_cmd *tx;
3803 struct ieee80211_frame *wh;
3804 struct ieee80211_key *k = NULL;
3805 struct mbuf *m1;
3806 const struct iwm_rate *rinfo;
3807 uint32_t flags;
3808 u_int hdrlen;
3809 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3810 int nsegs;
3811 uint8_t tid, type;
3812 int i, totlen, error, pad;
3813
3814 wh = mtod(m, struct ieee80211_frame *);
3815 hdrlen = ieee80211_anyhdrsize(wh);
3816 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3817 tid = 0;
3818 ring = &sc->txq[ac];
3819 desc = &ring->desc[ring->cur];
3820 data = &ring->data[ring->cur];
3821
3822 /* Fill out iwm_tx_cmd to send to the firmware */
3823 cmd = &ring->cmd[ring->cur];
3824 cmd->hdr.code = IWM_TX_CMD;
3825 cmd->hdr.flags = 0;
3826 cmd->hdr.qid = ring->qid;
3827 cmd->hdr.idx = ring->cur;
3828
3829 tx = (void *)cmd->data;
3830 memset(tx, 0, sizeof(*tx));
3831
3832 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3833
3834 /* Encrypt the frame if need be. */
3835 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3836 /* Retrieve key for TX && do software encryption. */
3837 k = ieee80211_crypto_encap(ni, m);
3838 if (k == NULL) {
3839 m_freem(m);
3840 return (ENOBUFS);
3841 }
3842 /* 802.11 header may have moved. */
3843 wh = mtod(m, struct ieee80211_frame *);
3844 }
3845
3846 if (ieee80211_radiotap_active_vap(vap)) {
3847 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3848
3849 tap->wt_flags = 0;
3850 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3851 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3852 tap->wt_rate = rinfo->rate;
3853 if (k != NULL)
3854 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3855 ieee80211_radiotap_tx(vap, m);
3856 }
3857
3858 flags = 0;
3859 totlen = m->m_pkthdr.len;
3860 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3861 flags |= IWM_TX_CMD_FLG_ACK;
3862 }
3863
3864 if (type == IEEE80211_FC0_TYPE_DATA &&
3865 totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3866 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3867 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3868 }
3869
3870 tx->sta_id = IWM_STATION_ID;
3871
3872 if (type == IEEE80211_FC0_TYPE_MGT) {
3873 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3874
3875 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3876 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3877 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3878 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3879 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3880 } else {
3881 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3882 }
3883 } else {
3884 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3885 }
3886
3887 if (hdrlen & 3) {
3888 /* First segment length must be a multiple of 4. */
3889 flags |= IWM_TX_CMD_FLG_MH_PAD;
3890 tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
3891 pad = 4 - (hdrlen & 3);
3892 } else {
3893 tx->offload_assist = 0;
3894 pad = 0;
3895 }
3896
3897 tx->len = htole16(totlen);
3898 tx->tid_tspec = tid;
3899 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3900
3901 /* Set physical address of "scratch area". */
3902 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3903 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3904
3905 /* Copy 802.11 header in TX command. */
3906 memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3907
3908 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3909
3910 tx->sec_ctl = 0;
3911 tx->tx_flags |= htole32(flags);
3912
3913 /* Trim 802.11 header. */
3914 m_adj(m, hdrlen);
3915 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3916 segs, &nsegs, BUS_DMA_NOWAIT);
3917 if (error != 0) {
3918 if (error != EFBIG) {
3919 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3920 error);
3921 m_freem(m);
3922 return error;
3923 }
3924 /* Too many DMA segments, linearize mbuf. */
3925 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3926 if (m1 == NULL) {
3927 device_printf(sc->sc_dev,
3928 "%s: could not defrag mbuf\n", __func__);
3929 m_freem(m);
3930 return (ENOBUFS);
3931 }
3932 m = m1;
3933
3934 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3935 segs, &nsegs, BUS_DMA_NOWAIT);
3936 if (error != 0) {
3937 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3938 error);
3939 m_freem(m);
3940 return error;
3941 }
3942 }
3943 data->m = m;
3944 data->in = in;
3945 data->done = 0;
3946
3947 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3948 "sending txd %p, in %p\n", data, data->in);
3949 KASSERT(data->in != NULL, ("node is NULL"));
3950
3951 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3952 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3953 ring->qid, ring->cur, totlen, nsegs,
3954 le32toh(tx->tx_flags),
3955 le32toh(tx->rate_n_flags),
3956 tx->initial_rate_index
3957 );
3958
3959 /* Fill TX descriptor. */
3960 memset(desc, 0, sizeof(*desc));
3961 desc->num_tbs = 2 + nsegs;
3962
3963 desc->tbs[0].lo = htole32(data->cmd_paddr);
3964 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3965 (TB0_SIZE << 4));
3966 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3967 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3968 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3969 hdrlen + pad - TB0_SIZE) << 4));
3970
3971 /* Other DMA segments are for data payload. */
3972 for (i = 0; i < nsegs; i++) {
3973 seg = &segs[i];
3974 desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3975 desc->tbs[i + 2].hi_n_len =
3976 htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
3977 (seg->ds_len << 4));
3978 }
3979
3980 bus_dmamap_sync(ring->data_dmat, data->map,
3981 BUS_DMASYNC_PREWRITE);
3982 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3983 BUS_DMASYNC_PREWRITE);
3984 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3985 BUS_DMASYNC_PREWRITE);
3986
3987 #if 0
3988 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3989 #endif
3990
3991 /* Kick TX ring. */
3992 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3993 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3994
3995 /* Mark TX ring as full if we reach a certain threshold. */
3996 if (++ring->queued > IWM_TX_RING_HIMARK) {
3997 sc->qfullmsk |= 1 << ring->qid;
3998 }
3999
4000 return 0;
4001 }
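
/*
 * TFD layout sketch (added, illustrative; not driver code): each transfer
 * buffer entry stores the low 32 address bits in 'lo' and packs the high
 * address bits (low nibble) together with the buffer length (upper 12
 * bits) into 'hi_n_len'.  In iwm_tx() above, TB0 covers the first 16
 * bytes of the command, TB1 the rest of the command header, TX command
 * and padded 802.11 header, and TB2..n the payload segments.
 */
#if 0
static uint16_t
iwm_tfd_hi_n_len(bus_addr_t addr, uint16_t len)
{
	return (htole16(iwm_get_dma_hi_addr(addr) | (len << 4)));
}
#endif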
4002
4003 static int
4004 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4005 const struct ieee80211_bpf_params *params)
4006 {
4007 struct ieee80211com *ic = ni->ni_ic;
4008 struct iwm_softc *sc = ic->ic_softc;
4009 int error = 0;
4010
4011 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4012 "->%s begin\n", __func__);
4013
4014 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4015 m_freem(m);
4016 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4017 "<-%s not RUNNING\n", __func__);
4018 return (ENETDOWN);
4019 }
4020
4021 IWM_LOCK(sc);
4022 /* XXX fix this */
4023 if (params == NULL) {
4024 error = iwm_tx(sc, m, ni, 0);
4025 } else {
4026 error = iwm_tx(sc, m, ni, 0);
4027 }
4028 if (sc->sc_tx_timer == 0)
4029 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4030 sc->sc_tx_timer = 5;
4031 IWM_UNLOCK(sc);
4032
4033 return (error);
4034 }
4035
4036 /*
4037 * mvm/tx.c
4038 */
4039
4040 /*
4041 * Note that there are transports that buffer frames before they reach
4042 * the firmware. This means that after flush_tx_path is called, the
4043 * queue might not be empty. The race-free way to handle this is to:
4044 * 1) set the station as draining
4045 * 2) flush the Tx path
4046 * 3) wait for the transport queues to be empty
4047 */
4048 int
4049 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4050 {
4051 int ret;
4052 struct iwm_tx_path_flush_cmd flush_cmd = {
4053 .queues_ctl = htole32(tfd_msk),
4054 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4055 };
4056
4057 ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4058 sizeof(flush_cmd), &flush_cmd);
4059 if (ret)
4060 device_printf(sc->sc_dev,
4061 "Flushing tx queue failed: %d\n", ret);
4062 return ret;
4063 }
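
/*
 * Usage note (added, illustrative): callers pass a bitmask with one bit
 * per TFD queue; e.g. the disabled call in iwm_bring_down_firmware()
 * below uses iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC) to flush the four
 * AC queues.
 */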
4064
4065 /*
4066 * BEGIN mvm/quota.c
4067 */
4068
4069 static int
4070 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4071 {
4072 struct iwm_time_quota_cmd cmd;
4073 int i, idx, ret, num_active_macs, quota, quota_rem;
4074 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4075 int n_ifs[IWM_MAX_BINDINGS] = {0, };
4076 uint16_t id;
4077
4078 memset(&cmd, 0, sizeof(cmd));
4079
4080 /* currently, PHY ID == binding ID */
4081 if (ivp) {
4082 id = ivp->phy_ctxt->id;
4083 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4084 colors[id] = ivp->phy_ctxt->color;
4085
4086 n_ifs[id] = 1;
4088 }
4089
4090 /*
4091 * The FW's scheduling session consists of
4092 * IWM_MAX_QUOTA fragments. Divide these fragments
4093 * equally between all the bindings that require quota.
4094 */
4095 num_active_macs = 0;
4096 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4097 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4098 num_active_macs += n_ifs[i];
4099 }
4100
4101 quota = 0;
4102 quota_rem = 0;
4103 if (num_active_macs) {
4104 quota = IWM_MAX_QUOTA / num_active_macs;
4105 quota_rem = IWM_MAX_QUOTA % num_active_macs;
4106 }
4107
4108 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4109 if (colors[i] < 0)
4110 continue;
4111
4112 cmd.quotas[idx].id_and_color =
4113 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4114
4115 if (n_ifs[i] <= 0) {
4116 cmd.quotas[idx].quota = htole32(0);
4117 cmd.quotas[idx].max_duration = htole32(0);
4118 } else {
4119 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4120 cmd.quotas[idx].max_duration = htole32(0);
4121 }
4122 idx++;
4123 }
4124
4125 /* Give the remainder of the session to the first binding */
4126 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4127
4128 ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4129 sizeof(cmd), &cmd);
4130 if (ret)
4131 device_printf(sc->sc_dev,
4132 "%s: Failed to send quota: %d\n", __func__, ret);
4133 return ret;
4134 }
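
/*
 * Worked example (added, illustrative; assumes IWM_MAX_QUOTA == 128 as
 * in the reference firmware API): with three active MACs each binding is
 * granted 128 / 3 == 42 fragments, and the remainder 128 % 3 == 2 is
 * credited to the first binding, i.e. 44 + 42 + 42 == 128.
 */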
4135
4136 /*
4137 * END mvm/quota.c
4138 */
4139
4140 /*
4141 * ieee80211 routines
4142 */
4143
4144 /*
4145 * Change to AUTH state in 80211 state machine. Roughly matches what
4146 * Linux does in bss_info_changed().
4147 */
4148 static int
4149 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4150 {
4151 struct ieee80211_node *ni;
4152 struct iwm_node *in;
4153 struct iwm_vap *iv = IWM_VAP(vap);
4154 uint32_t duration;
4155 int error;
4156
4157 /*
4158 * XXX i have a feeling that the vap node is being
4159 * freed from underneath us. Grr.
4160 */
4161 ni = ieee80211_ref_node(vap->iv_bss);
4162 in = IWM_NODE(ni);
4163 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4164 "%s: called; vap=%p, bss ni=%p\n",
4165 __func__,
4166 vap,
4167 ni);
4168 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4169 __func__, ether_sprintf(ni->ni_bssid));
4170
4171 in->in_assoc = 0;
4172 iv->iv_auth = 1;
4173
4174 /*
4175 * Firmware bug - it'll crash if the beacon interval is less
4176 * than 16. We can't avoid connecting at all, so refuse the
4177 * station state change, this will cause net80211 to abandon
4178 * attempts to connect to this AP, and eventually wpa_s will
4179 * blacklist the AP...
4180 */
4181 if (ni->ni_intval < 16) {
4182 device_printf(sc->sc_dev,
4183 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4184 ether_sprintf(ni->ni_bssid), ni->ni_intval);
4185 error = EINVAL;
4186 goto out;
4187 }
4188
4189 error = iwm_allow_mcast(vap, sc);
4190 if (error) {
4191 device_printf(sc->sc_dev,
4192 "%s: failed to set multicast\n", __func__);
4193 goto out;
4194 }
4195
4196 /*
4197 * This is where it deviates from what Linux does.
4198 *
4199 * Linux iwlwifi doesn't reset the nic each time, nor does it
4200 * call ctxt_add() here. Instead, it adds it during vap creation,
4201 * and always does a mac_ctx_changed().
4202 *
4203 * The OpenBSD port doesn't attempt to do that - it resets things
4204 * at odd states and does the add here.
4205 *
4206 * So, until the state handling is fixed (i.e., we never reset
4207 * the NIC except for a firmware failure, which should drag
4208 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4209 * contexts that are required), let's do a dirty hack here.
4210 */
4211 if (iv->is_uploaded) {
4212 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4213 device_printf(sc->sc_dev,
4214 "%s: failed to update MAC\n", __func__);
4215 goto out;
4216 }
4217 } else {
4218 if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4219 device_printf(sc->sc_dev,
4220 "%s: failed to add MAC\n", __func__);
4221 goto out;
4222 }
4223 }
4224 sc->sc_firmware_state = 1;
4225
4226 if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4227 in->in_ni.ni_chan, 1, 1)) != 0) {
4228 device_printf(sc->sc_dev,
4229 "%s: failed update phy ctxt\n", __func__);
4230 goto out;
4231 }
4232 iv->phy_ctxt = &sc->sc_phyctxt[0];
4233
4234 if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4235 device_printf(sc->sc_dev,
4236 "%s: binding update cmd\n", __func__);
4237 goto out;
4238 }
4239 sc->sc_firmware_state = 2;
4240 /*
4241 * Authentication becomes unreliable when powersaving is left enabled
4242 * here. Powersaving will be activated again when association has
4243 * finished or is aborted.
4244 */
4245 iv->ps_disabled = TRUE;
4246 error = iwm_power_update_mac(sc);
4247 iv->ps_disabled = FALSE;
4248 if (error != 0) {
4249 device_printf(sc->sc_dev,
4250 "%s: failed to update power management\n",
4251 __func__);
4252 goto out;
4253 }
4254 if ((error = iwm_add_sta(sc, in)) != 0) {
4255 device_printf(sc->sc_dev,
4256 "%s: failed to add sta\n", __func__);
4257 goto out;
4258 }
4259 sc->sc_firmware_state = 3;
4260
4261 /*
4262 * Prevent the FW from wandering off channel during association
4263 * by "protecting" the session with a time event.
4264 */
4265 /* XXX duration is in units of TU, not MS */
4266 duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4267 iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4268
4269 error = 0;
4270 out:
4271 if (error != 0)
4272 iv->iv_auth = 0;
4273 ieee80211_free_node(ni);
4274 return (error);
4275 }
4276
4277 static struct ieee80211_node *
4278 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4279 {
4280 return malloc(sizeof (struct iwm_node), M_80211_NODE,
4281 M_NOWAIT | M_ZERO);
4282 }
4283
4284 static uint8_t
4285 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4286 {
4287 uint8_t plcp = rate_n_flags & 0xff;
4288 int i;
4289
4290 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4291 if (iwm_rates[i].plcp == plcp)
4292 return iwm_rates[i].rate;
4293 }
4294 return 0;
4295 }
4296
4297 uint8_t
4298 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4299 {
4300 int i;
4301 uint8_t rval;
4302
4303 for (i = 0; i < rs->rs_nrates; i++) {
4304 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4305 if (rval == iwm_rates[ridx].rate)
4306 return rs->rs_rates[i];
4307 }
4308
4309 return 0;
4310 }
4311
4312 static int
4313 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4314 {
4315 int i;
4316
4317 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4318 if (iwm_rates[i].rate == rate)
4319 return i;
4320 }
4321
4322 device_printf(sc->sc_dev,
4323 "%s: WARNING: device rate for %u not found!\n",
4324 __func__, rate);
4325
4326 return -1;
4327 }
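
/*
 * Note (added, illustrative): the helpers above are inverse lookups over
 * the same iwm_rates[] table, keyed by PLCP code, by table index, and by
 * 802.11 rate value respectively.  E.g. for 11 Mbps CCK the table row
 * carries rate == 22 (500 kbps units), so iwm_rate2ridx(sc, 22) recovers
 * the row index and iwm_rates[ridx].plcp the firmware PLCP code.
 */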
4328
4329
4330 static void
4331 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4332 {
4333 struct ieee80211_node *ni = &in->in_ni;
4334 struct iwm_lq_cmd *lq = &in->in_lq;
4335 struct ieee80211_rateset *rs = &ni->ni_rates;
4336 int nrates = rs->rs_nrates;
4337 int i, ridx, tab = 0;
4338 // int txant = 0;
4339
4340 KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4341
4342 if (nrates > nitems(lq->rs_table)) {
4343 device_printf(sc->sc_dev,
4344 "%s: node supports %d rates, driver handles "
4345 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4346 return;
4347 }
4348 if (nrates == 0) {
4349 device_printf(sc->sc_dev,
4350 "%s: node supports 0 rates, odd!\n", __func__);
4351 return;
4352 }
4353 nrates = imin(rix + 1, nrates);
4354
4355 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4356 "%s: nrates=%d\n", __func__, nrates);
4357
4358 /* then construct a lq_cmd based on those */
4359 memset(lq, 0, sizeof(*lq));
4360 lq->sta_id = IWM_STATION_ID;
4361
4362 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4363 if (ni->ni_flags & IEEE80211_NODE_HT)
4364 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4365
4366 /*
4367 * Are these used? (We don't do SISO or MIMO.)
4368 * They need to be set to non-zero, though, or we get an error.
4369 */
4370 lq->single_stream_ant_msk = 1;
4371 lq->dual_stream_ant_msk = 1;
4372
4373 /*
4374 * Build the actual rate selection table.
4375 * The lowest bits are the rates. Additionally,
4376 * CCK needs bit 9 to be set. The rest of the bits
4377 * we add to the table select the tx antenna.
4378 * Note that we add the rates highest-rate-first
4379 * (the opposite of the ni_rates ordering).
4380 */
4381 for (i = 0; i < nrates; i++) {
4382 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4383 int nextant;
4384
4385 /* Map 802.11 rate to HW rate index. */
4386 ridx = iwm_rate2ridx(sc, rate);
4387 if (ridx == -1)
4388 continue;
4389
4390 #if 0
4391 if (txant == 0)
4392 txant = iwm_get_valid_tx_ant(sc);
4393 nextant = 1<<(ffs(txant)-1);
4394 txant &= ~nextant;
4395 #else
4396 nextant = iwm_get_valid_tx_ant(sc);
4397 #endif
4398 tab = iwm_rates[ridx].plcp;
4399 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4400 if (IWM_RIDX_IS_CCK(ridx))
4401 tab |= IWM_RATE_MCS_CCK_MSK;
4402 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4403 "station rate i=%d, rate=%d, hw=%x\n",
4404 i, iwm_rates[ridx].rate, tab);
4405 lq->rs_table[i] = htole32(tab);
4406 }
4407 /* then fill the rest with the lowest possible rate */
4408 for (i = nrates; i < nitems(lq->rs_table); i++) {
4409 KASSERT(tab != 0, ("invalid tab"));
4410 lq->rs_table[i] = htole32(tab);
4411 }
4412 }
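
/*
 * Worked example (added, illustrative): with rix == 3 and the CCK rate
 * set {2, 4, 11, 22} (500 kbps units), the loop above emits
 * rs_table[0..3] for 11, 5.5, 2 and 1 Mbps in that order, and the
 * remaining rows repeat the 1 Mbps entry so the firmware always has a
 * valid fallback.
 */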
4413
4414 static void
4415 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4416 {
4417 struct iwm_vap *ivp = IWM_VAP(vap);
4418 int error;
4419
4420 /* Avoid Tx watchdog triggering, when transfers get dropped here. */
4421 sc->sc_tx_timer = 0;
4422
4423 ivp->iv_auth = 0;
4424 if (sc->sc_firmware_state == 3) {
4425 iwm_xmit_queue_drain(sc);
4426 // iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4427 error = iwm_rm_sta(sc, vap, TRUE);
4428 if (error) {
4429 device_printf(sc->sc_dev,
4430 "%s: Failed to remove station: %d\n",
4431 __func__, error);
4432 }
4433 }
4434 if (sc->sc_firmware_state == 3) {
4435 error = iwm_mac_ctxt_changed(sc, vap);
4436 if (error) {
4437 device_printf(sc->sc_dev,
4438 "%s: Failed to change mac context: %d\n",
4439 __func__, error);
4440 }
4441 }
4442 if (sc->sc_firmware_state == 3) {
4443 error = iwm_sf_update(sc, vap, FALSE);
4444 if (error) {
4445 device_printf(sc->sc_dev,
4446 "%s: Failed to update smart FIFO: %d\n",
4447 __func__, error);
4448 }
4449 }
4450 if (sc->sc_firmware_state == 3) {
4451 error = iwm_rm_sta_id(sc, vap);
4452 if (error) {
4453 device_printf(sc->sc_dev,
4454 "%s: Failed to remove station id: %d\n",
4455 __func__, error);
4456 }
4457 }
4458 if (sc->sc_firmware_state == 3) {
4459 error = iwm_update_quotas(sc, NULL);
4460 if (error) {
4461 device_printf(sc->sc_dev,
4462 "%s: Failed to update PHY quota: %d\n",
4463 __func__, error);
4464 }
4465 }
4466 if (sc->sc_firmware_state == 3) {
4467 /* XXX Might need to specify bssid correctly. */
4468 error = iwm_mac_ctxt_changed(sc, vap);
4469 if (error) {
4470 device_printf(sc->sc_dev,
4471 "%s: Failed to change mac context: %d\n",
4472 __func__, error);
4473 }
4474 }
4475 if (sc->sc_firmware_state == 3) {
4476 sc->sc_firmware_state = 2;
4477 }
4478 if (sc->sc_firmware_state > 1) {
4479 error = iwm_binding_remove_vif(sc, ivp);
4480 if (error) {
4481 device_printf(sc->sc_dev,
4482 "%s: Failed to remove channel ctx: %d\n",
4483 __func__, error);
4484 }
4485 }
4486 if (sc->sc_firmware_state > 1) {
4487 sc->sc_firmware_state = 1;
4488 }
4489 ivp->phy_ctxt = NULL;
4490 if (sc->sc_firmware_state > 0) {
4491 error = iwm_mac_ctxt_changed(sc, vap);
4492 if (error) {
4493 device_printf(sc->sc_dev,
4494 "%s: Failed to change mac context: %d\n",
4495 __func__, error);
4496 }
4497 }
4498 if (sc->sc_firmware_state > 0) {
4499 error = iwm_power_update_mac(sc);
4500 if (error != 0) {
4501 device_printf(sc->sc_dev,
4502 "%s: failed to update power management\n",
4503 __func__);
4504 }
4505 }
4506 sc->sc_firmware_state = 0;
4507 }
4508
4509 static int
4510 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4511 {
4512 struct iwm_vap *ivp = IWM_VAP(vap);
4513 struct ieee80211com *ic = vap->iv_ic;
4514 struct iwm_softc *sc = ic->ic_softc;
4515 struct iwm_node *in;
4516 int error;
4517
4518 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4519 "switching state %s -> %s arg=0x%x\n",
4520 ieee80211_state_name[vap->iv_state],
4521 ieee80211_state_name[nstate],
4522 arg);
4523
4524 IEEE80211_UNLOCK(ic);
4525 IWM_LOCK(sc);
4526
4527 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4528 (nstate == IEEE80211_S_AUTH ||
4529 nstate == IEEE80211_S_ASSOC ||
4530 nstate == IEEE80211_S_RUN)) {
4531 /* Stop blinking for a scan, when authenticating. */
4532 iwm_led_blink_stop(sc);
4533 }
4534
4535 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4536 iwm_led_disable(sc);
4537 /* disable beacon filtering if we're hopping out of RUN */
4538 iwm_disable_beacon_filter(sc);
4539 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4540 in->in_assoc = 0;
4541 }
4542
4543 if ((vap->iv_state == IEEE80211_S_AUTH ||
4544 vap->iv_state == IEEE80211_S_ASSOC ||
4545 vap->iv_state == IEEE80211_S_RUN) &&
4546 (nstate == IEEE80211_S_INIT ||
4547 nstate == IEEE80211_S_SCAN ||
4548 nstate == IEEE80211_S_AUTH)) {
4549 iwm_stop_session_protection(sc, ivp);
4550 }
4551
4552 if ((vap->iv_state == IEEE80211_S_RUN ||
4553 vap->iv_state == IEEE80211_S_ASSOC) &&
4554 nstate == IEEE80211_S_INIT) {
4555 /*
4556 * In this case, iv_newstate() wants to send an 80211 frame on
4557 * the network that we are leaving. So we need to call it,
4558 * before tearing down all the firmware state.
4559 */
4560 IWM_UNLOCK(sc);
4561 IEEE80211_LOCK(ic);
4562 ivp->iv_newstate(vap, nstate, arg);
4563 IEEE80211_UNLOCK(ic);
4564 IWM_LOCK(sc);
4565 iwm_bring_down_firmware(sc, vap);
4566 IWM_UNLOCK(sc);
4567 IEEE80211_LOCK(ic);
4568 return 0;
4569 }
4570
4571 switch (nstate) {
4572 case IEEE80211_S_INIT:
4573 case IEEE80211_S_SCAN:
4574 break;
4575
4576 case IEEE80211_S_AUTH:
4577 iwm_bring_down_firmware(sc, vap);
4578 if ((error = iwm_auth(vap, sc)) != 0) {
4579 device_printf(sc->sc_dev,
4580 "%s: could not move to auth state: %d\n",
4581 __func__, error);
4582 iwm_bring_down_firmware(sc, vap);
4583 IWM_UNLOCK(sc);
4584 IEEE80211_LOCK(ic);
4585 return 1;
4586 }
4587 break;
4588
4589 case IEEE80211_S_ASSOC:
4590 /*
4591 * EBS may be disabled due to previous failures reported by FW.
4592 * Reset EBS status here assuming environment has been changed.
4593 */
4594 sc->last_ebs_successful = TRUE;
4595 break;
4596
4597 case IEEE80211_S_RUN:
4598 in = IWM_NODE(vap->iv_bss);
4599 /* Update the association state, now we have it all */
4600 /* (e.g., the associd comes in at this point). */
4601 error = iwm_update_sta(sc, in);
4602 if (error != 0) {
4603 device_printf(sc->sc_dev,
4604 "%s: failed to update STA\n", __func__);
4605 IWM_UNLOCK(sc);
4606 IEEE80211_LOCK(ic);
4607 return error;
4608 }
4609 in->in_assoc = 1;
4610 error = iwm_mac_ctxt_changed(sc, vap);
4611 if (error != 0) {
4612 device_printf(sc->sc_dev,
4613 "%s: failed to update MAC: %d\n", __func__, error);
4614 }
4615
4616 iwm_sf_update(sc, vap, FALSE);
4617 iwm_enable_beacon_filter(sc, ivp);
4618 iwm_power_update_mac(sc);
4619 iwm_update_quotas(sc, ivp);
4620 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4621 iwm_setrates(sc, in, rix);
4622
4623 if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4624 device_printf(sc->sc_dev,
4625 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4626 }
4627
4628 iwm_led_enable(sc);
4629 break;
4630
4631 default:
4632 break;
4633 }
4634 IWM_UNLOCK(sc);
4635 IEEE80211_LOCK(ic);
4636
4637 return (ivp->iv_newstate(vap, nstate, arg));
4638 }
4639
4640 void
4641 iwm_endscan_cb(void *arg, int pending)
4642 {
4643 struct iwm_softc *sc = arg;
4644 struct ieee80211com *ic = &sc->sc_ic;
4645
4646 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4647 "%s: scan ended\n",
4648 __func__);
4649
4650 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4651 }
4652
4653 static int
4654 iwm_send_bt_init_conf(struct iwm_softc *sc)
4655 {
4656 struct iwm_bt_coex_cmd bt_cmd;
4657
4658 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4659 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4660
4661 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4662 &bt_cmd);
4663 }
4664
4665 static boolean_t
4666 iwm_is_lar_supported(struct iwm_softc *sc)
4667 {
4668 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4669 boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4670
4671 if (iwm_lar_disable)
4672 return FALSE;
4673
4674 /*
4675 * Enable LAR only if it is supported by the FW (TLV) &&
4676 * enabled in the NVM
4677 */
4678 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4679 return nvm_lar && tlv_lar;
4680 else
4681 return tlv_lar;
4682 }
4683
4684 static boolean_t
4685 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4686 {
4687 return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4688 iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4689 }
4690
4691 static int
4692 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4693 {
4694 struct iwm_mcc_update_cmd mcc_cmd;
4695 struct iwm_host_cmd hcmd = {
4696 .id = IWM_MCC_UPDATE_CMD,
4697 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4698 .data = { &mcc_cmd },
4699 };
4700 int ret;
4701 #ifdef IWM_DEBUG
4702 struct iwm_rx_packet *pkt;
4703 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4704 struct iwm_mcc_update_resp *mcc_resp;
4705 int n_channels;
4706 uint16_t mcc;
4707 #endif
4708 int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4709
4710 if (!iwm_is_lar_supported(sc)) {
4711 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4712 __func__);
4713 return 0;
4714 }
4715
4716 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4717 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4718 if (iwm_is_wifi_mcc_supported(sc))
4719 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4720 else
4721 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4722
4723 if (resp_v2)
4724 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4725 else
4726 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4727
4728 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4729 "send MCC update to FW with '%c%c' src = %d\n",
4730 alpha2[0], alpha2[1], mcc_cmd.source_id);
4731
4732 ret = iwm_send_cmd(sc, &hcmd);
4733 if (ret)
4734 return ret;
4735
4736 #ifdef IWM_DEBUG
4737 pkt = hcmd.resp_pkt;
4738
4739 /* Extract MCC response */
4740 if (resp_v2) {
4741 mcc_resp = (void *)pkt->data;
4742 mcc = mcc_resp->mcc;
4743 n_channels = le32toh(mcc_resp->n_channels);
4744 } else {
4745 mcc_resp_v1 = (void *)pkt->data;
4746 mcc = mcc_resp_v1->mcc;
4747 n_channels = le32toh(mcc_resp_v1->n_channels);
4748 }
4749
4750 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4751 if (mcc == 0)
4752 mcc = 0x3030; /* "00" - world */
4753
4754 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4755 "regulatory domain '%c%c' (%d channels available)\n",
4756 mcc >> 8, mcc & 0xff, n_channels);
4757 #endif
4758 iwm_free_resp(sc, &hcmd);
4759
4760 return 0;
4761 }
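
/*
 * Encoding sketch (added, illustrative; not driver code): the MCC is the
 * two ASCII country-code bytes packed into a uint16_t, so "ZZ" encodes
 * as ('Z' << 8) | 'Z' == 0x5a5a and the world domain "00" as 0x3030,
 * which is also the value substituted above when the firmware reports 0.
 */
#if 0
static uint16_t
iwm_mcc_from_alpha2(const char *alpha2)
{
	return ((uint16_t)alpha2[0] << 8 | (uint8_t)alpha2[1]);
}
#endif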
4762
4763 static void
4764 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4765 {
4766 struct iwm_host_cmd cmd = {
4767 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4768 .len = { sizeof(uint32_t), },
4769 .data = { &backoff, },
4770 };
4771
4772 if (iwm_send_cmd(sc, &cmd) != 0) {
4773 device_printf(sc->sc_dev,
4774 "failed to change thermal tx backoff\n");
4775 }
4776 }
4777
4778 static int
4779 iwm_init_hw(struct iwm_softc *sc)
4780 {
4781 struct ieee80211com *ic = &sc->sc_ic;
4782 int error, i, ac;
4783
4784 sc->sf_state = IWM_SF_UNINIT;
4785
4786 if ((error = iwm_start_hw(sc)) != 0) {
4787 printf("iwm_start_hw: failed %d\n", error);
4788 return error;
4789 }
4790
4791 if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4792 printf("iwm_run_init_ucode: failed %d\n", error);
4793 return error;
4794 }
4795
4796 /*
4797 * We should stop and restart the HW, since the INIT
4798 * image has just been loaded.
4799 */
4800 iwm_stop_device(sc);
4801 sc->sc_ps_disabled = FALSE;
4802 if ((error = iwm_start_hw(sc)) != 0) {
4803 device_printf(sc->sc_dev, "could not initialize hardware\n");
4804 return error;
4805 }
4806
4807 /* Restart, this time with the regular firmware */
4808 error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4809 if (error) {
4810 device_printf(sc->sc_dev, "could not load firmware\n");
4811 goto error;
4812 }
4813
4814 error = iwm_sf_update(sc, NULL, FALSE);
4815 if (error)
4816 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4817
4818 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4819 device_printf(sc->sc_dev, "bt init conf failed\n");
4820 goto error;
4821 }
4822
4823 error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4824 if (error != 0) {
4825 device_printf(sc->sc_dev, "antenna config failed\n");
4826 goto error;
4827 }
4828
4829 /* Send phy db control command and then phy db calibration */
4830 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4831 goto error;
4832
4833 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4834 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4835 goto error;
4836 }
4837
4838 /* Add auxiliary station for scanning */
4839 if ((error = iwm_add_aux_sta(sc)) != 0) {
4840 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4841 goto error;
4842 }
4843
4844 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4845 /*
4846 * The channel used here isn't relevant as it's
4847 * going to be overwritten in the other flows.
4848 * For now use the first channel we have.
4849 */
4850 if ((error = iwm_phy_ctxt_add(sc,
4851 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4852 goto error;
4853 }
4854
4855 /* Initialize tx backoffs to the minimum. */
4856 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4857 iwm_tt_tx_backoff(sc, 0);
4858
4859 if (iwm_config_ltr(sc) != 0)
4860 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4861
4862 error = iwm_power_update_device(sc);
4863 if (error)
4864 goto error;
4865
4866 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4867 goto error;
4868
4869 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4870 if ((error = iwm_config_umac_scan(sc)) != 0)
4871 goto error;
4872 }
4873
4874 /* Enable Tx queues. */
4875 for (ac = 0; ac < WME_NUM_AC; ac++) {
4876 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4877 iwm_ac_to_tx_fifo[ac]);
4878 if (error)
4879 goto error;
4880 }
4881
4882 if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4883 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4884 goto error;
4885 }
4886
4887 return 0;
4888
4889 error:
4890 iwm_stop_device(sc);
4891 return error;
4892 }
4893
4894 /* Allow multicast from our BSSID. */
4895 static int
4896 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4897 {
4898 struct ieee80211_node *ni = vap->iv_bss;
4899 struct iwm_mcast_filter_cmd *cmd;
4900 size_t size;
4901 int error;
4902
4903 size = roundup(sizeof(*cmd), 4);
4904 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4905 if (cmd == NULL)
4906 return ENOMEM;
4907 cmd->filter_own = 1;
4908 cmd->port_id = 0;
4909 cmd->count = 0;
4910 cmd->pass_all = 1;
4911 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4912
4913 error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4914 IWM_CMD_SYNC, size, cmd);
4915 free(cmd, M_DEVBUF);
4916
4917 return (error);
4918 }
4919
4920 /*
4921 * ifnet interfaces
4922 */
4923
4924 static void
4925 iwm_init(struct iwm_softc *sc)
4926 {
4927 int error;
4928
4929 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4930 return;
4931 }
4932 sc->sc_generation++;
4933 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4934
4935 if ((error = iwm_init_hw(sc)) != 0) {
4936 printf("iwm_init_hw failed %d\n", error);
4937 iwm_stop(sc);
4938 return;
4939 }
4940
4941 /*
4942 * Ok, firmware loaded and we are jogging
4943 */
4944 sc->sc_flags |= IWM_FLAG_HW_INITED;
4945 }
4946
4947 static int
4948 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4949 {
4950 struct iwm_softc *sc;
4951 int error;
4952
4953 sc = ic->ic_softc;
4954
4955 IWM_LOCK(sc);
4956 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4957 IWM_UNLOCK(sc);
4958 return (ENXIO);
4959 }
4960 error = mbufq_enqueue(&sc->sc_snd, m);
4961 if (error) {
4962 IWM_UNLOCK(sc);
4963 return (error);
4964 }
4965 iwm_start(sc);
4966 IWM_UNLOCK(sc);
4967 return (0);
4968 }
4969
4970 /*
4971 * Dequeue packets from sendq and call send.
4972 */
4973 static void
4974 iwm_start(struct iwm_softc *sc)
4975 {
4976 struct ieee80211_node *ni;
4977 struct mbuf *m;
4978 int ac = 0;
4979
4980 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4981 while (sc->qfullmsk == 0 &&
4982 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4983 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4984 if (iwm_tx(sc, m, ni, ac) != 0) {
4985 if_inc_counter(ni->ni_vap->iv_ifp,
4986 IFCOUNTER_OERRORS, 1);
4987 ieee80211_free_node(ni);
4988 continue;
4989 }
4990 if (sc->sc_tx_timer == 0) {
4991 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4992 sc);
4993 }
4994 sc->sc_tx_timer = 15;
4995 }
4996 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4997 }
4998
4999 static void
5000 iwm_stop(struct iwm_softc *sc)
5001 {
5002
5003 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5004 sc->sc_flags |= IWM_FLAG_STOPPED;
5005 sc->sc_generation++;
5006 iwm_led_blink_stop(sc);
5007 sc->sc_tx_timer = 0;
5008 iwm_stop_device(sc);
5009 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5010 }
5011
5012 static void
5013 iwm_watchdog(void *arg)
5014 {
5015 struct iwm_softc *sc = arg;
5016 struct ieee80211com *ic = &sc->sc_ic;
5017
5018 if (sc->sc_attached == 0)
5019 return;
5020
5021 if (sc->sc_tx_timer > 0) {
5022 if (--sc->sc_tx_timer == 0) {
5023 device_printf(sc->sc_dev, "device timeout\n");
5024 #ifdef IWM_DEBUG
5025 iwm_nic_error(sc);
5026 #endif
5027 ieee80211_restart_all(ic);
5028 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5029 return;
5030 }
5031 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5032 }
5033 }
5034
5035 static void
5036 iwm_parent(struct ieee80211com *ic)
5037 {
5038 struct iwm_softc *sc = ic->ic_softc;
5039 int startall = 0;
5040 int rfkill = 0;
5041
5042 IWM_LOCK(sc);
5043 if (ic->ic_nrunning > 0) {
5044 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5045 iwm_init(sc);
5046 rfkill = iwm_check_rfkill(sc);
5047 if (!rfkill)
5048 startall = 1;
5049 }
5050 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5051 iwm_stop(sc);
5052 IWM_UNLOCK(sc);
5053 if (startall)
5054 ieee80211_start_all(ic);
5055 else if (rfkill)
5056 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5057 }
5058
5059 static void
5060 iwm_rftoggle_task(void *arg, int npending __unused)
5061 {
5062 struct iwm_softc *sc = arg;
5063 struct ieee80211com *ic = &sc->sc_ic;
5064 int rfkill;
5065
5066 IWM_LOCK(sc);
5067 rfkill = iwm_check_rfkill(sc);
5068 IWM_UNLOCK(sc);
5069 if (rfkill) {
5070 device_printf(sc->sc_dev,
5071 "%s: rfkill switch, disabling interface\n", __func__);
5072 ieee80211_suspend_all(ic);
5073 ieee80211_notify_radio(ic, 0);
5074 } else {
5075 device_printf(sc->sc_dev,
5076 "%s: rfkill cleared, re-enabling interface\n", __func__);
5077 ieee80211_resume_all(ic);
5078 ieee80211_notify_radio(ic, 1);
5079 }
5080 }
5081
5082 /*
5083 * The interrupt side of things
5084 */
5085
5086 /*
5087 * error dumping routines are from iwlwifi/mvm/utils.c
5088 */
5089
5090 /*
5091 * Note: This structure is read from the device with IO accesses,
5092 * and the reading already does the endian conversion. As it is
5093 * read with uint32_t-sized accesses, any members with a different size
5094 * need to be ordered correctly though!
5095 */
5096 struct iwm_error_event_table {
5097 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5098 uint32_t error_id; /* type of error */
5099 uint32_t trm_hw_status0; /* TRM HW status */
5100 uint32_t trm_hw_status1; /* TRM HW status */
5101 uint32_t blink2; /* branch link */
5102 uint32_t ilink1; /* interrupt link */
5103 uint32_t ilink2; /* interrupt link */
5104 uint32_t data1; /* error-specific data */
5105 uint32_t data2; /* error-specific data */
5106 uint32_t data3; /* error-specific data */
5107 uint32_t bcon_time; /* beacon timer */
5108 uint32_t tsf_low; /* network timestamp function timer */
5109 uint32_t tsf_hi; /* network timestamp function timer */
5110 uint32_t gp1; /* GP1 timer register */
5111 uint32_t gp2; /* GP2 timer register */
5112 uint32_t fw_rev_type; /* firmware revision type */
5113 uint32_t major; /* uCode version major */
5114 uint32_t minor; /* uCode version minor */
5115 uint32_t hw_ver; /* HW Silicon version */
5116 uint32_t brd_ver; /* HW board version */
5117 uint32_t log_pc; /* log program counter */
5118 uint32_t frame_ptr; /* frame pointer */
5119 uint32_t stack_ptr; /* stack pointer */
5120 uint32_t hcmd; /* last host command header */
5121 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5122 * rxtx_flag */
5123 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5124 * host_flag */
5125 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5126 * enc_flag */
5127 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5128 * time_flag */
5129 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5130 * wico interrupt */
5131 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
5132 uint32_t wait_event; /* wait event() caller address */
5133 uint32_t l2p_control; /* L2pControlField */
5134 uint32_t l2p_duration; /* L2pDurationField */
5135 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5136 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5137 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5138 * (LMPM_PMG_SEL) */
5139 uint32_t u_timestamp; /* date and time of the
5140 * firmware compilation */
5141 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5142 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5143
5144 /*
5145 * UMAC error struct - relevant starting from family 8000 chip.
5146 * Note: This structure is read from the device with IO accesses,
5147 * and the reading already does the endian conversion. As it is
5148 * read with u32-sized accesses, any members with a different size
5149 * need to be ordered correctly though!
5150 */
5151 struct iwm_umac_error_event_table {
5152 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5153 uint32_t error_id; /* type of error */
5154 uint32_t blink1; /* branch link */
5155 uint32_t blink2; /* branch link */
5156 uint32_t ilink1; /* interrupt link */
5157 uint32_t ilink2; /* interrupt link */
5158 uint32_t data1; /* error-specific data */
5159 uint32_t data2; /* error-specific data */
5160 uint32_t data3; /* error-specific data */
5161 uint32_t umac_major;
5162 uint32_t umac_minor;
5163 uint32_t frame_pointer; /* core register 27*/
5164 uint32_t stack_pointer; /* core register 28 */
5165 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5166 uint32_t nic_isr_pref; /* ISR status register */
5167 } __packed;
5168
5169 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5170 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
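
/*
 * Note (added): iwm_read_mem() takes its count in dwords, hence the
 * sizeof(table)/sizeof(uint32_t) word counts passed below; the error
 * tables are declared with uint32_t members and __packed precisely so a
 * straight dword copy lands every field in place.
 */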
5171
5172 #ifdef IWM_DEBUG
5173 struct {
5174 const char *name;
5175 uint8_t num;
5176 } advanced_lookup[] = {
5177 { "NMI_INTERRUPT_WDG", 0x34 },
5178 { "SYSASSERT", 0x35 },
5179 { "UCODE_VERSION_MISMATCH", 0x37 },
5180 { "BAD_COMMAND", 0x38 },
5181 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5182 { "FATAL_ERROR", 0x3D },
5183 { "NMI_TRM_HW_ERR", 0x46 },
5184 { "NMI_INTERRUPT_TRM", 0x4C },
5185 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5186 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5187 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5188 { "NMI_INTERRUPT_HOST", 0x66 },
5189 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5190 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5191 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5192 { "ADVANCED_SYSASSERT", 0 },
5193 };
5194
5195 static const char *
5196 iwm_desc_lookup(uint32_t num)
5197 {
5198 int i;
5199
5200 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5201 if (advanced_lookup[i].num == num)
5202 return advanced_lookup[i].name;
5203
5204 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5205 return advanced_lookup[i].name;
5206 }
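
/*
 * Note (added): the loop above deliberately stops one entry short of the
 * table end, so an unknown error id falls through to the final
 * "ADVANCED_SYSASSERT" catch-all entry.
 */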
5207
5208 static void
5209 iwm_nic_umac_error(struct iwm_softc *sc)
5210 {
5211 struct iwm_umac_error_event_table table;
5212 uint32_t base;
5213
5214 base = sc->umac_error_event_table;
5215
5216 if (base < 0x800000) {
5217 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5218 base);
5219 return;
5220 }
5221
5222 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5223 device_printf(sc->sc_dev, "reading errlog failed\n");
5224 return;
5225 }
5226
5227 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5228 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5229 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5230 sc->sc_flags, table.valid);
5231 }
5232
5233 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5234 iwm_desc_lookup(table.error_id));
5235 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5236 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5237 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5238 table.ilink1);
5239 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5240 table.ilink2);
5241 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5242 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5243 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5244 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5245 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5246 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5247 table.frame_pointer);
5248 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5249 table.stack_pointer);
5250 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5251 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5252 table.nic_isr_pref);
5253 }
5254
5255 /*
5256 * Support for dumping the error log seemed like a good idea ...
5257 * but it's mostly hex junk and the only sensible thing is the
5258 * hw/ucode revision (which we know anyway). Since it's here,
5259 * I'll just leave it in, just in case e.g. the Intel guys want to
5260 * help us decipher some "ADVANCED_SYSASSERT" later.
5261 */
5262 static void
5263 iwm_nic_error(struct iwm_softc *sc)
5264 {
5265 struct iwm_error_event_table table;
5266 uint32_t base;
5267
5268 device_printf(sc->sc_dev, "dumping device error log\n");
5269 base = sc->error_event_table[0];
5270 if (base < 0x800000) {
5271 device_printf(sc->sc_dev,
5272 "Invalid error log pointer 0x%08x\n", base);
5273 return;
5274 }
5275
5276 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5277 device_printf(sc->sc_dev, "reading errlog failed\n");
5278 return;
5279 }
5280
5281 if (!table.valid) {
5282 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5283 return;
5284 }
5285
5286 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5287 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5288 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5289 sc->sc_flags, table.valid);
5290 }
5291
5292 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5293 iwm_desc_lookup(table.error_id));
5294 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5295 table.trm_hw_status0);
5296 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5297 table.trm_hw_status1);
5298 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5299 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5300 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5301 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5302 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5303 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5304 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5305 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5306 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5307 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5308 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5309 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5310 table.fw_rev_type);
5311 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5312 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5313 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5314 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5315 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5316 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5317 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5318 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5319 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5320 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5321 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5322 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5323 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5324 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5325 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5326 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5327 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5328 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5329 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5330
5331 if (sc->umac_error_event_table)
5332 iwm_nic_umac_error(sc);
5333 }
5334 #endif
5335
5336 static void
5337 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5338 {
5339 struct ieee80211com *ic = &sc->sc_ic;
5340 struct iwm_cmd_response *cresp;
5341 struct mbuf *m1;
5342 uint32_t offset = 0;
5343 uint32_t maxoff = IWM_RBUF_SIZE;
5344 uint32_t nextoff;
5345 boolean_t stolen = FALSE;
5346
5347 #define HAVEROOM(a) \
5348 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5349
5350 while (HAVEROOM(offset)) {
5351 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5352 offset);
5353 int qid, idx, code, len;
5354
5355 qid = pkt->hdr.qid;
5356 idx = pkt->hdr.idx;
5357
5358 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5359
5360 /*
5361 * We randomly get these from the firmware; no idea why. They
5362 * at least seem harmless, so just ignore them for now.
5363 */
5364 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5365 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5366 break;
5367 }
5368
5369 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5370 "rx packet qid=%d idx=%d type=%x\n",
5371 qid & ~0x80, pkt->hdr.idx, code);
5372
5373 len = iwm_rx_packet_len(pkt);
5374 len += sizeof(uint32_t); /* account for status word */
5375 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5376
5377 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5378
5379 switch (code) {
5380 case IWM_REPLY_RX_PHY_CMD:
5381 iwm_rx_rx_phy_cmd(sc, pkt);
5382 break;
5383
5384 case IWM_REPLY_RX_MPDU_CMD: {
5385 /*
5386 * If this is the last frame in the RX buffer, we
5387 * can directly feed the mbuf to the sharks here.
5388 */
5389 struct iwm_rx_packet *nextpkt = mtodoff(m,
5390 struct iwm_rx_packet *, nextoff);
5391 if (!HAVEROOM(nextoff) ||
5392 (nextpkt->hdr.code == 0 &&
5393 (nextpkt->hdr.qid & ~0x80) == 0 &&
5394 nextpkt->hdr.idx == 0) ||
5395 (nextpkt->len_n_flags ==
5396 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5397 if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5398 stolen = FALSE;
5399 /* Make sure we abort the loop */
5400 nextoff = maxoff;
5401 }
5402 break;
5403 }
5404
5405 /*
5406 * Use m_copym instead of m_split, because that
5407 * makes it easier to keep a valid rx buffer in
5408 * the ring, when iwm_rx_mpdu() fails.
5409 *
5410 * We need to start m_copym() at offset 0, to get the
5411 * M_PKTHDR flag preserved.
5412 */
5413 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5414 if (m1) {
5415 if (iwm_rx_mpdu(sc, m1, offset, stolen))
5416 stolen = TRUE;
5417 else
5418 m_freem(m1);
5419 }
5420 break;
5421 }
5422
5423 case IWM_TX_CMD:
5424 iwm_rx_tx_cmd(sc, pkt);
5425 break;
5426
5427 case IWM_MISSED_BEACONS_NOTIFICATION: {
5428 struct iwm_missed_beacons_notif *resp;
5429 int missed;
5430
5431 /* XXX look at mac_id to determine interface ID */
5432 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5433
5434 resp = (void *)pkt->data;
5435 missed = le32toh(resp->consec_missed_beacons);
5436
5437 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5438 "%s: MISSED_BEACON: mac_id=%d, "
5439 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5440 "num_rx=%d\n",
5441 __func__,
5442 le32toh(resp->mac_id),
5443 le32toh(resp->consec_missed_beacons_since_last_rx),
5444 le32toh(resp->consec_missed_beacons),
5445 le32toh(resp->num_expected_beacons),
5446 le32toh(resp->num_recvd_beacons));
5447
5448 /* Be paranoid */
5449 if (vap == NULL)
5450 break;
5451
5452 /* XXX no net80211 locking? */
5453 if (vap->iv_state == IEEE80211_S_RUN &&
5454 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5455 if (missed > vap->iv_bmissthreshold) {
5456 /* XXX bad locking; turn into task */
5457 IWM_UNLOCK(sc);
5458 ieee80211_beacon_miss(ic);
5459 IWM_LOCK(sc);
5460 }
5461 }
5462
5463 break;
5464 }
5465
5466 case IWM_MFUART_LOAD_NOTIFICATION:
5467 break;
5468
5469 case IWM_ALIVE:
5470 break;
5471
5472 case IWM_CALIB_RES_NOTIF_PHY_DB:
5473 break;
5474
5475 case IWM_STATISTICS_NOTIFICATION:
5476 iwm_handle_rx_statistics(sc, pkt);
5477 break;
5478
5479 case IWM_NVM_ACCESS_CMD:
5480 case IWM_MCC_UPDATE_CMD:
5481 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5482 memcpy(sc->sc_cmd_resp,
5483 pkt, sizeof(sc->sc_cmd_resp));
5484 }
5485 break;
5486
5487 case IWM_MCC_CHUB_UPDATE_CMD: {
5488 struct iwm_mcc_chub_notif *notif;
5489 notif = (void *)pkt->data;
5490
5491 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5492 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5493 sc->sc_fw_mcc[2] = '\0';
5494 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5495 "fw source %d sent CC '%s'\n",
5496 notif->source_id, sc->sc_fw_mcc);
5497 break;
5498 }
5499
5500 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5501 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5502 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5503 struct iwm_dts_measurement_notif_v1 *notif;
5504
5505 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5506 device_printf(sc->sc_dev,
5507 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5508 break;
5509 }
5510 notif = (void *)pkt->data;
5511 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5512 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5513 notif->temp);
5514 break;
5515 }
5516
5517 case IWM_PHY_CONFIGURATION_CMD:
5518 case IWM_TX_ANT_CONFIGURATION_CMD:
5519 case IWM_ADD_STA:
5520 case IWM_MAC_CONTEXT_CMD:
5521 case IWM_REPLY_SF_CFG_CMD:
5522 case IWM_POWER_TABLE_CMD:
5523 case IWM_LTR_CONFIG:
5524 case IWM_PHY_CONTEXT_CMD:
5525 case IWM_BINDING_CONTEXT_CMD:
5526 case IWM_TIME_EVENT_CMD:
5527 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5528 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5529 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5530 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5531 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5532 case IWM_REPLY_BEACON_FILTERING_CMD:
5533 case IWM_MAC_PM_POWER_TABLE:
5534 case IWM_TIME_QUOTA_CMD:
5535 case IWM_REMOVE_STA:
5536 case IWM_TXPATH_FLUSH:
5537 case IWM_LQ_CMD:
5538 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5539 IWM_FW_PAGING_BLOCK_CMD):
5540 case IWM_BT_CONFIG:
5541 case IWM_REPLY_THERMAL_MNG_BACKOFF:
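/*
 * Generic command responses: if a thread is waiting on this
 * queue-id/index pair, stash the packet header plus the
 * generic status response for it to pick up.
 */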
5542 cresp = (void *)pkt->data;
5543 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5544 memcpy(sc->sc_cmd_resp,
5545 pkt, sizeof(*pkt)+sizeof(*cresp));
5546 }
5547 break;
5548
5549 /* ignore */
5550 case IWM_PHY_DB_CMD:
5551 break;
5552
5553 case IWM_INIT_COMPLETE_NOTIF:
5554 break;
5555
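/*
 * A legacy (LMAC) scan finished; clear the scan-running flag
 * and schedule the end-scan task so net80211 can proceed.
 */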
5556 case IWM_SCAN_OFFLOAD_COMPLETE:
5557 iwm_rx_lmac_scan_complete_notif(sc, pkt);
5558 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5559 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5560 ieee80211_runtask(ic, &sc->sc_es_task);
5561 }
5562 break;
5563
5564 case IWM_SCAN_ITERATION_COMPLETE: {
5565 struct iwm_lmac_scan_complete_notif *notif;
5566 notif = (void *)pkt->data;
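/* The notification payload is currently unused. */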
5567 break;
5568 }
5569
5570 case IWM_SCAN_COMPLETE_UMAC:
5571 iwm_rx_umac_scan_complete_notif(sc, pkt);
5572 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5573 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5574 ieee80211_runtask(ic, &sc->sc_es_task);
5575 }
5576 break;
5577
5578 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5579 struct iwm_umac_scan_iter_complete_notif *notif;
5580 notif = (void *)pkt->data;
5581
5582 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5583 "complete, status=0x%x, %d channels scanned\n",
5584 notif->status, notif->scanned_channels);
5585 break;
5586 }
5587
5588 case IWM_REPLY_ERROR: {
5589 struct iwm_error_resp *resp;
5590 resp = (void *)pkt->data;
5591
5592 device_printf(sc->sc_dev,
5593 "firmware error 0x%x, cmd 0x%x\n",
5594 le32toh(resp->error_type),
5595 resp->cmd_id);
5596 break;
5597 }
5598
5599 case IWM_TIME_EVENT_NOTIFICATION:
5600 iwm_rx_time_event_notif(sc, pkt);
5601 break;
5602
5603 /*
5604 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5605 * messages. Just ignore them for now.
5606 */
5607 case IWM_DEBUG_LOG_MSG:
5608 break;
5609
5610 case IWM_MCAST_FILTER_CMD:
5611 break;
5612
5613 case IWM_SCD_QUEUE_CFG: {
5614 struct iwm_scd_txq_cfg_rsp *rsp;
5615 rsp = (void *)pkt->data;
5616
5617 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5618 "queue cfg token=0x%x sta_id=%d "
5619 "tid=%d scd_queue=%d\n",
5620 rsp->token, rsp->sta_id, rsp->tid,
5621 rsp->scd_queue);
5622 break;
5623 }
5624
5625 default:
5626 device_printf(sc->sc_dev,
5627 "code %x, frame %d/%d %x unhandled\n",
5628 code, qid & ~0x80, idx, pkt->len_n_flags);
5629 break;
5630 }
5631
5632 /*
5633 * Why test bit 0x80? The Linux driver:
5634 *
5635 * There is one exception: uCode sets bit 15 when it
5636 * originates the response/notification, i.e. when the
5637 * response/notification is not a direct response to a
5638 * command sent by the driver. For example, uCode issues
5639 * IWM_REPLY_RX when it sends a received frame to the driver;
5640 * it is not a direct response to any driver command.
5641 *
5642 * Ok, so since when is 7 == 15? Well, the Linux driver
5643 * uses a slightly different format for pkt->hdr, and "qid"
5644 * is actually the upper byte of a two-byte field.
5645 */
5646 if (!(qid & (1 << 7)))
5647 iwm_cmd_done(sc, pkt);
5648
5649 offset = nextoff;
5650 }
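/*
 * If a copy of this rx buffer was handed up the stack, the
 * original mbuf is still owned by us and must be freed here.
 */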
5651 if (stolen)
5652 m_freem(m);
5653 #undef HAVEROOM
5654 }
5655
5656 /*
5657 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5658 * Basic structure from if_iwn
5659 */
5660 static void
5661 iwm_notif_intr(struct iwm_softc *sc)
5662 {
5663 int count;
5664 uint32_t wreg;
5665 uint16_t hw;
5666
5667 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5668 BUS_DMASYNC_POSTREAD);
5669
5670 if (sc->cfg->mqrx_supported) {
5671 count = IWM_RX_MQ_RING_COUNT;
5672 wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5673 } else {
5674 count = IWM_RX_LEGACY_RING_COUNT;
5675 wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5676 }
5677
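/*
 * closed_rb_num is the index of the most recently closed
 * receive buffer (mod 4096); process ring entries until we
 * catch up to it.
 */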
5678 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5679
5680 /*
5681 * Process responses
5682 */
5683 while (sc->rxq.cur != hw) {
5684 struct iwm_rx_ring *ring = &sc->rxq;
5685 struct iwm_rx_data *data = &ring->data[ring->cur];
5686
5687 bus_dmamap_sync(ring->data_dmat, data->map,
5688 BUS_DMASYNC_POSTREAD);
5689
5690 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5691 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5692 iwm_handle_rxb(sc, data->m);
5693
5694 ring->cur = (ring->cur + 1) % count;
5695 }
5696
5697 /*
5698 * Tell the firmware that it can reuse the ring entries that
5699 * we have just processed.
5700 * The hardware seems to get upset unless the write
5701 * pointer is aligned down to a multiple of 8.
5702 */
5703 hw = (hw == 0) ? count - 1 : hw - 1;
5704 IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5705 }
5706
5707 static void
5708 iwm_intr(void *arg)
5709 {
5710 struct iwm_softc *sc = arg;
5711 int handled = 0;
5712 int r1, r2, rv = 0;
5713 int isperiodic = 0;
5714
5715 IWM_LOCK(sc);
5716 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5717
5718 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5719 uint32_t *ict = sc->ict_dma.vaddr;
5720 int tmp;
5721
5722 tmp = le32toh(ict[sc->ict_cur]);
5723 if (!tmp)
5724 goto out_ena;
5725
5726 /*
5727 * ok, there was something; keep draining ICT entries until we have them all.
5728 */
5729 r1 = r2 = 0;
5730 while (tmp) {
5731 r1 |= tmp;
5732 ict[sc->ict_cur] = 0;
5733 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5734 tmp = le32toh(ict[sc->ict_cur]);
5735 }
5736
5737 /* An all-ones value is bogus (e.g. hardware gone); treat it as no interrupt. */
5738 if (r1 == 0xffffffff)
5739 r1 = 0;
5740
5741 /* Linux w/a: coalescing can clear the RX bit (15) while bits 18/19 stay set; restore it, then expand the 16-bit ICT value into the CSR_INT layout (high byte -> bits 24-31). */
5742 if (r1 & 0xc0000)
5743 r1 |= 0x8000;
5744 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5745 } else {
5746 r1 = IWM_READ(sc, IWM_CSR_INT);
5747 /* "hardware gone" (where, fishing?) */
5748 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5749 goto out;
5750 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5751 }
5752 if (r1 == 0 && r2 == 0) {
5753 goto out_ena;
5754 }
5755
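/*
 * Acknowledge the interrupts we are about to handle; also ack
 * any bits outside our interrupt mask so they do not stick.
 */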
5756 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5757
5758 /* Safely ignore these bits for debug checks below */
5759 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5760
5761 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5762 int i;
5763 struct ieee80211com *ic = &sc->sc_ic;
5764 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5765
5766 #ifdef IWM_DEBUG
5767 iwm_nic_error(sc);
5768 #endif
5769 /* Dump driver status (TX and RX rings) while we're here. */
5770 device_printf(sc->sc_dev, "driver status:\n");
5771 for (i = 0; i < IWM_MAX_QUEUES; i++) {
5772 struct iwm_tx_ring *ring = &sc->txq[i];
5773 device_printf(sc->sc_dev,
5774 " tx ring %2d: qid=%-2d cur=%-3d "
5775 "queued=%-3d\n",
5776 i, ring->qid, ring->cur, ring->queued);
5777 }
5778 device_printf(sc->sc_dev,
5779 " rx ring: cur=%d\n", sc->rxq.cur);
5780 device_printf(sc->sc_dev,
5781 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5782
5783 /* Reset our firmware state tracking. */
5784 sc->sc_firmware_state = 0;
5785 /* Don't stop the device; just do a VAP restart */
5786 IWM_UNLOCK(sc);
5787
5788 if (vap == NULL) {
5789 printf("%s: null vap\n", __func__);
5790 return;
5791 }
5792
5793 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5794 "restarting\n", __func__, vap->iv_state);
5795
5796 ieee80211_restart_all(ic);
5797 return;
5798 }
5799
5800 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5801 handled |= IWM_CSR_INT_BIT_HW_ERR;
5802 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5803 iwm_stop(sc);
5804 rv = 1;
5805 goto out;
5806 }
5807
5808 /* firmware chunk loaded */
5809 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5810 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5811 handled |= IWM_CSR_INT_BIT_FH_TX;
5812 sc->sc_fw_chunk_done = 1;
5813 wakeup(&sc->sc_fw);
5814 }
5815
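/* The RF kill switch changed state; process it from the driver taskqueue. */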
5816 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5817 handled |= IWM_CSR_INT_BIT_RF_KILL;
5818 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5819 }
5820
5821 /*
5822 * The Linux driver uses periodic interrupts to catch RX
5823 * interrupts dropped while using ICT; we cargo-cult the same scheme.
5824 */
5825 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5826 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5827 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5828 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5829 IWM_WRITE_1(sc,
5830 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5831 isperiodic = 1;
5832 }
5833
5834 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5835 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5836 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5837
5838 iwm_notif_intr(sc);
5839
5840 /* enable periodic interrupt, see above */
5841 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5842 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5843 IWM_CSR_INT_PERIODIC_ENA);
5844 }
5845
5846 if (__predict_false(r1 & ~handled))
5847 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5848 "%s: unhandled interrupts: %x\n", __func__, r1);
5849 rv = 1;
5850
5851 out_ena:
5852 iwm_restore_interrupts(sc);
5853 out:
5854 IWM_UNLOCK(sc);
5855 return;
5856 }
5857
5858 /*
5859 * Autoconf glue-sniffing
5860 */
5861 #define PCI_VENDOR_INTEL 0x8086
5862 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5863 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5864 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5865 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5866 #define PCI_PRODUCT_INTEL_WL_3168_1 0x24fb
5867 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5868 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5869 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5870 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5871 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5872 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5873 #define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd
5874 #define PCI_PRODUCT_INTEL_WL_9560_1 0x9df0
5875 #define PCI_PRODUCT_INTEL_WL_9560_2 0xa370
5876 #define PCI_PRODUCT_INTEL_WL_9560_3 0x31dc
5877 #define PCI_PRODUCT_INTEL_WL_9260_1 0x2526
5878
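/*
 * Table mapping supported PCI device IDs to their per-chip
 * configuration; consulted by iwm_probe() and iwm_dev_check().
 */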
5879 static const struct iwm_devices {
5880 uint16_t device;
5881 const struct iwm_cfg *cfg;
5882 } iwm_devices[] = {
5883 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5884 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5885 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5886 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5887 { PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5888 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5889 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5890 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5891 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5892 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5893 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5894 { PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5895 { PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5896 { PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5897 { PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5898 { PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5899 };
5900
5901 static int
5902 iwm_probe(device_t dev)
5903 {
5904 int i;
5905
5906 for (i = 0; i < nitems(iwm_devices); i++) {
5907 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5908 pci_get_device(dev) == iwm_devices[i].device) {
5909 device_set_desc(dev, iwm_devices[i].cfg->name);
5910 return (BUS_PROBE_DEFAULT);
5911 }
5912 }
5913
5914 return (ENXIO);
5915 }
5916
5917 static int
5918 iwm_dev_check(device_t dev)
5919 {
5920 struct iwm_softc *sc;
5921 uint16_t devid;
5922 int i;
5923
5924 sc = device_get_softc(dev);
5925
5926 devid = pci_get_device(dev);
5927 for (i = 0; i < nitems(iwm_devices); i++) {
5928 if (iwm_devices[i].device == devid) {
5929 sc->cfg = iwm_devices[i].cfg;
5930 return (0);
5931 }
5932 }
5933 device_printf(dev, "unknown adapter type\n");
5934 return (ENXIO);
5935 }
5936
5937 /* PCI registers */
5938 #define PCI_CFG_RETRY_TIMEOUT 0x041
5939
5940 static int
5941 iwm_pci_attach(device_t dev)
5942 {
5943 struct iwm_softc *sc;
5944 int count, error, rid;
5945 uint16_t reg;
5946
5947 sc = device_get_softc(dev);
5948
5949 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5950 * PCI Tx retries from interfering with C3 CPU state */
5951 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5952
5953 /* Enable bus-mastering and hardware bug workaround. */
5954 pci_enable_busmaster(dev);
5955 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5956 /* If not using MSI, clear a stuck INTx assertion (hardware bug workaround). */
5957 if (reg & PCIM_STATUS_INTxSTATE) {
5958 reg &= ~PCIM_STATUS_INTxSTATE;
5959 }
5960 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5961
5962 rid = PCIR_BAR(0);
5963 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5964 RF_ACTIVE);
5965 if (sc->sc_mem == NULL) {
5966 device_printf(sc->sc_dev, "can't map mem space\n");
5967 return (ENXIO);
5968 }
5969 sc->sc_st = rman_get_bustag(sc->sc_mem);
5970 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5971
5972 /* Install interrupt handler. */
5973 count = 1;
5974 rid = 0;
5975 if (