FreeBSD/Linux Kernel Cross Reference
sys/dev/iwn/if_iwn.c
1 /*-
2 * Copyright (c) 2007-2009
3 * Damien Bergamini <damien.bergamini@free.fr>
4 * Copyright (c) 2008
5 * Benjamin Close <benjsc@FreeBSD.org>
6 * Copyright (c) 2008 Sam Leffler, Errno Consulting
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*
22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23 * adapters.
24 */
25
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD: releng/8.3/sys/dev/iwn/if_iwn.c 226628 2011-10-22 09:43:35Z bschmidt $");
28
29 #include <sys/param.h>
30 #include <sys/sockio.h>
31 #include <sys/sysctl.h>
32 #include <sys/mbuf.h>
33 #include <sys/kernel.h>
34 #include <sys/socket.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/bus.h>
38 #include <sys/rman.h>
39 #include <sys/endian.h>
40 #include <sys/firmware.h>
41 #include <sys/limits.h>
42 #include <sys/module.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
45
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <machine/clock.h>
49
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pcivar.h>
52
53 #include <net/bpf.h>
54 #include <net/if.h>
55 #include <net/if_arp.h>
56 #include <net/ethernet.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60
61 #include <netinet/in.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/if_ether.h>
65 #include <netinet/ip.h>
66
67 #include <net80211/ieee80211_var.h>
68 #include <net80211/ieee80211_radiotap.h>
69 #include <net80211/ieee80211_regdomain.h>
70 #include <net80211/ieee80211_ratectl.h>
71
72 #include <dev/iwn/if_iwnreg.h>
73 #include <dev/iwn/if_iwnvar.h>
74
75 struct iwn_ident {
76 uint16_t vendor;
77 uint16_t device;
78 const char *name;
79 };
80
81 static const struct iwn_ident iwn_ident_table[] = {
82 { 0x8086, 0x0082, "Intel(R) Centrino(R) Advanced-N 6205" },
83 { 0x8086, 0x0083, "Intel(R) Centrino(R) Wireless-N 1000" },
84 { 0x8086, 0x0084, "Intel(R) Centrino(R) Wireless-N 1000" },
85 { 0x8086, 0x0085, "Intel(R) Centrino(R) Advanced-N 6205" },
86 { 0x8086, 0x0087, "Intel(R) Centrino(R) Advanced-N + WiMAX 6250" },
87 { 0x8086, 0x0089, "Intel(R) Centrino(R) Advanced-N + WiMAX 6250" },
88 { 0x8086, 0x008a, "Intel(R) Centrino(R) Wireless-N 1030" },
89 { 0x8086, 0x008b, "Intel(R) Centrino(R) Wireless-N 1030" },
90 { 0x8086, 0x0090, "Intel(R) Centrino(R) Advanced-N 6230" },
91 { 0x8086, 0x0091, "Intel(R) Centrino(R) Advanced-N 6230" },
92 { 0x8086, 0x0896, "Intel(R) Centrino(R) Wireless-N 130" },
93 { 0x8086, 0x4229, "Intel(R) Wireless WiFi Link 4965" },
94 { 0x8086, 0x422b, "Intel(R) Centrino(R) Ultimate-N 6300" },
95 { 0x8086, 0x422c, "Intel(R) Centrino(R) Advanced-N 6200" },
96 { 0x8086, 0x422d, "Intel(R) Wireless WiFi Link 4965" },
97 { 0x8086, 0x4230, "Intel(R) Wireless WiFi Link 4965" },
98 { 0x8086, 0x4232, "Intel(R) WiFi Link 5100" },
99 { 0x8086, 0x4233, "Intel(R) Wireless WiFi Link 4965" },
100 { 0x8086, 0x4235, "Intel(R) Ultimate N WiFi Link 5300" },
101 { 0x8086, 0x4236, "Intel(R) Ultimate N WiFi Link 5300" },
102 { 0x8086, 0x4237, "Intel(R) WiFi Link 5100" },
103 { 0x8086, 0x4238, "Intel(R) Centrino(R) Ultimate-N 6300" },
104 { 0x8086, 0x4239, "Intel(R) Centrino(R) Advanced-N 6200" },
105 { 0x8086, 0x423a, "Intel(R) WiMAX/WiFi Link 5350" },
106 { 0x8086, 0x423b, "Intel(R) WiMAX/WiFi Link 5350" },
107 { 0x8086, 0x423c, "Intel(R) WiMAX/WiFi Link 5150" },
108 { 0x8086, 0x423d, "Intel(R) WiMAX/WiFi Link 5150" },
109 { 0, 0, NULL }
110 };
111
112 static int iwn_probe(device_t);
113 static int iwn_attach(device_t);
114 static int iwn4965_attach(struct iwn_softc *, uint16_t);
115 static int iwn5000_attach(struct iwn_softc *, uint16_t);
116 static void iwn_radiotap_attach(struct iwn_softc *);
117 static void iwn_sysctlattach(struct iwn_softc *);
118 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
119 const char name[IFNAMSIZ], int unit, int opmode,
120 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
121 const uint8_t mac[IEEE80211_ADDR_LEN]);
122 static void iwn_vap_delete(struct ieee80211vap *);
123 static int iwn_detach(device_t);
124 static int iwn_shutdown(device_t);
125 static int iwn_suspend(device_t);
126 static int iwn_resume(device_t);
127 static int iwn_nic_lock(struct iwn_softc *);
128 static int iwn_eeprom_lock(struct iwn_softc *);
129 static int iwn_init_otprom(struct iwn_softc *);
130 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
131 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
132 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
133 void **, bus_size_t, bus_size_t);
134 static void iwn_dma_contig_free(struct iwn_dma_info *);
135 static int iwn_alloc_sched(struct iwn_softc *);
136 static void iwn_free_sched(struct iwn_softc *);
137 static int iwn_alloc_kw(struct iwn_softc *);
138 static void iwn_free_kw(struct iwn_softc *);
139 static int iwn_alloc_ict(struct iwn_softc *);
140 static void iwn_free_ict(struct iwn_softc *);
141 static int iwn_alloc_fwmem(struct iwn_softc *);
142 static void iwn_free_fwmem(struct iwn_softc *);
143 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
144 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
145 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
146 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
147 int);
148 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
149 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
150 static void iwn5000_ict_reset(struct iwn_softc *);
151 static int iwn_read_eeprom(struct iwn_softc *,
152 uint8_t macaddr[IEEE80211_ADDR_LEN]);
153 static void iwn4965_read_eeprom(struct iwn_softc *);
154 static void iwn4965_print_power_group(struct iwn_softc *, int);
155 static void iwn5000_read_eeprom(struct iwn_softc *);
156 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
157 static void iwn_read_eeprom_band(struct iwn_softc *, int);
158 #if 0 /* HT */
159 static void iwn_read_eeprom_ht40(struct iwn_softc *, int);
160 #endif
161 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
162 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
163 struct ieee80211_channel *);
164 static int iwn_setregdomain(struct ieee80211com *,
165 struct ieee80211_regdomain *, int,
166 struct ieee80211_channel[]);
167 static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
168 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
169 const uint8_t mac[IEEE80211_ADDR_LEN]);
170 static void iwn_newassoc(struct ieee80211_node *, int);
171 static int iwn_media_change(struct ifnet *);
172 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
173 static void iwn_calib_timeout(void *);
174 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
175 struct iwn_rx_data *);
176 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
177 struct iwn_rx_data *);
178 #if 0 /* HT */
179 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
180 struct iwn_rx_data *);
181 #endif
182 static void iwn5000_rx_calib_results(struct iwn_softc *,
183 struct iwn_rx_desc *, struct iwn_rx_data *);
184 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
185 struct iwn_rx_data *);
186 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
187 struct iwn_rx_data *);
188 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
189 struct iwn_rx_data *);
190 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
191 uint8_t);
192 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
193 static void iwn_notif_intr(struct iwn_softc *);
194 static void iwn_wakeup_intr(struct iwn_softc *);
195 static void iwn_rftoggle_intr(struct iwn_softc *);
196 static void iwn_fatal_intr(struct iwn_softc *);
197 static void iwn_intr(void *);
198 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
199 uint16_t);
200 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
201 uint16_t);
202 #ifdef notyet
203 static void iwn5000_reset_sched(struct iwn_softc *, int, int);
204 #endif
205 static uint8_t iwn_plcp_signal(int);
206 static int iwn_tx_data(struct iwn_softc *, struct mbuf *,
207 struct ieee80211_node *);
208 static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
209 struct ieee80211_node *,
210 const struct ieee80211_bpf_params *params);
211 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
212 const struct ieee80211_bpf_params *);
213 static void iwn_start(struct ifnet *);
214 static void iwn_start_locked(struct ifnet *);
215 static void iwn_watchdog(void *);
216 static int iwn_ioctl(struct ifnet *, u_long, caddr_t);
217 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
218 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
219 int);
220 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
221 int);
222 static int iwn_set_link_quality(struct iwn_softc *,
223 struct ieee80211_node *);
224 static int iwn_add_broadcast_node(struct iwn_softc *, int);
225 static int iwn_updateedca(struct ieee80211com *);
226 static void iwn_update_mcast(struct ifnet *);
227 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
228 static int iwn_set_critical_temp(struct iwn_softc *);
229 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
230 static void iwn4965_power_calibration(struct iwn_softc *, int);
231 static int iwn4965_set_txpower(struct iwn_softc *,
232 struct ieee80211_channel *, int);
233 static int iwn5000_set_txpower(struct iwn_softc *,
234 struct ieee80211_channel *, int);
235 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
236 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
237 static int iwn_get_noise(const struct iwn_rx_general_stats *);
238 static int iwn4965_get_temperature(struct iwn_softc *);
239 static int iwn5000_get_temperature(struct iwn_softc *);
240 static int iwn_init_sensitivity(struct iwn_softc *);
241 static void iwn_collect_noise(struct iwn_softc *,
242 const struct iwn_rx_general_stats *);
243 static int iwn4965_init_gains(struct iwn_softc *);
244 static int iwn5000_init_gains(struct iwn_softc *);
245 static int iwn4965_set_gains(struct iwn_softc *);
246 static int iwn5000_set_gains(struct iwn_softc *);
247 static void iwn_tune_sensitivity(struct iwn_softc *,
248 const struct iwn_rx_stats *);
249 static int iwn_send_sensitivity(struct iwn_softc *);
250 static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
251 static int iwn_send_btcoex(struct iwn_softc *);
252 static int iwn_send_advanced_btcoex(struct iwn_softc *);
253 static int iwn_config(struct iwn_softc *);
254 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
255 static int iwn_scan(struct iwn_softc *);
256 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
257 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
258 #if 0 /* HT */
259 static int iwn_ampdu_rx_start(struct ieee80211com *,
260 struct ieee80211_node *, uint8_t);
261 static void iwn_ampdu_rx_stop(struct ieee80211com *,
262 struct ieee80211_node *, uint8_t);
263 static int iwn_ampdu_tx_start(struct ieee80211com *,
264 struct ieee80211_node *, uint8_t);
265 static void iwn_ampdu_tx_stop(struct ieee80211com *,
266 struct ieee80211_node *, uint8_t);
267 static void iwn4965_ampdu_tx_start(struct iwn_softc *,
268 struct ieee80211_node *, uint8_t, uint16_t);
269 static void iwn4965_ampdu_tx_stop(struct iwn_softc *,
270 uint8_t, uint16_t);
271 static void iwn5000_ampdu_tx_start(struct iwn_softc *,
272 struct ieee80211_node *, uint8_t, uint16_t);
273 static void iwn5000_ampdu_tx_stop(struct iwn_softc *,
274 uint8_t, uint16_t);
275 #endif
276 static int iwn5000_query_calibration(struct iwn_softc *);
277 static int iwn5000_send_calibration(struct iwn_softc *);
278 static int iwn5000_send_wimax_coex(struct iwn_softc *);
279 static int iwn5000_crystal_calib(struct iwn_softc *);
280 static int iwn5000_temp_offset_calib(struct iwn_softc *);
281 static int iwn4965_post_alive(struct iwn_softc *);
282 static int iwn5000_post_alive(struct iwn_softc *);
283 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
284 int);
285 static int iwn4965_load_firmware(struct iwn_softc *);
286 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
287 const uint8_t *, int);
288 static int iwn5000_load_firmware(struct iwn_softc *);
289 static int iwn_read_firmware_leg(struct iwn_softc *,
290 struct iwn_fw_info *);
291 static int iwn_read_firmware_tlv(struct iwn_softc *,
292 struct iwn_fw_info *, uint16_t);
293 static int iwn_read_firmware(struct iwn_softc *);
294 static int iwn_clock_wait(struct iwn_softc *);
295 static int iwn_apm_init(struct iwn_softc *);
296 static void iwn_apm_stop_master(struct iwn_softc *);
297 static void iwn_apm_stop(struct iwn_softc *);
298 static int iwn4965_nic_config(struct iwn_softc *);
299 static int iwn5000_nic_config(struct iwn_softc *);
300 static int iwn_hw_prepare(struct iwn_softc *);
301 static int iwn_hw_init(struct iwn_softc *);
302 static void iwn_hw_stop(struct iwn_softc *);
303 static void iwn_radio_on(void *, int);
304 static void iwn_radio_off(void *, int);
305 static void iwn_init_locked(struct iwn_softc *);
306 static void iwn_init(void *);
307 static void iwn_stop_locked(struct iwn_softc *);
308 static void iwn_stop(struct iwn_softc *);
309 static void iwn_scan_start(struct ieee80211com *);
310 static void iwn_scan_end(struct ieee80211com *);
311 static void iwn_set_channel(struct ieee80211com *);
312 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
313 static void iwn_scan_mindwell(struct ieee80211_scan_state *);
314 static void iwn_hw_reset(void *, int);
315
316 #define IWN_DEBUG
317 #ifdef IWN_DEBUG
318 enum {
319 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
320 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */
321 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
322 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */
323 IWN_DEBUG_RESET = 0x00000010, /* reset processing */
324 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */
325 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */
326 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
327 IWN_DEBUG_INTR = 0x00000100, /* ISR */
328 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
329 IWN_DEBUG_NODE = 0x00000400, /* node management */
330 IWN_DEBUG_LED = 0x00000800, /* led management */
331 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */
332 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */
333 IWN_DEBUG_ANY = 0xffffffff
334 };
335
336 #define DPRINTF(sc, m, fmt, ...) do { \
337 if (sc->sc_debug & (m)) \
338 printf(fmt, __VA_ARGS__); \
339 } while (0)
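/*
 * Illustrative example (not part of the original source): a DPRINTF()
 * call is compiled out when IWN_DEBUG is not defined and otherwise
 * fires only when the matching bit is set in sc->sc_debug, e.g.:
 *
 *	DPRINTF(sc, IWN_DEBUG_RESET, "%s: resetting hardware\n", __func__);
 *
 * sc_debug is exposed as the "debug" sysctl attached in
 * iwn_sysctlattach() below, so writing 0x110 to it at runtime would
 * enable the RESET and INTR messages.
 */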
340
341 static const char *
342 iwn_intr_str(uint8_t cmd)
343 {
344 switch (cmd) {
345 /* Notifications */
346 case IWN_UC_READY: return "UC_READY";
347 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE";
348 case IWN_TX_DONE: return "TX_DONE";
349 case IWN_START_SCAN: return "START_SCAN";
350 case IWN_STOP_SCAN: return "STOP_SCAN";
351 case IWN_RX_STATISTICS: return "RX_STATS";
352 case IWN_BEACON_STATISTICS: return "BEACON_STATS";
353 case IWN_STATE_CHANGED: return "STATE_CHANGED";
354 case IWN_BEACON_MISSED: return "BEACON_MISSED";
355 case IWN_RX_PHY: return "RX_PHY";
356 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE";
357 case IWN_RX_DONE: return "RX_DONE";
358
359 /* Command Notifications */
360 case IWN_CMD_RXON: return "IWN_CMD_RXON";
361 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC";
362 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS";
363 case IWN_CMD_TIMING: return "IWN_CMD_TIMING";
364 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY";
365 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED";
366 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX";
367 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG";
368 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT";
369 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
370 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE";
371 case IWN_CMD_SCAN: return "IWN_CMD_SCAN";
372 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS";
373 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER";
374 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM";
375 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG";
376 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX";
377 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP";
378 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY";
379 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB";
380 }
381 return "UNKNOWN INTR NOTIF/CMD";
382 }
383 #else
384 #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
385 #endif
386
387 static device_method_t iwn_methods[] = {
388 /* Device interface */
389 DEVMETHOD(device_probe, iwn_probe),
390 DEVMETHOD(device_attach, iwn_attach),
391 DEVMETHOD(device_detach, iwn_detach),
392 DEVMETHOD(device_shutdown, iwn_shutdown),
393 DEVMETHOD(device_suspend, iwn_suspend),
394 DEVMETHOD(device_resume, iwn_resume),
395 { 0, 0 }
396 };
397
398 static driver_t iwn_driver = {
399 "iwn",
400 iwn_methods,
401 sizeof(struct iwn_softc)
402 };
403 static devclass_t iwn_devclass;
404
405 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
406
407 MODULE_DEPEND(iwn, firmware, 1, 1, 1);
408 MODULE_DEPEND(iwn, pci, 1, 1, 1);
409 MODULE_DEPEND(iwn, wlan, 1, 1, 1);
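/*
 * Illustrative note (not part of the original source): on a stock
 * FreeBSD 8.x system the driver and a matching firmware image are
 * typically loaded from /boot/loader.conf, e.g. for a 5000 Series
 * adapter:
 *
 *	if_iwn_load="YES"
 *	iwn5000fw_load="YES"
 *
 * The firmware module name must match the sc->fwname selected in
 * iwn4965_attach()/iwn5000_attach() below.
 */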
410
411 static int
412 iwn_probe(device_t dev)
413 {
414 const struct iwn_ident *ident;
415
416 for (ident = iwn_ident_table; ident->name != NULL; ident++) {
417 if (pci_get_vendor(dev) == ident->vendor &&
418 pci_get_device(dev) == ident->device) {
419 device_set_desc(dev, ident->name);
420 return 0;
421 }
422 }
423 return ENXIO;
424 }
425
426 static int
427 iwn_attach(device_t dev)
428 {
429 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
430 struct ieee80211com *ic;
431 struct ifnet *ifp;
432 uint32_t reg;
433 int i, error, result;
434 uint8_t macaddr[IEEE80211_ADDR_LEN];
435
436 sc->sc_dev = dev;
437
438 /*
439 * Get the offset of the PCI Express Capability Structure in PCI
440 * Configuration Space.
441 */
442 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
443 if (error != 0) {
444 device_printf(dev, "PCIe capability structure not found!\n");
445 return error;
446 }
447
448 /* Clear device-specific "PCI retry timeout" register (41h). */
449 pci_write_config(dev, 0x41, 0, 1);
450
451 /* Hardware bug workaround. */
452 reg = pci_read_config(dev, PCIR_COMMAND, 1);
453 if (reg & PCIM_CMD_INTxDIS) {
454 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
455 __func__);
456 reg &= ~PCIM_CMD_INTxDIS;
457 pci_write_config(dev, PCIR_COMMAND, reg, 1);
458 }
459
460 /* Enable bus-mastering. */
461 pci_enable_busmaster(dev);
462
463 sc->mem_rid = PCIR_BAR(0);
464 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
465 RF_ACTIVE);
466 if (sc->mem == NULL) {
467 device_printf(dev, "can't map mem space\n");
468 error = ENOMEM;
469 return error;
470 }
471 sc->sc_st = rman_get_bustag(sc->mem);
472 sc->sc_sh = rman_get_bushandle(sc->mem);
473
474 sc->irq_rid = 0;
475 if ((result = pci_msi_count(dev)) == 1 &&
476 pci_alloc_msi(dev, &result) == 0)
477 sc->irq_rid = 1;
478 /* Install interrupt handler. */
479 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
480 RF_ACTIVE | RF_SHAREABLE);
481 if (sc->irq == NULL) {
482 device_printf(dev, "can't map interrupt\n");
483 error = ENOMEM;
484 goto fail;
485 }
486
487 IWN_LOCK_INIT(sc);
488
489 /* Read hardware revision and attach. */
490 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
491 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
492 error = iwn4965_attach(sc, pci_get_device(dev));
493 else
494 error = iwn5000_attach(sc, pci_get_device(dev));
495 if (error != 0) {
496 device_printf(dev, "could not attach device, error %d\n",
497 error);
498 goto fail;
499 }
500
501 if ((error = iwn_hw_prepare(sc)) != 0) {
502 device_printf(dev, "hardware not ready, error %d\n", error);
503 goto fail;
504 }
505
506 /* Allocate DMA memory for firmware transfers. */
507 if ((error = iwn_alloc_fwmem(sc)) != 0) {
508 device_printf(dev,
509 "could not allocate memory for firmware, error %d\n",
510 error);
511 goto fail;
512 }
513
514 /* Allocate "Keep Warm" page. */
515 if ((error = iwn_alloc_kw(sc)) != 0) {
516 device_printf(dev,
517 "could not allocate keep warm page, error %d\n", error);
518 goto fail;
519 }
520
521 /* Allocate ICT table for 5000 Series. */
522 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
523 (error = iwn_alloc_ict(sc)) != 0) {
524 device_printf(dev, "could not allocate ICT table, error %d\n",
525 error);
526 goto fail;
527 }
528
529 /* Allocate TX scheduler "rings". */
530 if ((error = iwn_alloc_sched(sc)) != 0) {
531 device_printf(dev,
532 "could not allocate TX scheduler rings, error %d\n", error);
533 goto fail;
534 }
535
536 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
537 for (i = 0; i < sc->ntxqs; i++) {
538 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
539 device_printf(dev,
540 "could not allocate TX ring %d, error %d\n", i,
541 error);
542 goto fail;
543 }
544 }
545
546 /* Allocate RX ring. */
547 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
548 device_printf(dev, "could not allocate RX ring, error %d\n",
549 error);
550 goto fail;
551 }
552
553 /* Clear pending interrupts. */
554 IWN_WRITE(sc, IWN_INT, 0xffffffff);
555
571 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
572 if (ifp == NULL) {
573 device_printf(dev, "can not allocate ifnet structure\n");
574 goto fail;
575 }
576
577 ic = ifp->if_l2com;
578 ic->ic_ifp = ifp;
579 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
580 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
581
582 /* Set device capabilities. */
583 ic->ic_caps =
584 IEEE80211_C_STA /* station mode supported */
585 | IEEE80211_C_MONITOR /* monitor mode supported */
586 | IEEE80211_C_TXPMGT /* tx power management */
587 | IEEE80211_C_SHSLOT /* short slot time supported */
588 | IEEE80211_C_WPA
589 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
590 | IEEE80211_C_BGSCAN /* background scanning */
591 #if 0
592 | IEEE80211_C_IBSS /* ibss/adhoc mode */
593 #endif
594 | IEEE80211_C_WME /* WME */
595 ;
596 #if 0 /* HT */
597 /* XXX disable until HT channel setup works */
598 ic->ic_htcaps =
599 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
600 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
601 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
602 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
603 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
604 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
605 /* s/w capabilities */
606 | IEEE80211_HTC_HT /* HT operation */
607 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
608 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
609 ;
610
611 /* Set HT capabilities. */
612 ic->ic_htcaps =
613 #if IWN_RBUF_SIZE == 8192
614 IEEE80211_HTCAP_AMSDU7935 |
615 #endif
616 IEEE80211_HTCAP_CBW20_40 |
617 IEEE80211_HTCAP_SGI20 |
618 IEEE80211_HTCAP_SGI40;
619 if (sc->hw_type != IWN_HW_REV_TYPE_4965)
620 ic->ic_htcaps |= IEEE80211_HTCAP_GF;
621 if (sc->hw_type == IWN_HW_REV_TYPE_6050)
622 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
623 else
624 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
625 #endif
626
627 /* Read MAC address, channels, etc from EEPROM. */
628 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
629 device_printf(dev, "could not read EEPROM, error %d\n",
630 error);
631 goto fail;
632 }
 633
 556 	/* Count the number of available chains. */
 557 	sc->ntxchains =
 558 	    ((sc->txchainmask >> 2) & 1) +
 559 	    ((sc->txchainmask >> 1) & 1) +
 560 	    ((sc->txchainmask >> 0) & 1);
 561 	sc->nrxchains =
 562 	    ((sc->rxchainmask >> 2) & 1) +
 563 	    ((sc->rxchainmask >> 1) & 1) +
 564 	    ((sc->rxchainmask >> 0) & 1);
 565 	if (bootverbose) {
 566 		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
 567 		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
 568 		    macaddr, ":");
 569 	}
 570
634 #if 0 /* HT */
635 /* Set supported HT rates. */
636 ic->ic_sup_mcs[0] = 0xff;
637 if (sc->nrxchains > 1)
638 ic->ic_sup_mcs[1] = 0xff;
639 if (sc->nrxchains > 2)
640 ic->ic_sup_mcs[2] = 0xff;
641 #endif
642
643 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
644 ifp->if_softc = sc;
645 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
646 ifp->if_init = iwn_init;
647 ifp->if_ioctl = iwn_ioctl;
648 ifp->if_start = iwn_start;
649 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
650 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
651 IFQ_SET_READY(&ifp->if_snd);
652
653 ieee80211_ifattach(ic, macaddr);
654 ic->ic_vap_create = iwn_vap_create;
655 ic->ic_vap_delete = iwn_vap_delete;
656 ic->ic_raw_xmit = iwn_raw_xmit;
657 ic->ic_node_alloc = iwn_node_alloc;
658 #if 0 /* HT */
659 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
660 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
661 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
662 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
663 #endif
664 ic->ic_newassoc = iwn_newassoc;
665 ic->ic_wme.wme_update = iwn_updateedca;
666 ic->ic_update_mcast = iwn_update_mcast;
667 ic->ic_scan_start = iwn_scan_start;
668 ic->ic_scan_end = iwn_scan_end;
669 ic->ic_set_channel = iwn_set_channel;
670 ic->ic_scan_curchan = iwn_scan_curchan;
671 ic->ic_scan_mindwell = iwn_scan_mindwell;
672 ic->ic_setregdomain = iwn_setregdomain;
673
674 iwn_radiotap_attach(sc);
675
676 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
677 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
678 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
679 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
680 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
681
682 iwn_sysctlattach(sc);
683
684 /*
685 * Hook our interrupt after all initialization is complete.
686 */
687 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
688 NULL, iwn_intr, sc, &sc->sc_ih);
689 if (error != 0) {
690 device_printf(dev, "can't establish interrupt, error %d\n",
691 error);
692 goto fail;
693 }
694
695 if (bootverbose)
696 ieee80211_announce(ic);
697 return 0;
698 fail:
699 iwn_detach(dev);
700 return error;
701 }
702
703 static int
704 iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
705 {
706 struct iwn_ops *ops = &sc->ops;
707
708 ops->load_firmware = iwn4965_load_firmware;
709 ops->read_eeprom = iwn4965_read_eeprom;
710 ops->post_alive = iwn4965_post_alive;
711 ops->nic_config = iwn4965_nic_config;
712 ops->update_sched = iwn4965_update_sched;
713 ops->get_temperature = iwn4965_get_temperature;
714 ops->get_rssi = iwn4965_get_rssi;
715 ops->set_txpower = iwn4965_set_txpower;
716 ops->init_gains = iwn4965_init_gains;
717 ops->set_gains = iwn4965_set_gains;
718 ops->add_node = iwn4965_add_node;
719 ops->tx_done = iwn4965_tx_done;
720 #if 0 /* HT */
721 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
722 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
723 #endif
724 sc->ntxqs = IWN4965_NTXQUEUES;
725 sc->ndmachnls = IWN4965_NDMACHNLS;
726 sc->broadcast_id = IWN4965_ID_BROADCAST;
727 sc->rxonsz = IWN4965_RXONSZ;
728 sc->schedsz = IWN4965_SCHEDSZ;
729 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
730 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
731 sc->fwsz = IWN4965_FWSZ;
732 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
733 sc->limits = &iwn4965_sensitivity_limits;
734 sc->fwname = "iwn4965fw";
735 /* Override chains masks, ROM is known to be broken. */
736 sc->txchainmask = IWN_ANT_AB;
737 sc->rxchainmask = IWN_ANT_ABC;
738
739 return 0;
740 }
741
742 static int
743 iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
744 {
745 struct iwn_ops *ops = &sc->ops;
746
747 ops->load_firmware = iwn5000_load_firmware;
748 ops->read_eeprom = iwn5000_read_eeprom;
749 ops->post_alive = iwn5000_post_alive;
750 ops->nic_config = iwn5000_nic_config;
751 ops->update_sched = iwn5000_update_sched;
752 ops->get_temperature = iwn5000_get_temperature;
753 ops->get_rssi = iwn5000_get_rssi;
754 ops->set_txpower = iwn5000_set_txpower;
755 ops->init_gains = iwn5000_init_gains;
756 ops->set_gains = iwn5000_set_gains;
757 ops->add_node = iwn5000_add_node;
758 ops->tx_done = iwn5000_tx_done;
759 #if 0 /* HT */
760 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
761 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
762 #endif
763 sc->ntxqs = IWN5000_NTXQUEUES;
764 sc->ndmachnls = IWN5000_NDMACHNLS;
765 sc->broadcast_id = IWN5000_ID_BROADCAST;
766 sc->rxonsz = IWN5000_RXONSZ;
767 sc->schedsz = IWN5000_SCHEDSZ;
768 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
769 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
770 sc->fwsz = IWN5000_FWSZ;
771 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
772 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
773 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
774
775 switch (sc->hw_type) {
776 case IWN_HW_REV_TYPE_5100:
777 sc->limits = &iwn5000_sensitivity_limits;
778 sc->fwname = "iwn5000fw";
779 /* Override chains masks, ROM is known to be broken. */
780 sc->txchainmask = IWN_ANT_B;
781 sc->rxchainmask = IWN_ANT_AB;
782 break;
783 case IWN_HW_REV_TYPE_5150:
784 sc->limits = &iwn5150_sensitivity_limits;
785 sc->fwname = "iwn5150fw";
786 break;
787 case IWN_HW_REV_TYPE_5300:
788 case IWN_HW_REV_TYPE_5350:
789 sc->limits = &iwn5000_sensitivity_limits;
790 sc->fwname = "iwn5000fw";
791 break;
792 case IWN_HW_REV_TYPE_1000:
793 sc->limits = &iwn1000_sensitivity_limits;
794 sc->fwname = "iwn1000fw";
795 break;
796 case IWN_HW_REV_TYPE_6000:
797 sc->limits = &iwn6000_sensitivity_limits;
798 sc->fwname = "iwn6000fw";
799 if (pid == 0x422c || pid == 0x4239) {
800 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
801 /* Override chains masks, ROM is known to be broken. */
802 sc->txchainmask = IWN_ANT_BC;
803 sc->rxchainmask = IWN_ANT_BC;
804 }
805 break;
806 case IWN_HW_REV_TYPE_6050:
807 sc->limits = &iwn6000_sensitivity_limits;
808 sc->fwname = "iwn6050fw";
809 /* Override chains masks, ROM is known to be broken. */
810 sc->txchainmask = IWN_ANT_AB;
811 sc->rxchainmask = IWN_ANT_AB;
812 break;
813 case IWN_HW_REV_TYPE_6005:
814 sc->limits = &iwn6000_sensitivity_limits;
815 if (pid != 0x0082 && pid != 0x0085) {
816 sc->fwname = "iwn6000g2bfw";
817 sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
818 } else
819 sc->fwname = "iwn6000g2afw";
820 break;
821 default:
822 device_printf(sc->sc_dev, "adapter type %d not supported\n",
823 sc->hw_type);
824 return ENOTSUP;
825 }
826 return 0;
827 }
828
829 /*
830 * Attach the interface to 802.11 radiotap.
831 */
832 static void
833 iwn_radiotap_attach(struct iwn_softc *sc)
834 {
835 struct ifnet *ifp = sc->sc_ifp;
836 struct ieee80211com *ic = ifp->if_l2com;
837
838 ieee80211_radiotap_attach(ic,
839 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
840 IWN_TX_RADIOTAP_PRESENT,
841 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
842 IWN_RX_RADIOTAP_PRESENT);
843 }
844
845 static void
846 iwn_sysctlattach(struct iwn_softc *sc)
847 {
848 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
849 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
850
851 #ifdef IWN_DEBUG
852 sc->sc_debug = 0;
853 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
854 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
855 #endif
856 }
857
858 static struct ieee80211vap *
859 iwn_vap_create(struct ieee80211com *ic,
860 const char name[IFNAMSIZ], int unit, int opmode, int flags,
861 const uint8_t bssid[IEEE80211_ADDR_LEN],
862 const uint8_t mac[IEEE80211_ADDR_LEN])
863 {
864 struct iwn_vap *ivp;
865 struct ieee80211vap *vap;
866
867 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
868 return NULL;
869 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
870 M_80211_VAP, M_NOWAIT | M_ZERO);
871 if (ivp == NULL)
872 return NULL;
873 vap = &ivp->iv_vap;
874 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
875 vap->iv_bmissthreshold = 10; /* override default */
876 /* Override with driver methods. */
877 ivp->iv_newstate = vap->iv_newstate;
878 vap->iv_newstate = iwn_newstate;
879
880 ieee80211_ratectl_init(vap);
881 /* Complete setup. */
882 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
883 ic->ic_opmode = opmode;
884 return vap;
885 }
886
887 static void
888 iwn_vap_delete(struct ieee80211vap *vap)
889 {
890 struct iwn_vap *ivp = IWN_VAP(vap);
891
892 ieee80211_ratectl_deinit(vap);
893 ieee80211_vap_detach(vap);
894 free(ivp, M_80211_VAP);
895 }
896
897 static int
898 iwn_detach(device_t dev)
899 {
900 struct iwn_softc *sc = device_get_softc(dev);
901 struct ifnet *ifp = sc->sc_ifp;
902 struct ieee80211com *ic;
903 int qid;
904
905 if (ifp != NULL) {
906 ic = ifp->if_l2com;
907
908 ieee80211_draintask(ic, &sc->sc_reinit_task);
909 ieee80211_draintask(ic, &sc->sc_radioon_task);
910 ieee80211_draintask(ic, &sc->sc_radiooff_task);
911
912 iwn_stop(sc);
913 callout_drain(&sc->watchdog_to);
914 callout_drain(&sc->calib_to);
915 ieee80211_ifdetach(ic);
916 }
917
918 /* Uninstall interrupt handler. */
919 if (sc->irq != NULL) {
920 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
921 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
922 if (sc->irq_rid == 1)
923 pci_release_msi(dev);
924 }
925
926 /* Free DMA resources. */
927 iwn_free_rx_ring(sc, &sc->rxq);
928 for (qid = 0; qid < sc->ntxqs; qid++)
929 iwn_free_tx_ring(sc, &sc->txq[qid]);
930 iwn_free_sched(sc);
931 iwn_free_kw(sc);
932 if (sc->ict != NULL)
933 iwn_free_ict(sc);
934 iwn_free_fwmem(sc);
935
936 if (sc->mem != NULL)
937 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
938
939 if (ifp != NULL)
940 if_free(ifp);
941
942 IWN_LOCK_DESTROY(sc);
943 return 0;
944 }
945
946 static int
947 iwn_shutdown(device_t dev)
948 {
949 struct iwn_softc *sc = device_get_softc(dev);
950
951 iwn_stop(sc);
952 return 0;
953 }
954
955 static int
956 iwn_suspend(device_t dev)
957 {
958 struct iwn_softc *sc = device_get_softc(dev);
959 struct ifnet *ifp = sc->sc_ifp;
960 struct ieee80211com *ic = ifp->if_l2com;
961 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
962
963 iwn_stop(sc);
964 if (vap != NULL)
965 ieee80211_stop(vap);
966 return 0;
967 }
968
969 static int
970 iwn_resume(device_t dev)
971 {
972 struct iwn_softc *sc = device_get_softc(dev);
973 struct ifnet *ifp = sc->sc_ifp;
974 struct ieee80211com *ic = ifp->if_l2com;
975 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
976
977 /* Clear device-specific "PCI retry timeout" register (41h). */
978 pci_write_config(dev, 0x41, 0, 1);
979
980 if (ifp->if_flags & IFF_UP) {
981 iwn_init(sc);
982 if (vap != NULL)
983 ieee80211_init(vap);
984 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
985 iwn_start(ifp);
986 }
987 return 0;
988 }
989
990 static int
991 iwn_nic_lock(struct iwn_softc *sc)
992 {
993 int ntries;
994
995 /* Request exclusive access to NIC. */
996 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
997
998 /* Spin until we actually get the lock. */
999 for (ntries = 0; ntries < 1000; ntries++) {
1000 if ((IWN_READ(sc, IWN_GP_CNTRL) &
1001 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1002 IWN_GP_CNTRL_MAC_ACCESS_ENA)
1003 return 0;
1004 DELAY(10);
1005 }
1006 return ETIMEDOUT;
1007 }
1008
1009 static __inline void
1010 iwn_nic_unlock(struct iwn_softc *sc)
1011 {
1012 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1013 }
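/*
 * Illustrative sketch (not part of the original source): callers
 * bracket peripheral (prph) register access with this lock/unlock
 * pair, e.g.:
 *
 *	if ((error = iwn_nic_lock(sc)) == 0) {
 *		iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
 *		iwn_nic_unlock(sc);
 *	}
 *
 * iwn_init_otprom() below follows this pattern.
 */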
1014
1015 static __inline uint32_t
1016 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1017 {
1018 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1019 IWN_BARRIER_READ_WRITE(sc);
1020 return IWN_READ(sc, IWN_PRPH_RDATA);
1021 }
1022
1023 static __inline void
1024 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1025 {
1026 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1027 IWN_BARRIER_WRITE(sc);
1028 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1029 }
1030
1031 static __inline void
1032 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1033 {
1034 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1035 }
1036
1037 static __inline void
1038 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1039 {
1040 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1041 }
1042
1043 static __inline void
1044 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1045 const uint32_t *data, int count)
1046 {
1047 for (; count > 0; count--, data++, addr += 4)
1048 iwn_prph_write(sc, addr, *data);
1049 }
1050
1051 static __inline uint32_t
1052 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1053 {
1054 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1055 IWN_BARRIER_READ_WRITE(sc);
1056 return IWN_READ(sc, IWN_MEM_RDATA);
1057 }
1058
1059 static __inline void
1060 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1061 {
1062 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1063 IWN_BARRIER_WRITE(sc);
1064 IWN_WRITE(sc, IWN_MEM_WDATA, data);
1065 }
1066
1067 static __inline void
1068 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1069 {
1070 uint32_t tmp;
1071
1072 tmp = iwn_mem_read(sc, addr & ~3);
1073 if (addr & 3)
1074 tmp = (tmp & 0x0000ffff) | data << 16;
1075 else
1076 tmp = (tmp & 0xffff0000) | data;
1077 iwn_mem_write(sc, addr & ~3, tmp);
1078 }
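/*
 * Illustrative note (not part of the original source): the 16-bit
 * write above is a read-modify-write of the enclosing 32-bit word.
 * For example, writing 0xbeef to address 0x1002 reads the word at
 * 0x1000 and stores (old & 0x0000ffff) | 0xbeef0000, i.e. it only
 * replaces the upper halfword.
 */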
1079
1080 static __inline void
1081 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1082 int count)
1083 {
1084 for (; count > 0; count--, addr += 4)
1085 *data++ = iwn_mem_read(sc, addr);
1086 }
1087
1088 static __inline void
1089 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1090 int count)
1091 {
1092 for (; count > 0; count--, addr += 4)
1093 iwn_mem_write(sc, addr, val);
1094 }
1095
1096 static int
1097 iwn_eeprom_lock(struct iwn_softc *sc)
1098 {
1099 int i, ntries;
1100
1101 for (i = 0; i < 100; i++) {
1102 /* Request exclusive access to EEPROM. */
1103 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1104 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1105
1106 /* Spin until we actually get the lock. */
1107 for (ntries = 0; ntries < 100; ntries++) {
1108 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1109 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1110 return 0;
1111 DELAY(10);
1112 }
1113 }
1114 return ETIMEDOUT;
1115 }
1116
1117 static __inline void
1118 iwn_eeprom_unlock(struct iwn_softc *sc)
1119 {
1120 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1121 }
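/*
 * Illustrative sketch (not part of the original source): ROM access
 * is bracketed the same way as NIC register access, e.g.:
 *
 *	if ((error = iwn_eeprom_lock(sc)) == 0) {
 *		iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
 *		iwn_eeprom_unlock(sc);
 *	}
 *
 * iwn_read_eeprom() below takes the lock this way before reading the
 * MAC address and channel list.
 */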
1122
1123 /*
1124 * Initialize access by host to One Time Programmable ROM.
1125 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1126 */
1127 static int
1128 iwn_init_otprom(struct iwn_softc *sc)
1129 {
1130 uint16_t prev, base, next;
1131 int count, error;
1132
1133 /* Wait for clock stabilization before accessing prph. */
1134 if ((error = iwn_clock_wait(sc)) != 0)
1135 return error;
1136
1137 if ((error = iwn_nic_lock(sc)) != 0)
1138 return error;
1139 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1140 DELAY(5);
1141 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1142 iwn_nic_unlock(sc);
1143
1144 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1145 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1146 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1147 IWN_RESET_LINK_PWR_MGMT_DIS);
1148 }
1149 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1150 /* Clear ECC status. */
1151 IWN_SETBITS(sc, IWN_OTP_GP,
1152 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1153
1154 /*
1155 * Find the block before last block (contains the EEPROM image)
1156 * for HW without OTP shadow RAM.
1157 */
1158 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1159 /* Switch to absolute addressing mode. */
1160 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1161 base = prev = 0;
1162 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1163 error = iwn_read_prom_data(sc, base, &next, 2);
1164 if (error != 0)
1165 return error;
1166 if (next == 0) /* End of linked-list. */
1167 break;
1168 prev = base;
1169 base = le16toh(next);
1170 }
1171 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1172 return EIO;
1173 /* Skip "next" word. */
1174 sc->prom_base = prev + 1;
1175 }
1176 return 0;
1177 }
1178
1179 static int
1180 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1181 {
1182 uint8_t *out = data;
1183 uint32_t val, tmp;
1184 int ntries;
1185
1186 addr += sc->prom_base;
1187 for (; count > 0; count -= 2, addr++) {
1188 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1189 for (ntries = 0; ntries < 10; ntries++) {
1190 val = IWN_READ(sc, IWN_EEPROM);
1191 if (val & IWN_EEPROM_READ_VALID)
1192 break;
1193 DELAY(5);
1194 }
1195 if (ntries == 10) {
1196 device_printf(sc->sc_dev,
1197 "timeout reading ROM at 0x%x\n", addr);
1198 return ETIMEDOUT;
1199 }
1200 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1201 /* OTPROM, check for ECC errors. */
1202 tmp = IWN_READ(sc, IWN_OTP_GP);
1203 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1204 device_printf(sc->sc_dev,
1205 "OTPROM ECC error at 0x%x\n", addr);
1206 return EIO;
1207 }
1208 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1209 /* Correctable ECC error, clear bit. */
1210 IWN_SETBITS(sc, IWN_OTP_GP,
1211 IWN_OTP_GP_ECC_CORR_STTS);
1212 }
1213 }
1214 *out++ = val >> 16;
1215 if (count > 1)
1216 *out++ = val >> 24;
1217 }
1218 return 0;
1219 }
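/*
 * Illustrative note (not part of the original source): "addr" is an
 * offset in 16-bit words and "count" is a byte count, so reading one
 * 16-bit field looks like:
 *
 *	uint16_t val;
 *
 *	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
 *	sc->rfcfg = le16toh(val);
 *
 * as done in iwn_read_eeprom() below.
 */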
1220
1221 static void
1222 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1223 {
1224 if (error != 0)
1225 return;
1226 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1227 *(bus_addr_t *)arg = segs[0].ds_addr;
1228 }
1229
1230 static int
1231 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1232 void **kvap, bus_size_t size, bus_size_t alignment)
1233 {
1234 int error;
1235
1236 dma->tag = NULL;
1237 dma->size = size;
1238
1239 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1240 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1241 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1242 if (error != 0)
1243 goto fail;
1244
1245 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1246 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1247 if (error != 0)
1248 goto fail;
1249
1250 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1251 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1252 if (error != 0)
1253 goto fail;
1254
1255 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1256
1257 if (kvap != NULL)
1258 *kvap = dma->vaddr;
1259
1260 return 0;
1261
1262 fail: iwn_dma_contig_free(dma);
1263 return error;
1264 }
1265
1266 static void
1267 iwn_dma_contig_free(struct iwn_dma_info *dma)
1268 {
1269 if (dma->map != NULL) {
1270 if (dma->vaddr != NULL) {
1271 bus_dmamap_sync(dma->tag, dma->map,
1272 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1273 bus_dmamap_unload(dma->tag, dma->map);
1274 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map);
1275 dma->vaddr = NULL;
1276 }
1277 bus_dmamap_destroy(dma->tag, dma->map);
1278 dma->map = NULL;
1279 }
1280 if (dma->tag != NULL) {
1281 bus_dma_tag_destroy(dma->tag);
1282 dma->tag = NULL;
1283 }
1284 }
1285
1286 static int
1287 iwn_alloc_sched(struct iwn_softc *sc)
1288 {
1289 /* TX scheduler rings must be aligned on a 1KB boundary. */
1290 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1291 sc->schedsz, 1024);
1292 }
1293
1294 static void
1295 iwn_free_sched(struct iwn_softc *sc)
1296 {
1297 iwn_dma_contig_free(&sc->sched_dma);
1298 }
1299
1300 static int
1301 iwn_alloc_kw(struct iwn_softc *sc)
1302 {
1303 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1304 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1305 }
1306
1307 static void
1308 iwn_free_kw(struct iwn_softc *sc)
1309 {
1310 iwn_dma_contig_free(&sc->kw_dma);
1311 }
1312
1313 static int
1314 iwn_alloc_ict(struct iwn_softc *sc)
1315 {
1316 /* ICT table must be aligned on a 4KB boundary. */
1317 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1318 IWN_ICT_SIZE, 4096);
1319 }
1320
1321 static void
1322 iwn_free_ict(struct iwn_softc *sc)
1323 {
1324 iwn_dma_contig_free(&sc->ict_dma);
1325 }
1326
1327 static int
1328 iwn_alloc_fwmem(struct iwn_softc *sc)
1329 {
1330 /* Must be aligned on a 16-byte boundary. */
1331 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1332 }
1333
1334 static void
1335 iwn_free_fwmem(struct iwn_softc *sc)
1336 {
1337 iwn_dma_contig_free(&sc->fw_dma);
1338 }
1339
1340 static int
1341 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1342 {
1343 bus_size_t size;
1344 int i, error;
1345
1346 ring->cur = 0;
1347
1348 /* Allocate RX descriptors (256-byte aligned). */
1349 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1350 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1351 size, 256);
1352 if (error != 0) {
1353 device_printf(sc->sc_dev,
1354 "%s: could not allocate RX ring DMA memory, error %d\n",
1355 __func__, error);
1356 goto fail;
1357 }
1358
1359 /* Allocate RX status area (16-byte aligned). */
1360 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1361 sizeof (struct iwn_rx_status), 16);
1362 if (error != 0) {
1363 device_printf(sc->sc_dev,
1364 "%s: could not allocate RX status DMA memory, error %d\n",
1365 __func__, error);
1366 goto fail;
1367 }
1368
1369 /* Create RX buffer DMA tag. */
1370 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1371 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1372 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1373 &ring->data_dmat);
1374 if (error != 0) {
1375 device_printf(sc->sc_dev,
1376 "%s: could not create RX buf DMA tag, error %d\n",
1377 __func__, error);
1378 goto fail;
1379 }
1380
1381 /*
1382 * Allocate and map RX buffers.
1383 */
1384 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1385 struct iwn_rx_data *data = &ring->data[i];
1386 bus_addr_t paddr;
1387
1388 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1389 if (error != 0) {
1390 device_printf(sc->sc_dev,
1391 "%s: could not create RX buf DMA map, error %d\n",
1392 __func__, error);
1393 goto fail;
1394 }
1395
1396 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
1397 IWN_RBUF_SIZE);
1398 if (data->m == NULL) {
1399 device_printf(sc->sc_dev,
1400 "%s: could not allocate RX mbuf\n", __func__);
1401 error = ENOBUFS;
1402 goto fail;
1403 }
1404
1405 error = bus_dmamap_load(ring->data_dmat, data->map,
1406 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1407 &paddr, BUS_DMA_NOWAIT);
1408 if (error != 0 && error != EFBIG) {
1409 device_printf(sc->sc_dev,
1410 "%s: can't not map mbuf, error %d\n", __func__,
1411 error);
1412 goto fail;
1413 }
1414
1415 /* Set physical address of RX buffer (256-byte aligned). */
1416 ring->desc[i] = htole32(paddr >> 8);
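		/*
		 * Illustrative note (not in the original source): the
		 * hardware addresses RX buffers in 256-byte units, so a
		 * buffer at physical address 0x01234500 is stored in the
		 * descriptor as 0x00012345.
		 */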
1417 }
1418
1419 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1420 BUS_DMASYNC_PREWRITE);
1421
1422 return 0;
1423
1424 fail: iwn_free_rx_ring(sc, ring);
1425 return error;
1426 }
1427
1428 static void
1429 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1430 {
1431 int ntries;
1432
1433 if (iwn_nic_lock(sc) == 0) {
1434 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1435 for (ntries = 0; ntries < 1000; ntries++) {
1436 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1437 IWN_FH_RX_STATUS_IDLE)
1438 break;
1439 DELAY(10);
1440 }
1441 iwn_nic_unlock(sc);
1442 }
1443 ring->cur = 0;
1444 sc->last_rx_valid = 0;
1445 }
1446
1447 static void
1448 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1449 {
1450 int i;
1451
1452 iwn_dma_contig_free(&ring->desc_dma);
1453 iwn_dma_contig_free(&ring->stat_dma);
1454
1455 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1456 struct iwn_rx_data *data = &ring->data[i];
1457
1458 if (data->m != NULL) {
1459 bus_dmamap_sync(ring->data_dmat, data->map,
1460 BUS_DMASYNC_POSTREAD);
1461 bus_dmamap_unload(ring->data_dmat, data->map);
1462 m_freem(data->m);
1463 data->m = NULL;
1464 }
1465 if (data->map != NULL)
1466 bus_dmamap_destroy(ring->data_dmat, data->map);
1467 }
1468 if (ring->data_dmat != NULL) {
1469 bus_dma_tag_destroy(ring->data_dmat);
1470 ring->data_dmat = NULL;
1471 }
1472 }
1473
1474 static int
1475 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1476 {
1477 bus_addr_t paddr;
1478 bus_size_t size;
1479 int i, error;
1480
1481 ring->qid = qid;
1482 ring->queued = 0;
1483 ring->cur = 0;
1484
1485 /* Allocate TX descriptors (256-byte aligned). */
1486 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1487 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1488 size, 256);
1489 if (error != 0) {
1490 device_printf(sc->sc_dev,
1491 "%s: could not allocate TX ring DMA memory, error %d\n",
1492 __func__, error);
1493 goto fail;
1494 }
1495 /*
1496 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1497 	 * to allocate command space for the other rings.
1498 * XXX Do we really need to allocate descriptors for other rings?
1499 */
1500 if (qid > 4)
1501 return 0;
1502
1503 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1504 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1505 size, 4);
1506 if (error != 0) {
1507 device_printf(sc->sc_dev,
1508 "%s: could not allocate TX cmd DMA memory, error %d\n",
1509 __func__, error);
1510 goto fail;
1511 }
1512
1513 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1514 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1515 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1516 &ring->data_dmat);
1517 if (error != 0) {
1518 device_printf(sc->sc_dev,
1519 "%s: could not create TX buf DMA tag, error %d\n",
1520 __func__, error);
1521 goto fail;
1522 }
1523
1524 paddr = ring->cmd_dma.paddr;
1525 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1526 struct iwn_tx_data *data = &ring->data[i];
1527
1528 data->cmd_paddr = paddr;
1529 data->scratch_paddr = paddr + 12;
1530 paddr += sizeof (struct iwn_tx_cmd);
1531
1532 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1533 if (error != 0) {
1534 device_printf(sc->sc_dev,
1535 "%s: could not create TX buf DMA map, error %d\n",
1536 __func__, error);
1537 goto fail;
1538 }
1539 }
1540 return 0;
1541
1542 fail: iwn_free_tx_ring(sc, ring);
1543 return error;
1544 }
1545
1546 static void
1547 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1548 {
1549 int i;
1550
1551 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1552 struct iwn_tx_data *data = &ring->data[i];
1553
1554 if (data->m != NULL) {
1555 bus_dmamap_sync(ring->data_dmat, data->map,
1556 BUS_DMASYNC_POSTWRITE);
1557 bus_dmamap_unload(ring->data_dmat, data->map);
1558 m_freem(data->m);
1559 data->m = NULL;
1560 }
1561 }
1562 /* Clear TX descriptors. */
1563 memset(ring->desc, 0, ring->desc_dma.size);
1564 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1565 BUS_DMASYNC_PREWRITE);
1566 sc->qfullmsk &= ~(1 << ring->qid);
1567 ring->queued = 0;
1568 ring->cur = 0;
1569 }
1570
1571 static void
1572 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1573 {
1574 int i;
1575
1576 iwn_dma_contig_free(&ring->desc_dma);
1577 iwn_dma_contig_free(&ring->cmd_dma);
1578
1579 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1580 struct iwn_tx_data *data = &ring->data[i];
1581
1582 if (data->m != NULL) {
1583 bus_dmamap_sync(ring->data_dmat, data->map,
1584 BUS_DMASYNC_POSTWRITE);
1585 bus_dmamap_unload(ring->data_dmat, data->map);
1586 m_freem(data->m);
1587 }
1588 if (data->map != NULL)
1589 bus_dmamap_destroy(ring->data_dmat, data->map);
1590 }
1591 if (ring->data_dmat != NULL) {
1592 bus_dma_tag_destroy(ring->data_dmat);
1593 ring->data_dmat = NULL;
1594 }
1595 }
1596
1597 static void
1598 iwn5000_ict_reset(struct iwn_softc *sc)
1599 {
1600 /* Disable interrupts. */
1601 IWN_WRITE(sc, IWN_INT_MASK, 0);
1602
1603 /* Reset ICT table. */
1604 memset(sc->ict, 0, IWN_ICT_SIZE);
1605 sc->ict_cur = 0;
1606
1607 /* Set physical address of ICT table (4KB aligned). */
1608 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1609 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1610 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1611
1612 /* Enable periodic RX interrupt. */
1613 sc->int_mask |= IWN_INT_RX_PERIODIC;
1614 /* Switch to ICT interrupt mode in driver. */
1615 sc->sc_flags |= IWN_FLAG_USE_ICT;
1616
1617 /* Re-enable interrupts. */
1618 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1619 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1620 }
1621
1622 static int
1623 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1624 {
1625 struct iwn_ops *ops = &sc->ops;
1626 uint16_t val;
1627 int error;
1628
1629 /* Check whether adapter has an EEPROM or an OTPROM. */
1630 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1631 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1632 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1633 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1634 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1635
1636 /* Adapter has to be powered on for EEPROM access to work. */
1637 if ((error = iwn_apm_init(sc)) != 0) {
1638 device_printf(sc->sc_dev,
1639 "%s: could not power ON adapter, error %d\n", __func__,
1640 error);
1641 return error;
1642 }
1643
1644 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1645 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1646 return EIO;
1647 }
1648 if ((error = iwn_eeprom_lock(sc)) != 0) {
1649 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
1650 __func__, error);
1651 return error;
1652 }
1653 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1654 if ((error = iwn_init_otprom(sc)) != 0) {
1655 device_printf(sc->sc_dev,
1656 "%s: could not initialize OTPROM, error %d\n",
1657 __func__, error);
1658 return error;
1659 }
1660 }
1661
1662 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1663 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
1664 /* Check if HT support is bonded out. */
1665 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1666 sc->sc_flags |= IWN_FLAG_HAS_11N;
1667
1668 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1669 sc->rfcfg = le16toh(val);
1670 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1671 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
1672 if (sc->txchainmask == 0)
1673 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1674 if (sc->rxchainmask == 0)
1675 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1676
1677 /* Read MAC address. */
1678 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1679
1680 /* Read adapter-specific information from EEPROM. */
1681 ops->read_eeprom(sc);
1682
1683 iwn_apm_stop(sc); /* Power OFF adapter. */
1684
1685 iwn_eeprom_unlock(sc);
1686 return 0;
1687 }
1688
1689 static void
1690 iwn4965_read_eeprom(struct iwn_softc *sc)
1691 {
1692 uint32_t addr;
1693 uint16_t val;
1694 int i;
1695
1696 /* Read regulatory domain (4 ASCII characters). */
1697 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1698
1699 /* Read the list of authorized channels (20MHz ones only). */
1700 for (i = 0; i < 5; i++) {
1701 addr = iwn4965_regulatory_bands[i];
1702 iwn_read_eeprom_channels(sc, i, addr);
1703 }
1704
1705 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1706 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1707 sc->maxpwr2GHz = val & 0xff;
1708 sc->maxpwr5GHz = val >> 8;
1709 /* Check that EEPROM values are within valid range. */
1710 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1711 sc->maxpwr5GHz = 38;
1712 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1713 sc->maxpwr2GHz = 38;
1714 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1715 sc->maxpwr2GHz, sc->maxpwr5GHz);
1716
1717 /* Read samples for each TX power group. */
1718 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1719 sizeof sc->bands);
1720
1721 /* Read voltage at which samples were taken. */
1722 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1723 sc->eeprom_voltage = (int16_t)le16toh(val);
1724 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1725 sc->eeprom_voltage);
1726
1727 #ifdef IWN_DEBUG
1728 /* Print samples. */
1729 if (sc->sc_debug & IWN_DEBUG_ANY) {
1730 for (i = 0; i < IWN_NBANDS; i++)
1731 iwn4965_print_power_group(sc, i);
1732 }
1733 #endif
1734 }
1735
1736 #ifdef IWN_DEBUG
1737 static void
1738 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1739 {
1740 struct iwn4965_eeprom_band *band = &sc->bands[i];
1741 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1742 int j, c;
1743
1744 printf("===band %d===\n", i);
1745 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1746 printf("chan1 num=%d\n", chans[0].num);
1747 for (c = 0; c < 2; c++) {
1748 for (j = 0; j < IWN_NSAMPLES; j++) {
1749 printf("chain %d, sample %d: temp=%d gain=%d "
1750 "power=%d pa_det=%d\n", c, j,
1751 chans[0].samples[c][j].temp,
1752 chans[0].samples[c][j].gain,
1753 chans[0].samples[c][j].power,
1754 chans[0].samples[c][j].pa_det);
1755 }
1756 }
1757 printf("chan2 num=%d\n", chans[1].num);
1758 for (c = 0; c < 2; c++) {
1759 for (j = 0; j < IWN_NSAMPLES; j++) {
1760 printf("chain %d, sample %d: temp=%d gain=%d "
1761 "power=%d pa_det=%d\n", c, j,
1762 chans[1].samples[c][j].temp,
1763 chans[1].samples[c][j].gain,
1764 chans[1].samples[c][j].power,
1765 chans[1].samples[c][j].pa_det);
1766 }
1767 }
1768 }
1769 #endif
1770
1771 static void
1772 iwn5000_read_eeprom(struct iwn_softc *sc)
1773 {
1774 struct iwn5000_eeprom_calib_hdr hdr;
1775 int32_t volt;
1776 uint32_t base, addr;
1777 uint16_t val;
1778 int i;
1779
1780 /* Read regulatory domain (4 ASCII characters). */
1781 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1782 base = le16toh(val);
1783 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1784 sc->eeprom_domain, 4);
1785
1786 /* Read the list of authorized channels (20MHz ones only). */
1787 for (i = 0; i < 5; i++) {
1788 addr = base + iwn5000_regulatory_bands[i];
1789 iwn_read_eeprom_channels(sc, i, addr);
1790 }
1791
1792 /* Read enhanced TX power information for 6000 Series. */
1793 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1794 iwn_read_eeprom_enhinfo(sc);
1795
1796 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1797 base = le16toh(val);
1798 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1799 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1800 "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
1801 hdr.version, hdr.pa_type, le16toh(hdr.volt));
1802 sc->calib_ver = hdr.version;
1803
1804 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1805 /* Compute temperature offset. */
1806 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1807 sc->eeprom_temp = le16toh(val);
1808 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1809 volt = le16toh(val);
1810 sc->temp_off = sc->eeprom_temp - (volt / -5);
1811 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1812 sc->eeprom_temp, volt, sc->temp_off);
1813 } else {
1814 /* Read crystal calibration. */
1815 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1816 &sc->eeprom_crystal, sizeof (uint32_t));
1817 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1818 le32toh(sc->eeprom_crystal));
1819 }
1820 }
1821
1822 /*
1823 * Translate EEPROM flags to net80211.
1824 */
1825 static uint32_t
1826 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1827 {
1828 uint32_t nflags;
1829
1830 nflags = 0;
1831 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1832 nflags |= IEEE80211_CHAN_PASSIVE;
1833 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1834 nflags |= IEEE80211_CHAN_NOADHOC;
1835 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1836 nflags |= IEEE80211_CHAN_DFS;
1837 /* XXX apparently IBSS may still be marked */
1838 nflags |= IEEE80211_CHAN_NOADHOC;
1839 }
1840
1841 return nflags;
1842 }
1843
1844 static void
1845 iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1846 {
1847 struct ifnet *ifp = sc->sc_ifp;
1848 struct ieee80211com *ic = ifp->if_l2com;
1849 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1850 const struct iwn_chan_band *band = &iwn_bands[n];
1851 struct ieee80211_channel *c;
1852 uint8_t chan;
1853 int i, nflags;
1854
1855 for (i = 0; i < band->nchan; i++) {
1856 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1857 DPRINTF(sc, IWN_DEBUG_RESET,
1858 "skip chan %d flags 0x%x maxpwr %d\n",
1859 band->chan[i], channels[i].flags,
1860 channels[i].maxpwr);
1861 continue;
1862 }
1863 chan = band->chan[i];
1864 nflags = iwn_eeprom_channel_flags(&channels[i]);
1865
1866 c = &ic->ic_channels[ic->ic_nchans++];
1867 c->ic_ieee = chan;
1868 c->ic_maxregpower = channels[i].maxpwr;
1869 c->ic_maxpower = 2*c->ic_maxregpower;
1870
1871 if (n == 0) { /* 2GHz band */
1872 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
1873 			/* G implies that B is supported. */
1874 c->ic_flags = IEEE80211_CHAN_B | nflags;
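			/*
			 * The next two lines clone the entry just filled in and
			 * retag the copy as an 11g channel, so each 2GHz channel
			 * appears twice in ic_channels: once for B, once for G.
			 */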
1875 c = &ic->ic_channels[ic->ic_nchans++];
1876 c[0] = c[-1];
1877 c->ic_flags = IEEE80211_CHAN_G | nflags;
1878 } else { /* 5GHz band */
1879 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
1880 c->ic_flags = IEEE80211_CHAN_A | nflags;
1881 }
1882
1883 /* Save maximum allowed TX power for this channel. */
1884 sc->maxpwr[chan] = channels[i].maxpwr;
1885
1886 DPRINTF(sc, IWN_DEBUG_RESET,
1887 "add chan %d flags 0x%x maxpwr %d\n", chan,
1888 channels[i].flags, channels[i].maxpwr);
1889
1890 #if 0 /* HT */
1891 /* XXX no constraints on using HT20 */
1892 /* add HT20, HT40 added separately */
1893 c = &ic->ic_channels[ic->ic_nchans++];
1894 c[0] = c[-1];
1895 c->ic_flags |= IEEE80211_CHAN_HT20;
1896 /* XXX NARROW =>'s 1/2 and 1/4 width? */
1897 #endif
1898 }
1899 }
1900
1901 #if 0 /* HT */
1902 static void
1903 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1904 {
1905 struct ifnet *ifp = sc->sc_ifp;
1906 struct ieee80211com *ic = ifp->if_l2com;
1907 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1908 const struct iwn_chan_band *band = &iwn_bands[n];
1909 struct ieee80211_channel *c, *cent, *extc;
1910 int i;
1911
1912 for (i = 0; i < band->nchan; i++) {
1913 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
1914 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
1915 DPRINTF(sc, IWN_DEBUG_RESET,
1916 "skip chan %d flags 0x%x maxpwr %d\n",
1917 band->chan[i], channels[i].flags,
1918 channels[i].maxpwr);
1919 continue;
1920 }
1921 /*
1922 * Each entry defines an HT40 channel pair; find the
1923 * center channel, then the extension channel above.
1924 */
1925 cent = ieee80211_find_channel_byieee(ic, band->chan[i],
1926 band->flags & ~IEEE80211_CHAN_HT);
1927 if (cent == NULL) { /* XXX shouldn't happen */
1928 device_printf(sc->sc_dev,
1929 "%s: no entry for channel %d\n",
1930 __func__, band->chan[i]);
1931 continue;
1932 }
1933 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1934 band->flags & ~IEEE80211_CHAN_HT);
1935 if (extc == NULL) {
1936 DPRINTF(sc, IWN_DEBUG_RESET,
1937 "skip chan %d, extension channel not found\n",
1938 band->chan[i]);
1939 continue;
1940 }
1941
1942 DPRINTF(sc, IWN_DEBUG_RESET,
1943 "add ht40 chan %d flags 0x%x maxpwr %d\n",
1944 band->chan[i], channels[i].flags, channels[i].maxpwr);
1945
1946 c = &ic->ic_channels[ic->ic_nchans++];
1947 c[0] = cent[0];
1948 c->ic_extieee = extc->ic_ieee;
1949 c->ic_flags &= ~IEEE80211_CHAN_HT;
1950 c->ic_flags |= IEEE80211_CHAN_HT40U;
1951 c = &ic->ic_channels[ic->ic_nchans++];
1952 c[0] = extc[0];
1953 c->ic_extieee = cent->ic_ieee;
1954 c->ic_flags &= ~IEEE80211_CHAN_HT;
1955 c->ic_flags |= IEEE80211_CHAN_HT40D;
1956 }
1957 }
1958 #endif
1959
1960 static void
1961 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1962 {
1963 struct ifnet *ifp = sc->sc_ifp;
1964 struct ieee80211com *ic = ifp->if_l2com;
1965
1966 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1967 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1968
1969 if (n < 5)
1970 iwn_read_eeprom_band(sc, n);
1971 #if 0 /* HT */
1972 else
1973 iwn_read_eeprom_ht40(sc, n);
1974 #endif
1975 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1976 }
1977
1978 static struct iwn_eeprom_chan *
1979 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
1980 {
1981 int i, j;
1982
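	/*
	 * NB: iwn_bands[] has seven entries: 0-4 are the 20MHz channel
	 * lists read above; 5-6 describe the HT40 pairs and are only
	 * filled in when the HT code under #if 0 is enabled.
	 */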
1983 for (j = 0; j < 7; j++) {
1984 for (i = 0; i < iwn_bands[j].nchan; i++) {
1985 if (iwn_bands[j].chan[i] == c->ic_ieee)
1986 return &sc->eeprom_channels[j][i];
1987 }
1988 }
1989
1990 return NULL;
1991 }
1992
1993 /*
1994 * Enforce flags read from EEPROM.
1995 */
1996 static int
1997 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1998 int nchan, struct ieee80211_channel chans[])
1999 {
2000 struct iwn_softc *sc = ic->ic_ifp->if_softc;
2001 int i;
2002
2003 for (i = 0; i < nchan; i++) {
2004 struct ieee80211_channel *c = &chans[i];
2005 struct iwn_eeprom_chan *channel;
2006
2007 channel = iwn_find_eeprom_channel(sc, c);
2008 if (channel == NULL) {
2009 if_printf(ic->ic_ifp,
2010 "%s: invalid channel %u freq %u/0x%x\n",
2011 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2012 return EINVAL;
2013 }
2014 c->ic_flags |= iwn_eeprom_channel_flags(channel);
2015 }
2016
2017 return 0;
2018 }
2019
2020 #define nitems(_a) (sizeof((_a)) / sizeof((_a)[0]))
2021
2022 static void
2023 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2024 {
2025 struct iwn_eeprom_enhinfo enhinfo[35];
2026 uint16_t val, base;
2027 int8_t maxpwr;
2028 int i;
2029
2030 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2031 base = le16toh(val);
2032 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2033 enhinfo, sizeof enhinfo);
2034
2035 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
2036 for (i = 0; i < nitems(enhinfo); i++) {
2037 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
2038 continue; /* Skip invalid entries. */
2039
2040 maxpwr = 0;
2041 if (sc->txchainmask & IWN_ANT_A)
2042 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2043 if (sc->txchainmask & IWN_ANT_B)
2044 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2045 if (sc->txchainmask & IWN_ANT_C)
2046 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2047 if (sc->ntxchains == 2)
2048 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2049 else if (sc->ntxchains == 3)
2050 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2051 maxpwr /= 2; /* Convert half-dBm to dBm. */
2052
2053 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i,
2054 maxpwr);
2055 sc->enh_maxpwr[i] = maxpwr;
2056 }
2057 }
2058
2059 static struct ieee80211_node *
2060 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2061 {
2062 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2063 }
2064
2065 static void
2066 iwn_newassoc(struct ieee80211_node *ni, int isnew)
2067 {
2068 struct iwn_node *wn = (void *)ni;
2069 int ridx, i;
2070
2071 for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
2072 ridx = iwn_plcp_signal(ni->ni_rates.rs_rates[i]);
2073 wn->ridx[i] = ridx;
2074 }
2075 }
2076
2077 static int
2078 iwn_media_change(struct ifnet *ifp)
2079 {
2080 int error;
2081
2082 error = ieee80211_media_change(ifp);
2083 /* NB: only the fixed rate can change and that doesn't need a reset */
2084 return (error == ENETRESET ? 0 : error);
2085 }
2086
2087 static int
2088 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2089 {
2090 struct iwn_vap *ivp = IWN_VAP(vap);
2091 struct ieee80211com *ic = vap->iv_ic;
2092 struct iwn_softc *sc = ic->ic_ifp->if_softc;
2093 int error = 0;
2094
2095 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2096 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2097
2098 IEEE80211_UNLOCK(ic);
2099 IWN_LOCK(sc);
2100 callout_stop(&sc->calib_to);
2101
2102 switch (nstate) {
2103 case IEEE80211_S_ASSOC:
2104 if (vap->iv_state != IEEE80211_S_RUN)
2105 break;
2106 /* FALLTHROUGH */
2107 case IEEE80211_S_AUTH:
2108 if (vap->iv_state == IEEE80211_S_AUTH)
2109 break;
2110
2111 /*
2112 * !AUTH -> AUTH transition requires state reset to handle
2113 * reassociations correctly.
2114 */
2115 sc->rxon.associd = 0;
2116 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
2117 sc->calib.state = IWN_CALIB_STATE_INIT;
2118
2119 if ((error = iwn_auth(sc, vap)) != 0) {
2120 device_printf(sc->sc_dev,
2121 "%s: could not move to auth state\n", __func__);
2122 }
2123 break;
2124
2125 case IEEE80211_S_RUN:
2126 /*
2127 		 * RUN -> RUN transition; just restart the timers.
2128 */
2129 if (vap->iv_state == IEEE80211_S_RUN) {
2130 sc->calib_cnt = 0;
2131 break;
2132 }
2133
2134 /*
2135 * !RUN -> RUN requires setting the association id
2136 * which is done with a firmware cmd. We also defer
2137 * starting the timers until that work is done.
2138 */
2139 if ((error = iwn_run(sc, vap)) != 0) {
2140 device_printf(sc->sc_dev,
2141 "%s: could not move to run state\n", __func__);
2142 }
2143 break;
2144
2145 case IEEE80211_S_INIT:
2146 sc->calib.state = IWN_CALIB_STATE_INIT;
2147 break;
2148
2149 default:
2150 break;
2151 }
2152 IWN_UNLOCK(sc);
2153 IEEE80211_LOCK(ic);
2154 if (error != 0)
2155 return error;
2156 return ivp->iv_newstate(vap, nstate, arg);
2157 }
2158
2159 static void
2160 iwn_calib_timeout(void *arg)
2161 {
2162 struct iwn_softc *sc = arg;
2163
2164 IWN_LOCK_ASSERT(sc);
2165
2166 /* Force automatic TX power calibration every 60 secs. */
2167 if (++sc->calib_cnt >= 120) {
2168 uint32_t flags = 0;
2169
2170 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2171 "sending request for statistics");
2172 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2173 sizeof flags, 1);
2174 sc->calib_cnt = 0;
2175 }
2176 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2177 sc);
2178 }
2179
2180 /*
2181 * Process an RX_PHY firmware notification. This is usually immediately
2182 * followed by an MPDU_RX_DONE notification.
2183 */
2184 static void
2185 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2186 struct iwn_rx_data *data)
2187 {
2188 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2189
2190 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2191 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2192
2193 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
2194 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2195 sc->last_rx_valid = 1;
2196 }
2197
2198 /*
2199 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2200 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2201 */
2202 static void
2203 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2204 struct iwn_rx_data *data)
2205 {
2206 struct iwn_ops *ops = &sc->ops;
2207 struct ifnet *ifp = sc->sc_ifp;
2208 struct ieee80211com *ic = ifp->if_l2com;
2209 struct iwn_rx_ring *ring = &sc->rxq;
2210 struct ieee80211_frame *wh;
2211 struct ieee80211_node *ni;
2212 struct mbuf *m, *m1;
2213 struct iwn_rx_stat *stat;
2214 caddr_t head;
2215 bus_addr_t paddr;
2216 uint32_t flags;
2217 int error, len, rssi, nf;
2218
2219 if (desc->type == IWN_MPDU_RX_DONE) {
2220 /* Check for prior RX_PHY notification. */
2221 if (!sc->last_rx_valid) {
2222 DPRINTF(sc, IWN_DEBUG_ANY,
2223 "%s: missing RX_PHY\n", __func__);
2224 return;
2225 }
2226 sc->last_rx_valid = 0;
2227 stat = &sc->last_rx_stat;
2228 } else
2229 stat = (struct iwn_rx_stat *)(desc + 1);
2230
2231 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2232
2233 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2234 device_printf(sc->sc_dev,
2235 "%s: invalid RX statistic header, len %d\n", __func__,
2236 stat->cfg_phy_len);
2237 return;
2238 }
2239 if (desc->type == IWN_MPDU_RX_DONE) {
2240 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2241 head = (caddr_t)(mpdu + 1);
2242 len = le16toh(mpdu->len);
2243 } else {
2244 head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2245 len = le16toh(stat->len);
2246 }
2247
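	/*
	 * The firmware appends a 32-bit receive-status word right after
	 * the frame payload; it carries the error bits checked against
	 * IWN_RX_NOERROR below.
	 */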
2248 flags = le32toh(*(uint32_t *)(head + len));
2249
2250 /* Discard frames with a bad FCS early. */
2251 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2252 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2253 __func__, flags);
2254 ifp->if_ierrors++;
2255 return;
2256 }
2257 /* Discard frames that are too short. */
2258 if (len < sizeof (*wh)) {
2259 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2260 __func__, len);
2261 ifp->if_ierrors++;
2262 return;
2263 }
2264
2265 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2266 if (m1 == NULL) {
2267 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2268 __func__);
2269 ifp->if_ierrors++;
2270 return;
2271 }
2272 bus_dmamap_unload(ring->data_dmat, data->map);
2273
2274 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2275 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2276 if (error != 0 && error != EFBIG) {
2277 device_printf(sc->sc_dev,
2278 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2279 m_freem(m1);
2280
2281 /* Try to reload the old mbuf. */
2282 error = bus_dmamap_load(ring->data_dmat, data->map,
2283 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
2284 &paddr, BUS_DMA_NOWAIT);
2285 if (error != 0 && error != EFBIG) {
2286 panic("%s: could not load old RX mbuf", __func__);
2287 }
2288 /* Physical address may have changed. */
2289 ring->desc[ring->cur] = htole32(paddr >> 8);
2290 		bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2291 		    BUS_DMASYNC_PREWRITE);
2292 ifp->if_ierrors++;
2293 return;
2294 }
2295
2296 m = data->m;
2297 data->m = m1;
2298 /* Update RX descriptor. */
2299 ring->desc[ring->cur] = htole32(paddr >> 8);
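	/*
	 * NB: RX descriptors hold the buffer DMA address shifted right
	 * by 8, which presumes receive buffers aligned on 256 bytes.
	 */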
2300 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2301 BUS_DMASYNC_PREWRITE);
2302
2303 /* Finalize mbuf. */
2304 m->m_pkthdr.rcvif = ifp;
2305 m->m_data = head;
2306 m->m_pkthdr.len = m->m_len = len;
2307
2308 /* Grab a reference to the source node. */
2309 wh = mtod(m, struct ieee80211_frame *);
2310 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2311 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2312 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2313
2314 rssi = ops->get_rssi(sc, stat);
2315
2316 if (ieee80211_radiotap_active(ic)) {
2317 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2318
2319 tap->wr_flags = 0;
2320 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2321 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2322 tap->wr_dbm_antsignal = (int8_t)rssi;
2323 tap->wr_dbm_antnoise = (int8_t)nf;
2324 tap->wr_tsft = stat->tstamp;
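		/*
		 * The hardware reports CCK rates in 0.1Mb/s units and OFDM
		 * rates as PLCP SIGNAL codes (0xd = 6Mb/s ... 0x3 = 54Mb/s);
		 * wr_rate is in the 0.5Mb/s units radiotap expects.
		 */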
2325 switch (stat->rate) {
2326 /* CCK rates. */
2327 case 10: tap->wr_rate = 2; break;
2328 case 20: tap->wr_rate = 4; break;
2329 case 55: tap->wr_rate = 11; break;
2330 case 110: tap->wr_rate = 22; break;
2331 /* OFDM rates. */
2332 case 0xd: tap->wr_rate = 12; break;
2333 case 0xf: tap->wr_rate = 18; break;
2334 case 0x5: tap->wr_rate = 24; break;
2335 case 0x7: tap->wr_rate = 36; break;
2336 case 0x9: tap->wr_rate = 48; break;
2337 case 0xb: tap->wr_rate = 72; break;
2338 case 0x1: tap->wr_rate = 96; break;
2339 case 0x3: tap->wr_rate = 108; break;
2340 /* Unknown rate: should not happen. */
2341 default: tap->wr_rate = 0;
2342 }
2343 }
2344
2345 IWN_UNLOCK(sc);
2346
2347 /* Send the frame to the 802.11 layer. */
2348 if (ni != NULL) {
2349 (void)ieee80211_input(ni, m, rssi - nf, nf);
2350 /* Node is no longer needed. */
2351 ieee80211_free_node(ni);
2352 } else
2353 (void)ieee80211_input_all(ic, m, rssi - nf, nf);
2354
2355 IWN_LOCK(sc);
2356 }
2357
2358 #if 0 /* HT */
2359 /* Process an incoming Compressed BlockAck. */
2360 static void
2361 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2362 struct iwn_rx_data *data)
2363 {
2364 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2365 struct iwn_tx_ring *txq;
2366
2367 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2368
2369 txq = &sc->txq[letoh16(ba->qid)];
2370 /* XXX TBD */
2371 }
2372 #endif
2373
2374 /*
2375 * Process a CALIBRATION_RESULT notification sent by the initialization
2376  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2377 */
2378 static void
2379 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2380 struct iwn_rx_data *data)
2381 {
2382 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2383 int len, idx = -1;
2384
2385 /* Runtime firmware should not send such a notification. */
2386 if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2387 return;
2388
2389 len = (le32toh(desc->len) & 0x3fff) - 4;
2390 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2391
2392 switch (calib->code) {
2393 case IWN5000_PHY_CALIB_DC:
2394 if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
2395 (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2396 sc->hw_type >= IWN_HW_REV_TYPE_6000))
2397 idx = 0;
2398 break;
2399 case IWN5000_PHY_CALIB_LO:
2400 idx = 1;
2401 break;
2402 case IWN5000_PHY_CALIB_TX_IQ:
2403 idx = 2;
2404 break;
2405 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2406 if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2407 sc->hw_type != IWN_HW_REV_TYPE_5150)
2408 idx = 3;
2409 break;
2410 case IWN5000_PHY_CALIB_BASE_BAND:
2411 idx = 4;
2412 break;
2413 }
2414 if (idx == -1) /* Ignore other results. */
2415 return;
2416
2417 /* Save calibration result. */
2418 if (sc->calibcmd[idx].buf != NULL)
2419 free(sc->calibcmd[idx].buf, M_DEVBUF);
2420 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2421 if (sc->calibcmd[idx].buf == NULL) {
2422 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2423 "not enough memory for calibration result %d\n",
2424 calib->code);
2425 return;
2426 }
2427 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2428 "saving calibration result code=%d len=%d\n", calib->code, len);
2429 sc->calibcmd[idx].len = len;
2430 memcpy(sc->calibcmd[idx].buf, calib, len);
2431 }
2432
2433 /*
2434 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2435 * The latter is sent by the firmware after each received beacon.
2436 */
2437 static void
2438 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2439 struct iwn_rx_data *data)
2440 {
2441 struct iwn_ops *ops = &sc->ops;
2442 struct ifnet *ifp = sc->sc_ifp;
2443 struct ieee80211com *ic = ifp->if_l2com;
2444 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2445 struct iwn_calib_state *calib = &sc->calib;
2446 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2447 int temp;
2448
2449 /* Ignore statistics received during a scan. */
2450 if (vap->iv_state != IEEE80211_S_RUN ||
2451 (ic->ic_flags & IEEE80211_F_SCAN))
2452 return;
2453
2454 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2455
2456 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
2457 __func__, desc->type);
2458 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
2459
2460 /* Test if temperature has changed. */
2461 if (stats->general.temp != sc->rawtemp) {
2462 /* Convert "raw" temperature to degC. */
2463 sc->rawtemp = stats->general.temp;
2464 temp = ops->get_temperature(sc);
2465 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2466 __func__, temp);
2467
2468 /* Update TX power if need be (4965AGN only). */
2469 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2470 iwn4965_power_calibration(sc, temp);
2471 }
2472
2473 if (desc->type != IWN_BEACON_STATISTICS)
2474 return; /* Reply to a statistics request. */
2475
2476 sc->noise = iwn_get_noise(&stats->rx.general);
2477 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2478
2479 /* Test that RSSI and noise are present in stats report. */
2480 if (le32toh(stats->rx.general.flags) != 1) {
2481 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2482 "received statistics without RSSI");
2483 return;
2484 }
2485
2486 if (calib->state == IWN_CALIB_STATE_ASSOC)
2487 iwn_collect_noise(sc, &stats->rx.general);
2488 else if (calib->state == IWN_CALIB_STATE_RUN)
2489 iwn_tune_sensitivity(sc, &stats->rx);
2490 }
2491
2492 /*
2493 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2494  * and 5000 adapters use different, incompatible TX status formats.
2495 */
2496 static void
2497 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2498 struct iwn_rx_data *data)
2499 {
2500 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2501 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2502
2503 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2504 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2505 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2506 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2507 le32toh(stat->status));
2508
2509 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2510 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2511 }
2512
2513 static void
2514 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2515 struct iwn_rx_data *data)
2516 {
2517 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2518 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2519
2520 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2521 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2522 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2523 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2524 le32toh(stat->status));
2525
2526 #ifdef notyet
2527 /* Reset TX scheduler slot. */
2528 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2529 #endif
2530
2531 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2532 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2533 }
2534
2535 /*
2536 * Adapter-independent backend for TX_DONE firmware notifications.
2537 */
2538 static void
2539 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2540 uint8_t status)
2541 {
2542 struct ifnet *ifp = sc->sc_ifp;
2543 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2544 struct iwn_tx_data *data = &ring->data[desc->idx];
2545 struct mbuf *m;
2546 struct ieee80211_node *ni;
2547 struct ieee80211vap *vap;
2548
2549 KASSERT(data->ni != NULL, ("no node"));
2550
2551 /* Unmap and free mbuf. */
2552 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2553 bus_dmamap_unload(ring->data_dmat, data->map);
2554 m = data->m, data->m = NULL;
2555 ni = data->ni, data->ni = NULL;
2556 vap = ni->ni_vap;
2557
2558 if (m->m_flags & M_TXCB) {
2559 /*
2560 * Channels marked for "radar" require traffic to be received
2561 * to unlock before we can transmit. Until traffic is seen
2562 * any attempt to transmit is returned immediately with status
2563 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
2564 * happen on first authenticate after scanning. To workaround
2565 * this we ignore a failure of this sort in AUTH state so the
2566 * 802.11 layer will fall back to using a timeout to wait for
2567 * the AUTH reply. This allows the firmware time to see
2568 * traffic so a subsequent retry of AUTH succeeds. It's
2569 * unclear why the firmware does not maintain state for
2570 * channels recently visited as this would allow immediate
2571 * use of the channel after a scan (where we see traffic).
2572 */
2573 if (status == IWN_TX_FAIL_TX_LOCKED &&
2574 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2575 ieee80211_process_callback(ni, m, 0);
2576 else
2577 ieee80211_process_callback(ni, m,
2578 (status & IWN_TX_FAIL) != 0);
2579 }
2580
2581 /*
2582 * Update rate control statistics for the node.
2583 */
2584 if (status & IWN_TX_FAIL) {
2585 ifp->if_oerrors++;
2586 ieee80211_ratectl_tx_complete(vap, ni,
2587 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2588 } else {
2589 ifp->if_opackets++;
2590 ieee80211_ratectl_tx_complete(vap, ni,
2591 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2592 }
2593 m_freem(m);
2594 ieee80211_free_node(ni);
2595
2596 sc->sc_tx_timer = 0;
2597 if (--ring->queued < IWN_TX_RING_LOMARK) {
2598 sc->qfullmsk &= ~(1 << ring->qid);
2599 if (sc->qfullmsk == 0 &&
2600 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2601 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2602 iwn_start_locked(ifp);
2603 }
2604 }
2605 }
2606
2607 /*
2608  * Process a "command done" firmware notification.  This is where we wake up
2609 * processes waiting for a synchronous command completion.
2610 */
2611 static void
2612 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2613 {
2614 struct iwn_tx_ring *ring = &sc->txq[4];
2615 struct iwn_tx_data *data;
2616
2617 if ((desc->qid & 0xf) != 4)
2618 return; /* Not a command ack. */
2619
2620 data = &ring->data[desc->idx];
2621
2622 /* If the command was mapped in an mbuf, free it. */
2623 if (data->m != NULL) {
2624 bus_dmamap_sync(ring->data_dmat, data->map,
2625 BUS_DMASYNC_POSTWRITE);
2626 bus_dmamap_unload(ring->data_dmat, data->map);
2627 m_freem(data->m);
2628 data->m = NULL;
2629 }
2630 wakeup(&ring->desc[desc->idx]);
2631 }
2632
2633 /*
2634 * Process an INT_FH_RX or INT_SW_RX interrupt.
2635 */
2636 static void
2637 iwn_notif_intr(struct iwn_softc *sc)
2638 {
2639 struct iwn_ops *ops = &sc->ops;
2640 struct ifnet *ifp = sc->sc_ifp;
2641 struct ieee80211com *ic = ifp->if_l2com;
2642 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2643 uint16_t hw;
2644
2645 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2646 BUS_DMASYNC_POSTREAD);
2647
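	/*
	 * closed_count in the shared status page is the firmware's RX
	 * write index (12 bits); process ring entries until our read
	 * index catches up with it.
	 */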
2648 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2649 while (sc->rxq.cur != hw) {
2650 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2651 struct iwn_rx_desc *desc;
2652
2653 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2654 BUS_DMASYNC_POSTREAD);
2655 desc = mtod(data->m, struct iwn_rx_desc *);
2656
2657 DPRINTF(sc, IWN_DEBUG_RECV,
2658 "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2659 __func__, desc->qid & 0xf, desc->idx, desc->flags,
2660 desc->type, iwn_intr_str(desc->type),
2661 le16toh(desc->len));
2662
2663 if (!(desc->qid & 0x80)) /* Reply to a command. */
2664 iwn_cmd_done(sc, desc);
2665
2666 switch (desc->type) {
2667 case IWN_RX_PHY:
2668 iwn_rx_phy(sc, desc, data);
2669 break;
2670
2671 case IWN_RX_DONE: /* 4965AGN only. */
2672 case IWN_MPDU_RX_DONE:
2673 /* An 802.11 frame has been received. */
2674 iwn_rx_done(sc, desc, data);
2675 break;
2676
2677 #if 0 /* HT */
2678 case IWN_RX_COMPRESSED_BA:
2679 /* A Compressed BlockAck has been received. */
2680 iwn_rx_compressed_ba(sc, desc, data);
2681 break;
2682 #endif
2683
2684 case IWN_TX_DONE:
2685 /* An 802.11 frame has been transmitted. */
2686 ops->tx_done(sc, desc, data);
2687 break;
2688
2689 case IWN_RX_STATISTICS:
2690 case IWN_BEACON_STATISTICS:
2691 iwn_rx_statistics(sc, desc, data);
2692 break;
2693
2694 case IWN_BEACON_MISSED:
2695 {
2696 struct iwn_beacon_missed *miss =
2697 (struct iwn_beacon_missed *)(desc + 1);
2698 int misses;
2699
2700 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2701 BUS_DMASYNC_POSTREAD);
2702 misses = le32toh(miss->consecutive);
2703
2704 DPRINTF(sc, IWN_DEBUG_STATE,
2705 "%s: beacons missed %d/%d\n", __func__,
2706 misses, le32toh(miss->total));
2707 /*
2708 * If more than 5 consecutive beacons are missed,
2709 * reinitialize the sensitivity state machine.
2710 */
2711 if (vap->iv_state == IEEE80211_S_RUN &&
2712 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
2713 if (misses > 5)
2714 (void)iwn_init_sensitivity(sc);
2715 if (misses >= vap->iv_bmissthreshold) {
2716 IWN_UNLOCK(sc);
2717 ieee80211_beacon_miss(ic);
2718 IWN_LOCK(sc);
2719 }
2720 }
2721 break;
2722 }
2723 case IWN_UC_READY:
2724 {
2725 struct iwn_ucode_info *uc =
2726 (struct iwn_ucode_info *)(desc + 1);
2727
2728 /* The microcontroller is ready. */
2729 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2730 BUS_DMASYNC_POSTREAD);
2731 DPRINTF(sc, IWN_DEBUG_RESET,
2732 "microcode alive notification version=%d.%d "
2733 "subtype=%x alive=%x\n", uc->major, uc->minor,
2734 uc->subtype, le32toh(uc->valid));
2735
2736 if (le32toh(uc->valid) != 1) {
2737 device_printf(sc->sc_dev,
2738 "microcontroller initialization failed");
2739 break;
2740 }
2741 if (uc->subtype == IWN_UCODE_INIT) {
2742 /* Save microcontroller report. */
2743 memcpy(&sc->ucode_info, uc, sizeof (*uc));
2744 }
2745 /* Save the address of the error log in SRAM. */
2746 sc->errptr = le32toh(uc->errptr);
2747 break;
2748 }
2749 case IWN_STATE_CHANGED:
2750 {
2751 uint32_t *status = (uint32_t *)(desc + 1);
2752
2753 /*
2754 * State change allows hardware switch change to be
2755 * noted. However, we handle this in iwn_intr as we
2756 			 * get both the enable and disable interrupts.
2757 */
2758 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2759 BUS_DMASYNC_POSTREAD);
2760 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
2761 le32toh(*status));
2762 break;
2763 }
2764 case IWN_START_SCAN:
2765 {
2766 struct iwn_start_scan *scan =
2767 (struct iwn_start_scan *)(desc + 1);
2768
2769 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2770 BUS_DMASYNC_POSTREAD);
2771 DPRINTF(sc, IWN_DEBUG_ANY,
2772 "%s: scanning channel %d status %x\n",
2773 __func__, scan->chan, le32toh(scan->status));
2774 break;
2775 }
2776 case IWN_STOP_SCAN:
2777 {
2778 struct iwn_stop_scan *scan =
2779 (struct iwn_stop_scan *)(desc + 1);
2780
2781 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2782 BUS_DMASYNC_POSTREAD);
2783 DPRINTF(sc, IWN_DEBUG_STATE,
2784 "scan finished nchan=%d status=%d chan=%d\n",
2785 scan->nchan, scan->status, scan->chan);
2786
2787 IWN_UNLOCK(sc);
2788 ieee80211_scan_next(vap);
2789 IWN_LOCK(sc);
2790 break;
2791 }
2792 case IWN5000_CALIBRATION_RESULT:
2793 iwn5000_rx_calib_results(sc, desc, data);
2794 break;
2795
2796 case IWN5000_CALIBRATION_DONE:
2797 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2798 wakeup(sc);
2799 break;
2800 }
2801
2802 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2803 }
2804
2805 /* Tell the firmware what we have processed. */
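	/*
	 * NB: back up one slot so the pointer names the last entry we
	 * processed; the hardware appears to require the RX write pointer
	 * to be a multiple of 8, hence the rounding with ~7.
	 */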
2806 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
2807 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2808 }
2809
2810 /*
2811 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2812 * from power-down sleep mode.
2813 */
2814 static void
2815 iwn_wakeup_intr(struct iwn_softc *sc)
2816 {
2817 int qid;
2818
2819 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
2820 __func__);
2821
2822 /* Wakeup RX and TX rings. */
2823 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2824 for (qid = 0; qid < sc->ntxqs; qid++) {
2825 struct iwn_tx_ring *ring = &sc->txq[qid];
2826 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2827 }
2828 }
2829
2830 static void
2831 iwn_rftoggle_intr(struct iwn_softc *sc)
2832 {
2833 struct ifnet *ifp = sc->sc_ifp;
2834 struct ieee80211com *ic = ifp->if_l2com;
2835 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
2836
2837 IWN_LOCK_ASSERT(sc);
2838
2839 device_printf(sc->sc_dev, "RF switch: radio %s\n",
2840 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2841 if (tmp & IWN_GP_CNTRL_RFKILL)
2842 ieee80211_runtask(ic, &sc->sc_radioon_task);
2843 else
2844 ieee80211_runtask(ic, &sc->sc_radiooff_task);
2845 }
2846
2847 /*
2848 * Dump the error log of the firmware when a firmware panic occurs. Although
2849 * we can't debug the firmware because it is neither open source nor free, it
2850 * can help us to identify certain classes of problems.
2851 */
2852 static void
2853 iwn_fatal_intr(struct iwn_softc *sc)
2854 {
2855 struct iwn_fw_dump dump;
2856 int i;
2857
2858 IWN_LOCK_ASSERT(sc);
2859
2860 /* Force a complete recalibration on next init. */
2861 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2862
2863 /* Check that the error log address is valid. */
2864 if (sc->errptr < IWN_FW_DATA_BASE ||
2865 sc->errptr + sizeof (dump) >
2866 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
2867 printf("%s: bad firmware error log address 0x%08x\n", __func__,
2868 sc->errptr);
2869 return;
2870 }
2871 if (iwn_nic_lock(sc) != 0) {
2872 printf("%s: could not read firmware error log\n", __func__);
2873 return;
2874 }
2875 /* Read firmware error log from SRAM. */
2876 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2877 sizeof (dump) / sizeof (uint32_t));
2878 iwn_nic_unlock(sc);
2879
2880 if (dump.valid == 0) {
2881 printf("%s: firmware error log is empty\n", __func__);
2882 return;
2883 }
2884 printf("firmware error log:\n");
2885 printf(" error type = \"%s\" (0x%08X)\n",
2886 (dump.id < nitems(iwn_fw_errmsg)) ?
2887 iwn_fw_errmsg[dump.id] : "UNKNOWN",
2888 dump.id);
2889 printf(" program counter = 0x%08X\n", dump.pc);
2890 printf(" source line = 0x%08X\n", dump.src_line);
2891 printf(" error data = 0x%08X%08X\n",
2892 dump.error_data[0], dump.error_data[1]);
2893 printf(" branch link = 0x%08X%08X\n",
2894 dump.branch_link[0], dump.branch_link[1]);
2895 printf(" interrupt link = 0x%08X%08X\n",
2896 dump.interrupt_link[0], dump.interrupt_link[1]);
2897 printf(" time = %u\n", dump.time[0]);
2898
2899 /* Dump driver status (TX and RX rings) while we're here. */
2900 printf("driver status:\n");
2901 for (i = 0; i < sc->ntxqs; i++) {
2902 struct iwn_tx_ring *ring = &sc->txq[i];
2903 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2904 i, ring->qid, ring->cur, ring->queued);
2905 }
2906 printf(" rx ring: cur=%d\n", sc->rxq.cur);
2907 }
2908
2909 static void
2910 iwn_intr(void *arg)
2911 {
2912 struct iwn_softc *sc = arg;
2913 struct ifnet *ifp = sc->sc_ifp;
2914 uint32_t r1, r2, tmp;
2915
2916 IWN_LOCK(sc);
2917
2918 /* Disable interrupts. */
2919 IWN_WRITE(sc, IWN_INT_MASK, 0);
2920
2921 /* Read interrupts from ICT (fast) or from registers (slow). */
2922 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2923 tmp = 0;
2924 while (sc->ict[sc->ict_cur] != 0) {
2925 tmp |= sc->ict[sc->ict_cur];
2926 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
2927 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2928 }
2929 tmp = le32toh(tmp);
2930 if (tmp == 0xffffffff) /* Shouldn't happen. */
2931 tmp = 0;
2932 		else if (tmp & 0xc0000)	/* Work around a HW bug. */
2933 tmp |= 0x8000;
2934 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
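		/*
		 * Each ICT entry packs the interrupt causes into 16 bits;
		 * the expansion above rebuilds the IWN_INT register layout
		 * (high byte into bits 31:24, low byte into bits 7:0).
		 */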
2935 r2 = 0; /* Unused. */
2936 } else {
2937 r1 = IWN_READ(sc, IWN_INT);
2938 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2939 			goto done;	/* Hardware gone! */
2940 r2 = IWN_READ(sc, IWN_FH_INT);
2941 }
2942
2943 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
2944
2945 if (r1 == 0 && r2 == 0)
2946 goto done; /* Interrupt not for us. */
2947
2948 /* Acknowledge interrupts. */
2949 IWN_WRITE(sc, IWN_INT, r1);
2950 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2951 IWN_WRITE(sc, IWN_FH_INT, r2);
2952
2953 if (r1 & IWN_INT_RF_TOGGLED) {
2954 iwn_rftoggle_intr(sc);
2955 goto done;
2956 }
2957 if (r1 & IWN_INT_CT_REACHED) {
2958 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
2959 __func__);
2960 }
2961 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2962 device_printf(sc->sc_dev, "%s: fatal firmware error\n",
2963 __func__);
2964 /* Dump firmware error log and stop. */
2965 iwn_fatal_intr(sc);
2966 ifp->if_flags &= ~IFF_UP;
2967 iwn_stop_locked(sc);
2968 goto done;
2969 }
2970 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2971 (r2 & IWN_FH_INT_RX)) {
2972 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2973 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2974 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2975 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2976 IWN_INT_PERIODIC_DIS);
2977 iwn_notif_intr(sc);
2978 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2979 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2980 IWN_INT_PERIODIC_ENA);
2981 }
2982 } else
2983 iwn_notif_intr(sc);
2984 }
2985
2986 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2987 if (sc->sc_flags & IWN_FLAG_USE_ICT)
2988 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2989 wakeup(sc); /* FH DMA transfer completed. */
2990 }
2991
2992 if (r1 & IWN_INT_ALIVE)
2993 wakeup(sc); /* Firmware is alive. */
2994
2995 if (r1 & IWN_INT_WAKEUP)
2996 iwn_wakeup_intr(sc);
2997
2998 done:
2999 /* Re-enable interrupts. */
3000 if (ifp->if_flags & IFF_UP)
3001 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3002
3003 IWN_UNLOCK(sc);
3004 }
3005
3006 /*
3007 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3008 * 5000 adapters use a slightly different format).
3009 */
3010 static void
3011 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3012 uint16_t len)
3013 {
3014 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3015
3016 *w = htole16(len + 8);
3017 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3018 BUS_DMASYNC_PREWRITE);
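	/*
	 * Entries in the first IWN_SCHED_WINSZ slots are mirrored past the
	 * end of the ring (at idx + IWN_TX_RING_COUNT), presumably so the
	 * scheduler can always read a full window even when it wraps.
	 */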
3019 if (idx < IWN_SCHED_WINSZ) {
3020 *(w + IWN_TX_RING_COUNT) = *w;
3021 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3022 BUS_DMASYNC_PREWRITE);
3023 }
3024 }
3025
3026 static void
3027 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3028 uint16_t len)
3029 {
3030 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3031
3032 *w = htole16(id << 12 | (len + 8));
3033 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3034 BUS_DMASYNC_PREWRITE);
3035 if (idx < IWN_SCHED_WINSZ) {
3036 *(w + IWN_TX_RING_COUNT) = *w;
3037 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3038 BUS_DMASYNC_PREWRITE);
3039 }
3040 }
3041
3042 #ifdef notyet
3043 static void
3044 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3045 {
3046 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3047
3048 *w = (*w & htole16(0xf000)) | htole16(1);
3049 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3050 BUS_DMASYNC_PREWRITE);
3051 if (idx < IWN_SCHED_WINSZ) {
3052 *(w + IWN_TX_RING_COUNT) = *w;
3053 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3054 BUS_DMASYNC_PREWRITE);
3055 }
3056 }
3057 #endif
3058
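/*
 * Map an 802.11 rate in 0.5Mb/s units (as found in rs_rates) to the
 * corresponding index in the iwn_rates table; fall back to index 0,
 * the lowest rate, when the rate is unknown.
 */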
3059 static uint8_t
3060 iwn_plcp_signal(int rate)
{
3061 int i;
3062
3063 for (i = 0; i < IWN_RIDX_MAX + 1; i++) {
3064 if ((rate & IEEE80211_RATE_VAL) == iwn_rates[i].rate)
3065 return i;
3066 }
3067
3068 return 0;
3069 }
3070
3071 static int
3072 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3073 {
3074 const struct ieee80211_txparam *tp;
3075 struct ieee80211vap *vap = ni->ni_vap;
3076 struct ieee80211com *ic = ni->ni_ic;
3077 struct iwn_node *wn = (void *)ni;
3078 struct iwn_tx_ring *ring;
3079 struct iwn_tx_desc *desc;
3080 struct iwn_tx_data *data;
3081 struct iwn_tx_cmd *cmd;
3082 struct iwn_cmd_data *tx;
3083 const struct iwn_rate *rinfo;
3084 struct ieee80211_frame *wh;
3085 struct ieee80211_key *k = NULL;
3086 struct mbuf *m1;
3087 uint32_t flags;
3088 uint16_t qos;
3089 u_int hdrlen;
3090 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3091 uint8_t tid, ridx, txant, type;
3092 int ac, i, totlen, error, pad, nsegs = 0, rate;
3093
3094 IWN_LOCK_ASSERT(sc);
3095
3096 wh = mtod(m, struct ieee80211_frame *);
3097 hdrlen = ieee80211_anyhdrsize(wh);
3098 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3099
3100 /* Select EDCA Access Category and TX ring for this frame. */
3101 if (IEEE80211_QOS_HAS_SEQ(wh)) {
3102 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
3103 tid = qos & IEEE80211_QOS_TID;
3104 } else {
3105 qos = 0;
3106 tid = 0;
3107 }
3108 ac = M_WME_GETAC(m);
3109
3110 ring = &sc->txq[ac];
3111 desc = &ring->desc[ring->cur];
3112 data = &ring->data[ring->cur];
3113
3114 /* Choose a TX rate index. */
3115 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
3116 if (type == IEEE80211_FC0_TYPE_MGT)
3117 rate = tp->mgmtrate;
3118 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3119 rate = tp->mcastrate;
3120 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
3121 rate = tp->ucastrate;
3122 else {
3123 /* XXX pass pktlen */
3124 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3125 rate = ni->ni_txrate;
3126 }
3127 ridx = iwn_plcp_signal(rate);
3128 rinfo = &iwn_rates[ridx];
3129
3130 /* Encrypt the frame if need be. */
3131 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3132 /* Retrieve key for TX. */
3133 k = ieee80211_crypto_encap(ni, m);
3134 if (k == NULL) {
3135 m_freem(m);
3136 return ENOBUFS;
3137 }
3138 /* 802.11 header may have moved. */
3139 wh = mtod(m, struct ieee80211_frame *);
3140 }
3141 totlen = m->m_pkthdr.len;
3142
3143 if (ieee80211_radiotap_active_vap(vap)) {
3144 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3145
3146 tap->wt_flags = 0;
3147 tap->wt_rate = rinfo->rate;
3148 if (k != NULL)
3149 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3150
3151 ieee80211_radiotap_tx(vap, m);
3152 }
3153
3154 /* Prepare TX firmware command. */
3155 cmd = &ring->cmd[ring->cur];
3156 cmd->code = IWN_CMD_TX_DATA;
3157 cmd->flags = 0;
3158 cmd->qid = ring->qid;
3159 cmd->idx = ring->cur;
3160
3161 tx = (struct iwn_cmd_data *)cmd->data;
3162 /* NB: No need to clear tx, all fields are reinitialized here. */
3163 tx->scratch = 0; /* clear "scratch" area */
3164
3165 flags = 0;
3166 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3167 /* Unicast frame, check if an ACK is expected. */
3168 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
3169 IEEE80211_QOS_ACKPOLICY_NOACK)
3170 flags |= IWN_TX_NEED_ACK;
3171 }
3172 if ((wh->i_fc[0] &
3173 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3174 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3175 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
3176
3177 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3178 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
3179
3180 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3181 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3182 /* NB: Group frames are sent using CCK in 802.11b/g. */
3183 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
3184 flags |= IWN_TX_NEED_RTS;
3185 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3186 ridx >= IWN_RIDX_OFDM6) {
3187 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3188 flags |= IWN_TX_NEED_CTS;
3189 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3190 flags |= IWN_TX_NEED_RTS;
3191 }
3192 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3193 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3194 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3195 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3196 flags |= IWN_TX_NEED_PROTECTION;
3197 } else
3198 flags |= IWN_TX_FULL_TXOP;
3199 }
3200 }
3201
3202 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3203 type != IEEE80211_FC0_TYPE_DATA)
3204 tx->id = sc->broadcast_id;
3205 else
3206 tx->id = wn->id;
3207
3208 if (type == IEEE80211_FC0_TYPE_MGT) {
3209 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3210
3211 /* Tell HW to set timestamp in probe responses. */
3212 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3213 flags |= IWN_TX_INSERT_TSTAMP;
3214 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3215 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3216 tx->timeout = htole16(3);
3217 else
3218 tx->timeout = htole16(2);
3219 } else
3220 tx->timeout = htole16(0);
3221
3222 if (hdrlen & 3) {
3223 /* First segment length must be a multiple of 4. */
3224 flags |= IWN_TX_NEED_PADDING;
3225 pad = 4 - (hdrlen & 3);
3226 } else
3227 pad = 0;
3228
3229 tx->len = htole16(totlen);
3230 tx->tid = tid;
3231 tx->rts_ntries = 60;
3232 tx->data_ntries = 15;
3233 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3234 tx->plcp = rinfo->plcp;
3235 tx->rflags = rinfo->flags;
3236 if (tx->id == sc->broadcast_id) {
3237 /* Group or management frame. */
3238 tx->linkq = 0;
3239 /* XXX Alternate between antenna A and B? */
3240 txant = IWN_LSB(sc->txchainmask);
3241 tx->rflags |= IWN_RFLAG_ANT(txant);
3242 } else {
3243 tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
3244 flags |= IWN_TX_LINKQ; /* enable MRR */
3245 }
3246 /* Set physical address of "scratch area". */
3247 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3248 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3249
3250 /* Copy 802.11 header in TX command. */
3251 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3252
3253 /* Trim 802.11 header. */
3254 m_adj(m, hdrlen);
3255 tx->security = 0;
3256 tx->flags = htole32(flags);
3257
3258 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3259 &nsegs, BUS_DMA_NOWAIT);
3260 if (error != 0) {
3261 if (error != EFBIG) {
3262 device_printf(sc->sc_dev,
3263 "%s: can't map mbuf (error %d)\n", __func__, error);
3264 m_freem(m);
3265 return error;
3266 }
3267 /* Too many DMA segments, linearize mbuf. */
3268 m1 = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3269 if (m1 == NULL) {
3270 device_printf(sc->sc_dev,
3271 "%s: could not defrag mbuf\n", __func__);
3272 m_freem(m);
3273 return ENOBUFS;
3274 }
3275 m = m1;
3276
3277 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3278 segs, &nsegs, BUS_DMA_NOWAIT);
3279 if (error != 0) {
3280 device_printf(sc->sc_dev,
3281 "%s: can't map mbuf (error %d)\n", __func__, error);
3282 m_freem(m);
3283 return error;
3284 }
3285 }
3286
3287 data->m = m;
3288 data->ni = ni;
3289
3290 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3291 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3292
3293 /* Fill TX descriptor. */
3294 desc->nsegs = 1;
3295 if (m->m_len != 0)
3296 desc->nsegs += nsegs;
3297 /* First DMA segment is used by the TX command. */
3298 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3299 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3300 (4 + sizeof (*tx) + hdrlen + pad) << 4);
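	/*
	 * NB: each segment's "len" word packs the high bits of the DMA
	 * address (IWN_HIADDR) in its low nibble and the byte count in
	 * bits 15:4, hence the shifts by 4 here and in the loop below.
	 */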
3301 /* Other DMA segments are for data payload. */
3302 seg = &segs[0];
3303 for (i = 1; i <= nsegs; i++) {
3304 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3305 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
3306 seg->ds_len << 4);
3307 seg++;
3308 }
3309
3310 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3311 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3312 BUS_DMASYNC_PREWRITE);
3313 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3314 BUS_DMASYNC_PREWRITE);
3315
3316 #ifdef notyet
3317 /* Update TX scheduler. */
3318 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3319 #endif
3320
3321 /* Kick TX ring. */
3322 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3323 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3324
3325 /* Mark TX ring as full if we reach a certain threshold. */
3326 if (++ring->queued > IWN_TX_RING_HIMARK)
3327 sc->qfullmsk |= 1 << ring->qid;
3328
3329 return 0;
3330 }
3331
3332 static int
3333 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3334 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
3335 {
3336 const struct iwn_rate *rinfo;
3337 struct ifnet *ifp = sc->sc_ifp;
3338 struct ieee80211vap *vap = ni->ni_vap;
3339 struct ieee80211com *ic = ifp->if_l2com;
3340 struct iwn_tx_cmd *cmd;
3341 struct iwn_cmd_data *tx;
3342 struct ieee80211_frame *wh;
3343 struct iwn_tx_ring *ring;
3344 struct iwn_tx_desc *desc;
3345 struct iwn_tx_data *data;
3346 struct mbuf *m1;
3347 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3348 uint32_t flags;
3349 u_int hdrlen;
3350 int ac, totlen, error, pad, nsegs = 0, i, rate;
3351 uint8_t ridx, type, txant;
3352
3353 IWN_LOCK_ASSERT(sc);
3354
3355 wh = mtod(m, struct ieee80211_frame *);
3356 hdrlen = ieee80211_anyhdrsize(wh);
3357 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3358
3359 ac = params->ibp_pri & 3;
3360
3361 ring = &sc->txq[ac];
3362 desc = &ring->desc[ring->cur];
3363 data = &ring->data[ring->cur];
3364
3365 /* Choose a TX rate index. */
3366 rate = params->ibp_rate0;
3367 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3368 /* XXX fall back to mcast/mgmt rate? */
3369 m_freem(m);
3370 return EINVAL;
3371 }
3372 ridx = iwn_plcp_signal(rate);
3373 rinfo = &iwn_rates[ridx];
3374
3375 totlen = m->m_pkthdr.len;
3376
3377 /* Prepare TX firmware command. */
3378 cmd = &ring->cmd[ring->cur];
3379 cmd->code = IWN_CMD_TX_DATA;
3380 cmd->flags = 0;
3381 cmd->qid = ring->qid;
3382 cmd->idx = ring->cur;
3383
3384 tx = (struct iwn_cmd_data *)cmd->data;
3385 /* NB: No need to clear tx, all fields are reinitialized here. */
3386 tx->scratch = 0; /* clear "scratch" area */
3387
3388 flags = 0;
3389 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3390 flags |= IWN_TX_NEED_ACK;
3391 if (params->ibp_flags & IEEE80211_BPF_RTS) {
3392 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3393 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3394 flags &= ~IWN_TX_NEED_RTS;
3395 flags |= IWN_TX_NEED_PROTECTION;
3396 } else
3397 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3398 }
3399 if (params->ibp_flags & IEEE80211_BPF_CTS) {
3400 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3401 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3402 flags &= ~IWN_TX_NEED_CTS;
3403 flags |= IWN_TX_NEED_PROTECTION;
3404 } else
3405 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3406 }
3407 if (type == IEEE80211_FC0_TYPE_MGT) {
3408 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3409
3410 /* Tell HW to set timestamp in probe responses. */
3411 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3412 flags |= IWN_TX_INSERT_TSTAMP;
3413
3414 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3415 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3416 tx->timeout = htole16(3);
3417 else
3418 tx->timeout = htole16(2);
3419 } else
3420 tx->timeout = htole16(0);
3421
3422 if (hdrlen & 3) {
3423 /* First segment length must be a multiple of 4. */
3424 flags |= IWN_TX_NEED_PADDING;
3425 pad = 4 - (hdrlen & 3);
3426 } else
3427 pad = 0;
3428
3429 if (ieee80211_radiotap_active_vap(vap)) {
3430 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3431
3432 tap->wt_flags = 0;
3433 tap->wt_rate = rate;
3434
3435 ieee80211_radiotap_tx(vap, m);
3436 }
3437
3438 tx->len = htole16(totlen);
3439 tx->tid = 0;
3440 tx->id = sc->broadcast_id;
3441 tx->rts_ntries = params->ibp_try1;
3442 tx->data_ntries = params->ibp_try0;
3443 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3444 tx->plcp = rinfo->plcp;
3445 tx->rflags = rinfo->flags;
3446 /* Group or management frame. */
3447 tx->linkq = 0;
3448 txant = IWN_LSB(sc->txchainmask);
3449 tx->rflags |= IWN_RFLAG_ANT(txant);
3450 /* Set physical address of "scratch area". */
3451 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3452 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3453
3454 /* Copy 802.11 header in TX command. */
3455 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3456
3457 /* Trim 802.11 header. */
3458 m_adj(m, hdrlen);
3459 tx->security = 0;
3460 tx->flags = htole32(flags);
3461
3462 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3463 &nsegs, BUS_DMA_NOWAIT);
3464 if (error != 0) {
3465 if (error != EFBIG) {
3466 device_printf(sc->sc_dev,
3467 "%s: can't map mbuf (error %d)\n", __func__, error);
3468 m_freem(m);
3469 return error;
3470 }
3471 /* Too many DMA segments, linearize mbuf. */
3472 m1 = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3473 if (m1 == NULL) {
3474 device_printf(sc->sc_dev,
3475 "%s: could not defrag mbuf\n", __func__);
3476 m_freem(m);
3477 return ENOBUFS;
3478 }
3479 m = m1;
3480
3481 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3482 segs, &nsegs, BUS_DMA_NOWAIT);
3483 if (error != 0) {
3484 device_printf(sc->sc_dev,
3485 "%s: can't map mbuf (error %d)\n", __func__, error);
3486 m_freem(m);
3487 return error;
3488 }
3489 }
3490
3491 data->m = m;
3492 data->ni = ni;
3493
3494 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3495 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3496
3497 /* Fill TX descriptor. */
3498 desc->nsegs = 1;
3499 if (m->m_len != 0)
3500 desc->nsegs += nsegs;
3501 /* First DMA segment is used by the TX command. */
3502 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3503 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3504 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3505 /* Other DMA segments are for data payload. */
3506 seg = &segs[0];
3507 for (i = 1; i <= nsegs; i++) {
3508 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3509 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
3510 seg->ds_len << 4);
3511 seg++;
3512 }
3513
3514 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3515 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3516 BUS_DMASYNC_PREWRITE);
3517 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3518 BUS_DMASYNC_PREWRITE);
3519
3520 #ifdef notyet
3521 /* Update TX scheduler. */
3522 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3523 #endif
3524
3525 /* Kick TX ring. */
3526 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3527 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3528
3529 /* Mark TX ring as full if we reach a certain threshold. */
3530 if (++ring->queued > IWN_TX_RING_HIMARK)
3531 sc->qfullmsk |= 1 << ring->qid;
3532
3533 return 0;
3534 }
3535
3536 static int
3537 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3538 const struct ieee80211_bpf_params *params)
3539 {
3540 struct ieee80211com *ic = ni->ni_ic;
3541 struct ifnet *ifp = ic->ic_ifp;
3542 struct iwn_softc *sc = ifp->if_softc;
3543 int error = 0;
3544
3545 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3546 ieee80211_free_node(ni);
3547 m_freem(m);
3548 return ENETDOWN;
3549 }
3550
3551 IWN_LOCK(sc);
3552 if (params == NULL) {
3553 /*
3554 * Legacy path; interpret frame contents to decide
3555 * precisely how to send the frame.
3556 */
3557 error = iwn_tx_data(sc, m, ni);
3558 } else {
3559 /*
3560 * Caller supplied explicit parameters to use in
3561 * sending the frame.
3562 */
3563 error = iwn_tx_data_raw(sc, m, ni, params);
3564 }
3565 if (error != 0) {
3566 /* NB: m is reclaimed on tx failure */
3567 ieee80211_free_node(ni);
3568 ifp->if_oerrors++;
3569 }
3570 sc->sc_tx_timer = 5;
3571
3572 IWN_UNLOCK(sc);
3573 return error;
3574 }
3575
3576 static void
3577 iwn_start(struct ifnet *ifp)
3578 {
3579 struct iwn_softc *sc = ifp->if_softc;
3580
3581 IWN_LOCK(sc);
3582 iwn_start_locked(ifp);
3583 IWN_UNLOCK(sc);
3584 }
3585
3586 static void
3587 iwn_start_locked(struct ifnet *ifp)
3588 {
3589 struct iwn_softc *sc = ifp->if_softc;
3590 struct ieee80211_node *ni;
3591 struct mbuf *m;
3592
3593 IWN_LOCK_ASSERT(sc);
3594
3595 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
3596 (ifp->if_drv_flags & IFF_DRV_OACTIVE))
3597 return;
3598
3599 for (;;) {
3600 if (sc->qfullmsk != 0) {
3601 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3602 break;
3603 }
3604 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3605 if (m == NULL)
3606 break;
3607 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3608 if (iwn_tx_data(sc, m, ni) != 0) {
3609 ieee80211_free_node(ni);
3610 ifp->if_oerrors++;
3611 continue;
3612 }
3613 sc->sc_tx_timer = 5;
3614 }
3615 }
3616
3617 static void
3618 iwn_watchdog(void *arg)
3619 {
3620 struct iwn_softc *sc = arg;
3621 struct ifnet *ifp = sc->sc_ifp;
3622 struct ieee80211com *ic = ifp->if_l2com;
3623
3624 IWN_LOCK_ASSERT(sc);
3625
3626 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
3627
3628 if (sc->sc_tx_timer > 0) {
3629 if (--sc->sc_tx_timer == 0) {
3630 if_printf(ifp, "device timeout\n");
3631 ieee80211_runtask(ic, &sc->sc_reinit_task);
3632 return;
3633 }
3634 }
3635 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
3636 }
3637
3638 static int
3639 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3640 {
3641 struct iwn_softc *sc = ifp->if_softc;
3642 struct ieee80211com *ic = ifp->if_l2com;
3643 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3644 struct ifreq *ifr = (struct ifreq *) data;
3645 int error = 0, startall = 0, stop = 0;
3646
3647 switch (cmd) {
3648 case SIOCGIFADDR:
3649 error = ether_ioctl(ifp, cmd, data);
3650 break;
3651 case SIOCSIFFLAGS:
3652 IWN_LOCK(sc);
3653 if (ifp->if_flags & IFF_UP) {
3654 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3655 iwn_init_locked(sc);
3656 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3657 startall = 1;
3658 else
3659 stop = 1;
3660 }
3661 } else {
3662 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3663 iwn_stop_locked(sc);
3664 }
3665 IWN_UNLOCK(sc);
3666 if (startall)
3667 ieee80211_start_all(ic);
3668 else if (vap != NULL && stop)
3669 ieee80211_stop(vap);
3670 break;
3671 case SIOCGIFMEDIA:
3672 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3673 break;
3674 default:
3675 error = EINVAL;
3676 break;
3677 }
3678 return error;
3679 }
3680
3681 /*
3682 * Send a command to the firmware.
3683 */
3684 static int
3685 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3686 {
3687 struct iwn_tx_ring *ring = &sc->txq[4];
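	/* NB: TX queue 4 is dedicated to firmware commands (see "Kick command ring" below). */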
3688 struct iwn_tx_desc *desc;
3689 struct iwn_tx_data *data;
3690 struct iwn_tx_cmd *cmd;
3691 struct mbuf *m;
3692 bus_addr_t paddr;
3693 int totlen, error;
3694
3695 IWN_LOCK_ASSERT(sc);
3696
3697 desc = &ring->desc[ring->cur];
3698 data = &ring->data[ring->cur];
3699 totlen = 4 + size;
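	/* The extra 4 bytes account for the command header (code, flags, qid, idx). */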
3700
3701 if (size > sizeof cmd->data) {
3702 /* Command is too large to fit in a descriptor. */
3703 if (totlen > MCLBYTES)
3704 return EINVAL;
3705 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3706 if (m == NULL)
3707 return ENOMEM;
3708 cmd = mtod(m, struct iwn_tx_cmd *);
3709 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3710 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3711 if (error != 0) {
3712 m_freem(m);
3713 return error;
3714 }
3715 data->m = m;
3716 } else {
3717 cmd = &ring->cmd[ring->cur];
3718 paddr = data->cmd_paddr;
3719 }
3720
3721 cmd->code = code;
3722 cmd->flags = 0;
3723 cmd->qid = ring->qid;
3724 cmd->idx = ring->cur;
3725 memcpy(cmd->data, buf, size);
3726
3727 desc->nsegs = 1;
3728 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3729 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
3730
3731 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3732 __func__, iwn_intr_str(cmd->code), cmd->code,
3733 cmd->flags, cmd->qid, cmd->idx);
3734
3735 if (size > sizeof cmd->data) {
3736 bus_dmamap_sync(ring->data_dmat, data->map,
3737 BUS_DMASYNC_PREWRITE);
3738 } else {
3739 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3740 BUS_DMASYNC_PREWRITE);
3741 }
3742 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3743 BUS_DMASYNC_PREWRITE);
3744
3745 #ifdef notyet
3746 /* Update TX scheduler. */
3747 ops->update_sched(sc, ring->qid, ring->cur, 0, 0);
3748 #endif
3749
3750 /* Kick command ring. */
3751 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3752 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3753
3754 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
3755 }
3756
3757 static int
3758 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3759 {
3760 struct iwn4965_node_info hnode;
3761 caddr_t src, dst;
3762
3763 /*
3764 * We use the node structure for 5000 Series internally (it is
3765 * a superset of the one for 4965AGN). We thus copy the common
3766 * fields before sending the command.
3767 */
3768 src = (caddr_t)node;
3769 dst = (caddr_t)&hnode;
3770 memcpy(dst, src, 48);
3771 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3772 memcpy(dst + 48, src + 72, 20);
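	/*
	 * The 24 bytes skipped here (src[48..71]) are the TSC and RX/TX MIC
	 * fields mentioned above, which have no counterpart at this offset
	 * in the 4965 node layout.
	 */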
3773 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3774 }
3775
3776 static int
3777 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3778 {
3779 /* Direct mapping. */
3780 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3781 }
3782
3783 static int
3784 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
3785 {
3786 struct iwn_node *wn = (void *)ni;
3787 struct ieee80211_rateset *rs = &ni->ni_rates;
3788 struct iwn_cmd_link_quality linkq;
3789 const struct iwn_rate *rinfo;
3790 uint8_t txant;
3791 int i, txrate;
3792
3793 /* Use the first valid TX antenna. */
3794 txant = IWN_LSB(sc->txchainmask);
3795
3796 memset(&linkq, 0, sizeof linkq);
3797 linkq.id = wn->id;
3798 linkq.antmsk_1stream = txant;
3799 linkq.antmsk_2stream = IWN_ANT_AB;
3800 linkq.ampdu_max = 31;
3801 linkq.ampdu_threshold = 3;
3802 linkq.ampdu_limit = htole16(4000); /* 4ms */
3803
3804 /* Start at highest available bit-rate. */
3805 txrate = rs->rs_nrates - 1;
3806 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3807 rinfo = &iwn_rates[wn->ridx[txrate]];
3808 linkq.retry[i].plcp = rinfo->plcp;
3809 linkq.retry[i].rflags = rinfo->flags;
3810 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3811 /* Next retry at the immediately lower bit-rate. */
3812 if (txrate > 0)
3813 txrate--;
3814 }
3815 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
3816 }
3817
3818 /*
3819 * Broadcast node is used to send group-addressed and management frames.
3820 */
3821 static int
3822 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3823 {
3824 struct iwn_ops *ops = &sc->ops;
3825 struct ifnet *ifp = sc->sc_ifp;
3826 struct ieee80211com *ic = ifp->if_l2com;
3827 struct iwn_node_info node;
3828 struct iwn_cmd_link_quality linkq;
3829 const struct iwn_rate *rinfo;
3830 uint8_t txant;
3831 int i, error;
3832
3833 memset(&node, 0, sizeof node);
3834 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
3835 node.id = sc->broadcast_id;
3836 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
3837 if ((error = ops->add_node(sc, &node, async)) != 0)
3838 return error;
3839
3840 /* Use the first valid TX antenna. */
3841 txant = IWN_LSB(sc->txchainmask);
3842
3843 memset(&linkq, 0, sizeof linkq);
3844 linkq.id = sc->broadcast_id;
3845 linkq.antmsk_1stream = txant;
3846 linkq.antmsk_2stream = IWN_ANT_AB;
3847 linkq.ampdu_max = 64;
3848 linkq.ampdu_threshold = 3;
3849 linkq.ampdu_limit = htole16(4000); /* 4ms */
3850
3851 /* Use lowest mandatory bit-rate. */
3852 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
3853 rinfo = &iwn_rates[IWN_RIDX_OFDM6];
3854 else
3855 rinfo = &iwn_rates[IWN_RIDX_CCK1];
3856 linkq.retry[0].plcp = rinfo->plcp;
3857 linkq.retry[0].rflags = rinfo->flags;
3858 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
3859 /* Use same bit-rate for all TX retries. */
3860 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
3861 linkq.retry[i].plcp = linkq.retry[0].plcp;
3862 linkq.retry[i].rflags = linkq.retry[0].rflags;
3863 }
3864 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3865 }
3866
3867 static int
3868 iwn_updateedca(struct ieee80211com *ic)
3869 {
3870 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
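	/*
	 * Worked example: an ECWmin of 4 yields IWN_EXP2(4) = (1 << 4) - 1
	 * = 15 slots, and an ECWmax of 10 yields 1023 slots.
	 */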
3871 struct iwn_softc *sc = ic->ic_ifp->if_softc;
3872 struct iwn_edca_params cmd;
3873 int aci;
3874
3875 memset(&cmd, 0, sizeof cmd);
3876 cmd.flags = htole32(IWN_EDCA_UPDATE);
3877 for (aci = 0; aci < WME_NUM_AC; aci++) {
3878 const struct wmeParams *ac =
3879 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
3880 cmd.ac[aci].aifsn = ac->wmep_aifsn;
3881 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
3882 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
3883 cmd.ac[aci].txoplimit =
3884 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
3885 }
3886 IEEE80211_UNLOCK(ic);
3887 IWN_LOCK(sc);
3888 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
3889 IWN_UNLOCK(sc);
3890 IEEE80211_LOCK(ic);
3891 return 0;
3892 #undef IWN_EXP2
3893 }
3894
3895 static void
3896 iwn_update_mcast(struct ifnet *ifp)
3897 {
3898 /* Ignore */
3899 }
3900
3901 static void
3902 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3903 {
3904 struct iwn_cmd_led led;
3905
3906 /* Clear microcode LED ownership. */
3907 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3908
3909 led.which = which;
3910 led.unit = htole32(10000); /* on/off in unit of 100ms */
3911 led.off = off;
3912 led.on = on;
3913 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3914 }
3915
3916 /*
3917 * Set the critical temperature at which the firmware will stop the radio
3918 * and notify us.
3919 */
3920 static int
3921 iwn_set_critical_temp(struct iwn_softc *sc)
3922 {
3923 struct iwn_critical_temp crit;
3924 int32_t temp;
3925
3926 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3927
3928 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3929 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3930 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3931 temp = IWN_CTOK(110);
3932 else
3933 temp = 110;
3934 memset(&crit, 0, sizeof crit);
3935 crit.tempR = htole32(temp);
3936 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
3937 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3938 }
3939
3940 static int
3941 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3942 {
3943 struct iwn_cmd_timing cmd;
3944 uint64_t val, mod;
3945
3946 memset(&cmd, 0, sizeof cmd);
3947 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3948 cmd.bintval = htole16(ni->ni_intval);
3949 cmd.lintval = htole16(10);
3950
3951 /* Compute remaining time until next beacon. */
3952 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3953 mod = le64toh(cmd.tstamp) % val;
3954 cmd.binitval = htole32((uint32_t)(val - mod));
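	/*
	 * Example: with a 100 TU beacon interval (102400 usec) and a TSF of
	 * 1000000 usec, mod = 1000000 % 102400 = 78400, so the firmware is
	 * told the next beacon is due in 102400 - 78400 = 24000 usec.
	 */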
3955
3956 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3957 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3958
3959 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3960 }
3961
3962 static void
3963 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3964 {
3965 struct ifnet *ifp = sc->sc_ifp;
3966 struct ieee80211com *ic = ifp->if_l2com;
3967
3968 /* Adjust TX power if need be (delta >= 3 degC). */
3969 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
3970 __func__, sc->temp, temp);
3971 if (abs(temp - sc->temp) >= 3) {
3972 /* Record temperature of last calibration. */
3973 sc->temp = temp;
3974 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
3975 }
3976 }
3977
3978 /*
3979 * Set TX power for current channel (each rate has its own power settings).
3980 * This function takes into account the regulatory information from EEPROM,
3981 * the current temperature and the current voltage.
3982 */
3983 static int
3984 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3985 int async)
3986 {
3987 /* Fixed-point arithmetic division using an n-bit fractional part. */
3988 #define fdivround(a, b, n) \
3989 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3990 /* Linear interpolation. */
3991 #define interpolate(x, x1, y1, x2, y2, n) \
3992 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
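/*
 * Worked example of the macros above with a 1-bit fractional part:
 * interpolate(40, 36, 10, 44, 20, 1) computes
 * fdivround((40 - 36) * (20 - 10), 44 - 36, 1) = fdivround(40, 8, 1)
 * = ((2 * 40) / 8 + 1) / 2 = 5, giving 10 + 5 = 15, i.e. a rounded
 * linear interpolation between the two calibration samples.
 */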
3993
3994 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3995 struct iwn_ucode_info *uc = &sc->ucode_info;
3996 struct iwn4965_cmd_txpower cmd;
3997 struct iwn4965_eeprom_chan_samples *chans;
3998 const uint8_t *rf_gain, *dsp_gain;
3999 int32_t vdiff, tdiff;
4000 int i, c, grp, maxpwr;
4001 uint8_t chan;
4002
4003 /* Retrieve current channel from last RXON. */
4004 chan = sc->rxon.chan;
4005 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
4006 chan);
4007
4008 memset(&cmd, 0, sizeof cmd);
4009 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4010 cmd.chan = chan;
4011
4012 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4013 maxpwr = sc->maxpwr5GHz;
4014 rf_gain = iwn4965_rf_gain_5ghz;
4015 dsp_gain = iwn4965_dsp_gain_5ghz;
4016 } else {
4017 maxpwr = sc->maxpwr2GHz;
4018 rf_gain = iwn4965_rf_gain_2ghz;
4019 dsp_gain = iwn4965_dsp_gain_2ghz;
4020 }
4021
4022 /* Compute voltage compensation. */
4023 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4024 if (vdiff > 0)
4025 vdiff *= 2;
4026 if (abs(vdiff) > 2)
4027 vdiff = 0;
4028 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4029 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4030 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
4031
4032 /* Get channel attenuation group. */
4033 if (chan <= 20) /* 1-20 */
4034 grp = 4;
4035 else if (chan <= 43) /* 34-43 */
4036 grp = 0;
4037 else if (chan <= 70) /* 44-70 */
4038 grp = 1;
4039 else if (chan <= 124) /* 71-124 */
4040 grp = 2;
4041 else /* 125-200 */
4042 grp = 3;
4043 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4044 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
4045
4046 /* Get channel sub-band. */
4047 for (i = 0; i < IWN_NBANDS; i++)
4048 if (sc->bands[i].lo != 0 &&
4049 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4050 break;
4051 if (i == IWN_NBANDS) /* Can't happen in real-life. */
4052 return EINVAL;
4053 chans = sc->bands[i].chans;
4054 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4055 "%s: chan %d sub-band=%d\n", __func__, chan, i);
4056
4057 for (c = 0; c < 2; c++) {
4058 uint8_t power, gain, temp;
4059 int maxchpwr, pwr, ridx, idx;
4060
4061 power = interpolate(chan,
4062 chans[0].num, chans[0].samples[c][1].power,
4063 chans[1].num, chans[1].samples[c][1].power, 1);
4064 gain = interpolate(chan,
4065 chans[0].num, chans[0].samples[c][1].gain,
4066 chans[1].num, chans[1].samples[c][1].gain, 1);
4067 temp = interpolate(chan,
4068 chans[0].num, chans[0].samples[c][1].temp,
4069 chans[1].num, chans[1].samples[c][1].temp, 1);
4070 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4071 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
4072 __func__, c, power, gain, temp);
4073
4074 /* Compute temperature compensation. */
4075 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4076 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4077 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
4078 __func__, tdiff, sc->temp, temp);
4079
4080 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4081 /* Convert dBm to half-dBm. */
4082 maxchpwr = sc->maxpwr[chan] * 2;
4083 if ((ridx / 8) & 1)
4084 maxchpwr -= 6; /* MIMO 2T: -3dB */
4085
4086 pwr = maxpwr;
4087
4088 /* Adjust TX power based on rate. */
4089 if ((ridx % 8) == 5)
4090 pwr -= 15; /* OFDM48: -7.5dB */
4091 else if ((ridx % 8) == 6)
4092 pwr -= 17; /* OFDM54: -8.5dB */
4093 else if ((ridx % 8) == 7)
4094 pwr -= 20; /* OFDM60: -10dB */
4095 else
4096 pwr -= 10; /* Others: -5dB */
4097
4098 /* Do not exceed channel max TX power. */
4099 if (pwr > maxchpwr)
4100 pwr = maxchpwr;
4101
4102 idx = gain - (pwr - power) - tdiff - vdiff;
4103 if ((ridx / 8) & 1) /* MIMO */
4104 idx += (int32_t)le32toh(uc->atten[grp][c]);
4105
4106 if (cmd.band == 0)
4107 idx += 9; /* 5GHz */
4108 if (ridx == IWN_RIDX_MAX)
4109 idx += 5; /* CCK */
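			/*
			 * NB: the gain tables appear to be ordered so that a
			 * larger index yields a lower output power, which is
			 * why requesting more power than the calibrated
			 * sample (pwr > power) decreases idx.
			 */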
4110
4111 /* Make sure idx stays in a valid range. */
4112 if (idx < 0)
4113 idx = 0;
4114 else if (idx > IWN4965_MAX_PWR_INDEX)
4115 idx = IWN4965_MAX_PWR_INDEX;
4116
4117 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4118 "%s: Tx chain %d, rate idx %d: power=%d\n",
4119 __func__, c, ridx, idx);
4120 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4121 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4122 }
4123 }
4124
4125 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4126 "%s: set tx power for chan %d\n", __func__, chan);
4127 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4128
4129 #undef interpolate
4130 #undef fdivround
4131 }
4132
4133 static int
4134 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4135 int async)
4136 {
4137 struct iwn5000_cmd_txpower cmd;
4138
4139 /*
4140 * TX power calibration is handled automatically by the firmware
4141 * for 5000 Series.
4142 */
4143 memset(&cmd, 0, sizeof cmd);
4144 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
4145 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4146 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4147 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
4148 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4149 }
4150
4151 /*
4152 * Retrieve the maximum RSSI (in dBm) among receivers.
4153 */
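/*
 * The hardware reports RSSI relative to the AGC gain in use, so the AGC
 * reading and the constant IWN_RSSI_TO_DBM offset are subtracted to get an
 * absolute value in dBm.
 */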
4154 static int
4155 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4156 {
4157 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4158 uint8_t mask, agc;
4159 int rssi;
4160
4161 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4162 agc = (le16toh(phy->agc) >> 7) & 0x7f;
4163
4164 rssi = 0;
4165 if (mask & IWN_ANT_A)
4166 rssi = MAX(rssi, phy->rssi[0]);
4167 if (mask & IWN_ANT_B)
4168 rssi = MAX(rssi, phy->rssi[2]);
4169 if (mask & IWN_ANT_C)
4170 rssi = MAX(rssi, phy->rssi[4]);
4171
4172 DPRINTF(sc, IWN_DEBUG_RECV,
4173 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
4174 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
4175 rssi - agc - IWN_RSSI_TO_DBM);
4176 return rssi - agc - IWN_RSSI_TO_DBM;
4177 }
4178
4179 static int
4180 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4181 {
4182 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4183 uint8_t agc;
4184 int rssi;
4185
4186 agc = (le32toh(phy->agc) >> 9) & 0x7f;
4187
4188 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4189 le16toh(phy->rssi[1]) & 0xff);
4190 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4191
4192 DPRINTF(sc, IWN_DEBUG_RECV,
4193 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
4194 phy->rssi[0], phy->rssi[1], phy->rssi[2],
4195 rssi - agc - IWN_RSSI_TO_DBM);
4196 return rssi - agc - IWN_RSSI_TO_DBM;
4197 }
4198
4199 /*
4200 * Retrieve the average noise (in dBm) among receivers.
4201 */
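/*
 * Each antenna that reported a non-zero value contributes to the average;
 * a fixed -107 offset converts the result to dBm, and -127 dBm is returned
 * if no antenna reported anything.
 */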
4202 static int
4203 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4204 {
4205 int i, total, nbant, noise;
4206
4207 total = nbant = 0;
4208 for (i = 0; i < 3; i++) {
4209 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4210 continue;
4211 total += noise;
4212 nbant++;
4213 }
4214 /* There should be at least one antenna but check anyway. */
4215 return (nbant == 0) ? -127 : (total / nbant) - 107;
4216 }
4217
4218 /*
4219 * Compute temperature (in degC) from last received statistics.
4220 */
4221 static int
4222 iwn4965_get_temperature(struct iwn_softc *sc)
4223 {
4224 struct iwn_ucode_info *uc = &sc->ucode_info;
4225 int32_t r1, r2, r3, r4, temp;
4226
4227 r1 = le32toh(uc->temp[0].chan20MHz);
4228 r2 = le32toh(uc->temp[1].chan20MHz);
4229 r3 = le32toh(uc->temp[2].chan20MHz);
4230 r4 = le32toh(sc->rawtemp);
4231
4232 if (r1 == r3) /* Prevents division by 0 (should not happen). */
4233 return 0;
4234
4235 /* Sign-extend 23-bit R4 value to 32-bit. */
4236 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
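	/*
	 * Example: a raw value of 0x800001 (negative in 24-bit two's
	 * complement) becomes 0x000001 - 0x800000 = -0x7fffff, while
	 * 0x7fffff maps to 0xffffff - 0x800000 = 0x7fffff, i.e. positive
	 * values are unchanged.
	 */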
4237 /* Compute temperature in Kelvin. */
4238 temp = (259 * (r4 - r2)) / (r3 - r1);
4239 temp = (temp * 97) / 100 + 8;
4240
4241 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4242 IWN_KTOC(temp));
4243 return IWN_KTOC(temp);
4244 }
4245
4246 static int
4247 iwn5000_get_temperature(struct iwn_softc *sc)
4248 {
4249 int32_t temp;
4250
4251 /*
4252 * Temperature is not used by the driver for 5000 Series because
4253 * TX power calibration is handled by firmware.
4254 */
4255 temp = le32toh(sc->rawtemp);
4256 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4257 temp = (temp / -5) + sc->temp_off;
4258 temp = IWN_KTOC(temp);
4259 }
4260 return temp;
4261 }
4262
4263 /*
4264 * Initialize sensitivity calibration state machine.
4265 */
4266 static int
4267 iwn_init_sensitivity(struct iwn_softc *sc)
4268 {
4269 struct iwn_ops *ops = &sc->ops;
4270 struct iwn_calib_state *calib = &sc->calib;
4271 uint32_t flags;
4272 int error;
4273
4274 /* Reset calibration state machine. */
4275 memset(calib, 0, sizeof (*calib));
4276 calib->state = IWN_CALIB_STATE_INIT;
4277 calib->cck_state = IWN_CCK_STATE_HIFA;
4278 /* Set initial correlation values. */
4279 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
4280 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4281 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
4282 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4283 calib->cck_x4 = 125;
4284 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
4285 calib->energy_cck = sc->limits->energy_cck;
4286
4287 /* Write initial sensitivity. */
4288 if ((error = iwn_send_sensitivity(sc)) != 0)
4289 return error;
4290
4291 /* Write initial gains. */
4292 if ((error = ops->init_gains(sc)) != 0)
4293 return error;
4294
4295 /* Request statistics at each beacon interval. */
4296 flags = 0;
4297 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
4298 __func__);
4299 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4300 }
4301
4302 /*
4303 * Collect noise and RSSI statistics for the first 20 beacons received
4304 * after association and use them to determine connected antennas and
4305 * to set differential gains.
4306 */
4307 static void
4308 iwn_collect_noise(struct iwn_softc *sc,
4309 const struct iwn_rx_general_stats *stats)
4310 {
4311 struct iwn_ops *ops = &sc->ops;
4312 struct iwn_calib_state *calib = &sc->calib;
4313 uint32_t val;
4314 int i;
4315
4316 /* Accumulate RSSI and noise for all 3 antennas. */
4317 for (i = 0; i < 3; i++) {
4318 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4319 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4320 }
4321 /* NB: We update differential gains only once after 20 beacons. */
4322 if (++calib->nbeacons < 20)
4323 return;
4324
4325 /* Determine highest average RSSI. */
4326 val = MAX(calib->rssi[0], calib->rssi[1]);
4327 val = MAX(calib->rssi[2], val);
4328
4329 /* Determine which antennas are connected. */
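	/*
	 * A chain is deemed disconnected if its RSSI accumulated over the
	 * 20 beacons is more than 15 * 20 below the best chain, i.e. an
	 * average deficit of 15 units per beacon.
	 */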
4330 sc->chainmask = sc->rxchainmask;
4331 for (i = 0; i < 3; i++)
4332 if (val - calib->rssi[i] > 15 * 20)
4333 sc->chainmask &= ~(1 << i);
4334 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4335 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4336 __func__, sc->rxchainmask, sc->chainmask);
4337
4338 /* If none of the TX antennas are connected, keep at least one. */
4339 if ((sc->chainmask & sc->txchainmask) == 0)
4340 sc->chainmask |= IWN_LSB(sc->txchainmask);
4341
4342 (void)ops->set_gains(sc);
4343 calib->state = IWN_CALIB_STATE_RUN;
4344
4345 #ifdef notyet
4346 /* XXX Disable RX chains with no antennas connected. */
4347 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4348 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4349 #endif
4350
4351 #if 0
4352 /* XXX: not yet */
4353 /* Enable power-saving mode if requested by user. */
4354 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4355 (void)iwn_set_pslevel(sc, 0, 3, 1);
4356 #endif
4357 }
4358
4359 static int
4360 iwn4965_init_gains(struct iwn_softc *sc)
4361 {
4362 struct iwn_phy_calib_gain cmd;
4363
4364 memset(&cmd, 0, sizeof cmd);
4365 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4366 /* Differential gains initially set to 0 for all 3 antennas. */
4367 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4368 "%s: setting initial differential gains\n", __func__);
4369 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4370 }
4371
4372 static int
4373 iwn5000_init_gains(struct iwn_softc *sc)
4374 {
4375 struct iwn_phy_calib cmd;
4376
4377 memset(&cmd, 0, sizeof cmd);
4378 cmd.code = sc->reset_noise_gain;
4379 cmd.ngroups = 1;
4380 cmd.isvalid = 1;
4381 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4382 "%s: setting initial differential gains\n", __func__);
4383 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4384 }
4385
4386 static int
4387 iwn4965_set_gains(struct iwn_softc *sc)
4388 {
4389 struct iwn_calib_state *calib = &sc->calib;
4390 struct iwn_phy_calib_gain cmd;
4391 int i, delta, noise;
4392
4393 /* Get minimal noise among connected antennas. */
4394 noise = INT_MAX; /* NB: There's at least one antenna. */
4395 for (i = 0; i < 3; i++)
4396 if (sc->chainmask & (1 << i))
4397 noise = MIN(calib->noise[i], noise);
4398
4399 memset(&cmd, 0, sizeof cmd);
4400 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4401 /* Set differential gains for connected antennas. */
4402 for (i = 0; i < 3; i++) {
4403 if (sc->chainmask & (1 << i)) {
4404 /* Compute attenuation (in unit of 1.5dB). */
4405 delta = (noise - (int32_t)calib->noise[i]) / 30;
4406 /* NB: delta <= 0 */
4407 /* Limit to [-4.5dB,0]. */
4408 cmd.gain[i] = MIN(abs(delta), 3);
4409 if (delta < 0)
4410 cmd.gain[i] |= 1 << 2; /* sign bit */
4411 }
4412 }
4413 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4414 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4415 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4416 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4417 }
4418
4419 static int
4420 iwn5000_set_gains(struct iwn_softc *sc)
4421 {
4422 struct iwn_calib_state *calib = &sc->calib;
4423 struct iwn_phy_calib_gain cmd;
4424 int i, ant, div, delta;
4425
4426 /* We collected 20 beacons; parts other than the 6050 also need a 1.5 factor. */
4427 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4428
4429 memset(&cmd, 0, sizeof cmd);
4430 cmd.code = sc->noise_gain;
4431 cmd.ngroups = 1;
4432 cmd.isvalid = 1;
4433 /* Get the first available RX antenna as the reference. */
4434 ant = IWN_LSB(sc->rxchainmask);
4435 /* Set differential gains for other antennas. */
4436 for (i = ant + 1; i < 3; i++) {
4437 if (sc->chainmask & (1 << i)) {
4438 /* The delta is relative to antenna "ant". */
4439 delta = ((int32_t)calib->noise[ant] -
4440 (int32_t)calib->noise[i]) / div;
4441 /* Limit to [-4.5dB,+4.5dB]. */
4442 cmd.gain[i - 1] = MIN(abs(delta), 3);
4443 if (delta < 0)
4444 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
4445 }
4446 }
4447 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4448 "setting differential gains Ant B/C: %x/%x (%x)\n",
4449 cmd.gain[0], cmd.gain[1], sc->chainmask);
4450 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4451 }
4452
4453 /*
4454 * Tune RF RX sensitivity based on the number of false alarms detected
4455 * during the last beacon period.
4456 */
4457 static void
4458 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4459 {
4460 #define inc(val, inc, max) \
4461 if ((val) < (max)) { \
4462 if ((val) < (max) - (inc)) \
4463 (val) += (inc); \
4464 else \
4465 (val) = (max); \
4466 needs_update = 1; \
4467 }
4468 #define dec(val, dec, min) \
4469 if ((val) > (min)) { \
4470 if ((val) > (min) + (dec)) \
4471 (val) -= (dec); \
4472 else \
4473 (val) = (min); \
4474 needs_update = 1; \
4475 }
4476
4477 const struct iwn_sensitivity_limits *limits = sc->limits;
4478 struct iwn_calib_state *calib = &sc->calib;
4479 uint32_t val, rxena, fa;
4480 uint32_t energy[3], energy_min;
4481 uint8_t noise[3], noise_ref;
4482 int i, needs_update = 0;
4483
4484 /* Check that we've been enabled long enough. */
4485 if ((rxena = le32toh(stats->general.load)) == 0)
4486 return;
4487
4488 /* Compute number of false alarms since last call for OFDM. */
4489 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4490 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4491 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
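	/*
	 * Scaling the raw counts by 200 TU (204800 usec) makes the tests
	 * below equivalent to counting false alarms per 200 TU of
	 * receiver-enabled time (assuming the "load" counter is in
	 * microseconds): more than 50 is considered high, fewer than 5 low.
	 */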
4492
4493 /* Save counters values for next call. */
4494 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4495 calib->fa_ofdm = le32toh(stats->ofdm.fa);
4496
4497 if (fa > 50 * rxena) {
4498 /* High false alarm count, decrease sensitivity. */
4499 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4500 "%s: OFDM high false alarm count: %u\n", __func__, fa);
4501 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
4502 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4503 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
4504 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4505
4506 } else if (fa < 5 * rxena) {
4507 /* Low false alarm count, increase sensitivity. */
4508 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4509 "%s: OFDM low false alarm count: %u\n", __func__, fa);
4510 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
4511 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4512 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
4513 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4514 }
4515
4516 /* Compute maximum noise among 3 receivers. */
4517 for (i = 0; i < 3; i++)
4518 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4519 val = MAX(noise[0], noise[1]);
4520 val = MAX(noise[2], val);
4521 /* Insert it into our samples table. */
4522 calib->noise_samples[calib->cur_noise_sample] = val;
4523 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4524
4525 /* Compute maximum noise among last 20 samples. */
4526 noise_ref = calib->noise_samples[0];
4527 for (i = 1; i < 20; i++)
4528 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4529
4530 /* Compute maximum energy among 3 receivers. */
4531 for (i = 0; i < 3; i++)
4532 energy[i] = le32toh(stats->general.energy[i]);
4533 val = MIN(energy[0], energy[1]);
4534 val = MIN(energy[2], val);
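	/*
	 * The energy figures appear to use an inverted scale (a smaller
	 * value means a stronger signal), which is why the "maximum energy"
	 * above is taken with MIN() and the "minimum energy" below with
	 * MAX().
	 */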
4535 /* Insert it into our samples table. */
4536 calib->energy_samples[calib->cur_energy_sample] = val;
4537 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4538
4539 /* Compute minimum energy among last 10 samples. */
4540 energy_min = calib->energy_samples[0];
4541 for (i = 1; i < 10; i++)
4542 energy_min = MAX(energy_min, calib->energy_samples[i]);
4543 energy_min += 6;
4544
4545 /* Compute number of false alarms since last call for CCK. */
4546 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4547 fa += le32toh(stats->cck.fa) - calib->fa_cck;
4548 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4549
4550 /* Save counters values for next call. */
4551 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4552 calib->fa_cck = le32toh(stats->cck.fa);
4553
4554 if (fa > 50 * rxena) {
4555 /* High false alarm count, decrease sensitivity. */
4556 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4557 "%s: CCK high false alarm count: %u\n", __func__, fa);
4558 calib->cck_state = IWN_CCK_STATE_HIFA;
4559 calib->low_fa = 0;
4560
4561 if (calib->cck_x4 > 160) {
4562 calib->noise_ref = noise_ref;
4563 if (calib->energy_cck > 2)
4564 dec(calib->energy_cck, 2, energy_min);
4565 }
4566 if (calib->cck_x4 < 160) {
4567 calib->cck_x4 = 161;
4568 needs_update = 1;
4569 } else
4570 inc(calib->cck_x4, 3, limits->max_cck_x4);
4571
4572 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4573
4574 } else if (fa < 5 * rxena) {
4575 /* Low false alarm count, increase sensitivity. */
4576 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4577 "%s: CCK low false alarm count: %u\n", __func__, fa);
4578 calib->cck_state = IWN_CCK_STATE_LOFA;
4579 calib->low_fa++;
4580
4581 if (calib->cck_state != IWN_CCK_STATE_INIT &&
4582 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4583 calib->low_fa > 100)) {
4584 inc(calib->energy_cck, 2, limits->min_energy_cck);
4585 dec(calib->cck_x4, 3, limits->min_cck_x4);
4586 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4587 }
4588 } else {
4589 /* Not worth increasing or decreasing sensitivity. */
4590 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4591 "%s: CCK normal false alarm count: %u\n", __func__, fa);
4592 calib->low_fa = 0;
4593 calib->noise_ref = noise_ref;
4594
4595 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4596 /* Previous interval had many false alarms. */
4597 dec(calib->energy_cck, 8, energy_min);
4598 }
4599 calib->cck_state = IWN_CCK_STATE_INIT;
4600 }
4601
4602 if (needs_update)
4603 (void)iwn_send_sensitivity(sc);
4604 #undef dec
4605 #undef inc
4606 }
4607
4608 static int
4609 iwn_send_sensitivity(struct iwn_softc *sc)
4610 {
4611 struct iwn_calib_state *calib = &sc->calib;
4612 struct iwn_enhanced_sensitivity_cmd cmd;
4613 int len;
4614
4615 memset(&cmd, 0, sizeof cmd);
4616 len = sizeof (struct iwn_sensitivity_cmd);
4617 cmd.which = IWN_SENSITIVITY_WORKTBL;
4618 /* OFDM modulation. */
4619 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
4620 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4621 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
4622 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4623 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
4624 cmd.energy_ofdm_th = htole16(62);
4625 /* CCK modulation. */
4626 cmd.corr_cck_x4 = htole16(calib->cck_x4);
4627 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
4628 cmd.energy_cck = htole16(calib->energy_cck);
4629 /* Barker modulation: use default values. */
4630 cmd.corr_barker = htole16(190);
4631 cmd.corr_barker_mrc = htole16(390);
4632
4633 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4634 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4635 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4636 calib->ofdm_mrc_x4, calib->cck_x4,
4637 calib->cck_mrc_x4, calib->energy_cck);
4638
4639 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4640 goto send;
4641 /* Enhanced sensitivity settings. */
4642 len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4643 cmd.ofdm_det_slope_mrc = htole16(668);
4644 cmd.ofdm_det_icept_mrc = htole16(4);
4645 cmd.ofdm_det_slope = htole16(486);
4646 cmd.ofdm_det_icept = htole16(37);
4647 cmd.cck_det_slope_mrc = htole16(853);
4648 cmd.cck_det_icept_mrc = htole16(4);
4649 cmd.cck_det_slope = htole16(476);
4650 cmd.cck_det_icept = htole16(99);
4651 send:
4652 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4653 }
4654
4655 /*
4656 * Set STA mode power saving level (between 0 and 5).
4657 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4658 */
4659 static int
4660 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4661 {
4662 struct iwn_pmgt_cmd cmd;
4663 const struct iwn_pmgt *pmgt;
4664 uint32_t max, skip_dtim;
4665 uint32_t reg;
4666 int i;
4667
4668 /* Select which PS parameters to use. */
4669 if (dtim <= 2)
4670 pmgt = &iwn_pmgt[0][level];
4671 else if (dtim <= 10)
4672 pmgt = &iwn_pmgt[1][level];
4673 else
4674 pmgt = &iwn_pmgt[2][level];
4675
4676 memset(&cmd, 0, sizeof cmd);
4677 if (level != 0) /* not CAM */
4678 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4679 if (level == 5)
4680 cmd.flags |= htole16(IWN_PS_FAST_PD);
4681 /* Retrieve PCIe Active State Power Management (ASPM). */
4682 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4683 if (!(reg & 0x1)) /* L0s Entry disabled. */
4684 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4685 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4686 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4687
4688 if (dtim == 0) {
4689 dtim = 1;
4690 skip_dtim = 0;
4691 } else
4692 skip_dtim = pmgt->skip_dtim;
4693 if (skip_dtim != 0) {
4694 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4695 max = pmgt->intval[4];
4696 if (max == (uint32_t)-1)
4697 max = dtim * (skip_dtim + 1);
4698 else if (max > dtim)
4699 max = (max / dtim) * dtim;
4700 } else
4701 max = dtim;
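	/*
	 * Example (assuming pmgt->intval[4] is (uint32_t)-1): with a DTIM
	 * period of 3 and skip_dtim of 2, max = 3 * (2 + 1) = 9, so each
	 * sleep interval below is clamped to at most 9 beacon intervals.
	 */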
4702 for (i = 0; i < 5; i++)
4703 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4704
4705 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4706 level);
4707 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4708 }
4709
4710 static int
4711 iwn_send_btcoex(struct iwn_softc *sc)
4712 {
4713 struct iwn_bluetooth cmd;
4714
4715 memset(&cmd, 0, sizeof cmd);
4716 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4717 cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4718 cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4719 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
4720 __func__);
4721 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4722 }
4723
4724 static int
4725 iwn_send_advanced_btcoex(struct iwn_softc *sc)
4726 {
4727 static const uint32_t btcoex_3wire[12] = {
4728 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
4729 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
4730 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
4731 };
4732 struct iwn6000_btcoex_config btconfig;
4733 struct iwn_btcoex_priotable btprio;
4734 struct iwn_btcoex_prot btprot;
4735 int error, i;
4736
4737 memset(&btconfig, 0, sizeof btconfig);
4738 btconfig.flags = 145;
4739 btconfig.max_kill = 5;
4740 btconfig.bt3_t7_timer = 1;
4741 btconfig.kill_ack = htole32(0xffff0000);
4742 btconfig.kill_cts = htole32(0xffff0000);
4743 btconfig.sample_time = 2;
4744 btconfig.bt3_t2_timer = 0xc;
4745 for (i = 0; i < 12; i++)
4746 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4747 btconfig.valid = htole16(0xff);
4748 btconfig.prio_boost = 0xf0;
4749 DPRINTF(sc, IWN_DEBUG_RESET,
4750 "%s: configuring advanced bluetooth coexistence\n", __func__);
4751 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
4752 if (error != 0)
4753 return error;
4754
4755 memset(&btprio, 0, sizeof btprio);
4756 btprio.calib_init1 = 0x6;
4757 btprio.calib_init2 = 0x7;
4758 btprio.calib_periodic_low1 = 0x2;
4759 btprio.calib_periodic_low2 = 0x3;
4760 btprio.calib_periodic_high1 = 0x4;
4761 btprio.calib_periodic_high2 = 0x5;
4762 btprio.dtim = 0x6;
4763 btprio.scan52 = 0x8;
4764 btprio.scan24 = 0xa;
4765 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
4766 1);
4767 if (error != 0)
4768 return error;
4769
4770 /* Force BT state machine change. */
4771 memset(&btprot, 0, sizeof btprot);
4772 btprot.open = 1;
4773 btprot.type = 1;
4774 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
4775 if (error != 0)
4776 return error;
4777 btprot.open = 0;
4778 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
4779 }
4780
4781 static int
4782 iwn_config(struct iwn_softc *sc)
4783 {
4784 struct iwn_ops *ops = &sc->ops;
4785 struct ifnet *ifp = sc->sc_ifp;
4786 struct ieee80211com *ic = ifp->if_l2com;
4787 uint32_t txmask;
4788 uint16_t rxchain;
4789 int error;
4790
4791 if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
4792 /* Set radio temperature sensor offset. */
4793 error = iwn5000_temp_offset_calib(sc);
4794 if (error != 0) {
4795 device_printf(sc->sc_dev,
4796 "%s: could not set temperature offset\n", __func__);
4797 return error;
4798 }
4799 }
4800
4801 /* Configure valid TX chains for >=5000 Series. */
4802 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4803 txmask = htole32(sc->txchainmask);
4804 DPRINTF(sc, IWN_DEBUG_RESET,
4805 "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
4806 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4807 sizeof txmask, 0);
4808 if (error != 0) {
4809 device_printf(sc->sc_dev,
4810 "%s: could not configure valid TX chains, "
4811 "error %d\n", __func__, error);
4812 return error;
4813 }
4814 }
4815
4816 /* Configure bluetooth coexistence. */
4817 if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
4818 error = iwn_send_advanced_btcoex(sc);
4819 else
4820 error = iwn_send_btcoex(sc);
4821 if (error != 0) {
4822 device_printf(sc->sc_dev,
4823 "%s: could not configure bluetooth coexistence, error %d\n",
4824 __func__, error);
4825 return error;
4826 }
4827
4828 /* Set mode, channel, RX filter and enable RX. */
4829 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4830 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
4831 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
4832 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
4833 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4834 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
4835 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4836 switch (ic->ic_opmode) {
4837 case IEEE80211_M_STA:
4838 sc->rxon.mode = IWN_MODE_STA;
4839 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4840 break;
4841 case IEEE80211_M_MONITOR:
4842 sc->rxon.mode = IWN_MODE_MONITOR;
4843 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4844 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4845 break;
4846 default:
4847 /* Should not get here. */
4848 break;
4849 }
4850 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
4851 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
4852 sc->rxon.ht_single_mask = 0xff;
4853 sc->rxon.ht_dual_mask = 0xff;
4854 sc->rxon.ht_triple_mask = 0xff;
4855 rxchain =
4856 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4857 IWN_RXCHAIN_MIMO_COUNT(2) |
4858 IWN_RXCHAIN_IDLE_COUNT(2);
4859 sc->rxon.rxchain = htole16(rxchain);
4860 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
4861 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
4862 if (error != 0) {
4863 device_printf(sc->sc_dev, "%s: RXON command failed\n",
4864 __func__);
4865 return error;
4866 }
4867
4868 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
4869 device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
4870 __func__);
4871 return error;
4872 }
4873
4874 /* Configuration has changed, set TX power accordingly. */
4875 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
4876 device_printf(sc->sc_dev, "%s: could not set TX power\n",
4877 __func__);
4878 return error;
4879 }
4880
4881 if ((error = iwn_set_critical_temp(sc)) != 0) {
4882 device_printf(sc->sc_dev,
4883 "%s: could not set critical temperature\n", __func__);
4884 return error;
4885 }
4886
4887 /* Set power saving level to CAM during initialization. */
4888 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
4889 device_printf(sc->sc_dev,
4890 "%s: could not set power saving level\n", __func__);
4891 return error;
4892 }
4893 return 0;
4894 }
4895
4896 /*
4897 * Add an ssid element to a frame.
4898 */
4899 static uint8_t *
4900 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
4901 {
4902 *frm++ = IEEE80211_ELEMID_SSID;
4903 *frm++ = len;
4904 memcpy(frm, ssid, len);
4905 return frm + len;
4906 }
4907
4908 static int
4909 iwn_scan(struct iwn_softc *sc)
4910 {
4911 struct ifnet *ifp = sc->sc_ifp;
4912 struct ieee80211com *ic = ifp->if_l2com;
4913 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/
4914 struct iwn_scan_hdr *hdr;
4915 struct iwn_cmd_data *tx;
4916 struct iwn_scan_essid *essid;
4917 struct iwn_scan_chan *chan;
4918 struct ieee80211_frame *wh;
4919 struct ieee80211_rateset *rs;
4920 struct ieee80211_channel *c;
4921 uint8_t *buf, *frm;
4922 uint16_t rxchain;
4923 uint8_t txant;
4924 int buflen, error;
4925
4926 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4927 if (buf == NULL) {
4928 device_printf(sc->sc_dev,
4929 "%s: could not allocate buffer for scan command\n",
4930 __func__);
4931 return ENOMEM;
4932 }
4933 hdr = (struct iwn_scan_hdr *)buf;
4934 /*
4935 * Move to the next channel if no frames are received within 10ms
4936 * after sending the probe request.
4937 */
4938 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
4939 hdr->quiet_threshold = htole16(1); /* min # of packets */
4940
4941 /* Select antennas for scanning. */
4942 rxchain =
4943 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4944 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4945 IWN_RXCHAIN_DRIVER_FORCE;
4946 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
4947 sc->hw_type == IWN_HW_REV_TYPE_4965) {
4948 /* Ant A must be avoided in 5GHz because of an HW bug. */
4949 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4950 } else /* Use all available RX antennas. */
4951 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4952 hdr->rxchain = htole16(rxchain);
4953 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4954
4955 tx = (struct iwn_cmd_data *)(hdr + 1);
4956 tx->flags = htole32(IWN_TX_AUTO_SEQ);
4957 tx->id = sc->broadcast_id;
4958 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4959
4960 if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
4961 /* Send probe requests at 6Mbps. */
4962 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4963 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4964 } else {
4965 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4966 /* Send probe requests at 1Mbps. */
4967 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4968 tx->rflags = IWN_RFLAG_CCK;
4969 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4970 }
4971 /* Use the first valid TX antenna. */
4972 txant = IWN_LSB(sc->txchainmask);
4973 tx->rflags |= IWN_RFLAG_ANT(txant);
4974
4975 essid = (struct iwn_scan_essid *)(tx + 1);
4976 if (ss->ss_ssid[0].len != 0) {
4977 essid[0].id = IEEE80211_ELEMID_SSID;
4978 essid[0].len = ss->ss_ssid[0].len;
4979 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4980 }
4981 /*
4982 * Build a probe request frame. Most of the following code is a
4983 * copy & paste of what is done in net80211.
4984 */
4985 wh = (struct ieee80211_frame *)(essid + 20);
4986 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4987 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4988 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4989 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
4990 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
4991 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
4992 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4993 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4994
4995 frm = (uint8_t *)(wh + 1);
4996 frm = ieee80211_add_ssid(frm, NULL, 0);
4997 frm = ieee80211_add_rates(frm, rs);
4998 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4999 frm = ieee80211_add_xrates(frm, rs);
5000 #if 0 /* HT */
5001 if (ic->ic_flags & IEEE80211_F_HTON)
5002 frm = ieee80211_add_htcaps(frm, ic);
5003 #endif
5004
5005 /* Set length of probe request. */
5006 tx->len = htole16(frm - (uint8_t *)wh);
5007
5008 c = ic->ic_curchan;
5009 chan = (struct iwn_scan_chan *)frm;
5010 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5011 chan->flags = 0;
5012 if (ss->ss_nssid > 0)
5013 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5014 chan->dsp_gain = 0x6e;
5015 if (IEEE80211_IS_CHAN_5GHZ(c) &&
5016 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5017 chan->rf_gain = 0x3b;
5018 chan->active = htole16(24);
5019 chan->passive = htole16(110);
5020 chan->flags |= htole32(IWN_CHAN_ACTIVE);
5021 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
5022 chan->rf_gain = 0x3b;
5023 chan->active = htole16(24);
5024 if (sc->rxon.associd)
5025 chan->passive = htole16(78);
5026 else
5027 chan->passive = htole16(110);
5028 hdr->crc_threshold = 0xffff;
5029 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5030 chan->rf_gain = 0x28;
5031 chan->active = htole16(36);
5032 chan->passive = htole16(120);
5033 chan->flags |= htole32(IWN_CHAN_ACTIVE);
5034 } else {
5035 chan->rf_gain = 0x28;
5036 chan->active = htole16(36);
5037 if (sc->rxon.associd)
5038 chan->passive = htole16(88);
5039 else
5040 chan->passive = htole16(120);
5041 hdr->crc_threshold = 0xffff;
5042 }
5043
5044 DPRINTF(sc, IWN_DEBUG_STATE,
5045 "%s: chan %u flags 0x%x rf_gain 0x%x "
5046 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
5047 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
5048 chan->active, chan->passive);
5049
5050 hdr->nchan++;
5051 chan++;
5052 buflen = (uint8_t *)chan - buf;
5053 hdr->len = htole16(buflen);
5054
5055 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
5056 hdr->nchan);
5057 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5058 free(buf, M_DEVBUF);
5059 return error;
5060 }
5061
5062 static int
5063 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
5064 {
5065 struct iwn_ops *ops = &sc->ops;
5066 struct ifnet *ifp = sc->sc_ifp;
5067 struct ieee80211com *ic = ifp->if_l2com;
5068 struct ieee80211_node *ni = vap->iv_bss;
5069 int error;
5070
5071 /* Update adapter configuration. */
5072 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5073 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5074 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5075 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5076 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5077 if (ic->ic_flags & IEEE80211_F_SHSLOT)
5078 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5079 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5080 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5081 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5082 sc->rxon.cck_mask = 0;
5083 sc->rxon.ofdm_mask = 0x15;
5084 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5085 sc->rxon.cck_mask = 0x03;
5086 sc->rxon.ofdm_mask = 0;
5087 } else {
5088 /* Assume 802.11b/g. */
5089 sc->rxon.cck_mask = 0x0f;
5090 sc->rxon.ofdm_mask = 0x15;
5091 }
5092 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
5093 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
5094 sc->rxon.ofdm_mask);
5095 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5096 if (error != 0) {
5097 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
5098 __func__, error);
5099 return error;
5100 }
5101
5102 /* Configuration has changed, set TX power accordingly. */
5103 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5104 device_printf(sc->sc_dev,
5105 "%s: could not set TX power, error %d\n", __func__, error);
5106 return error;
5107 }
5108 /*
5109 * Reconfiguring RXON clears the firmware nodes table so we must
5110 * add the broadcast node again.
5111 */
5112 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5113 device_printf(sc->sc_dev,
5114 "%s: could not add broadcast node, error %d\n", __func__,
5115 error);
5116 return error;
5117 }
5118 return 0;
5119 }
5120
5121 static int
5122 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
5123 {
5124 #define MS(v,x) (((v) & x) >> x##_S)
5125 struct iwn_ops *ops = &sc->ops;
5126 struct ifnet *ifp = sc->sc_ifp;
5127 struct ieee80211com *ic = ifp->if_l2com;
5128 struct ieee80211_node *ni = vap->iv_bss;
5129 struct iwn_node_info node;
5130 int error;
5131
5132 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5133 /* Link LED blinks while monitoring. */
5134 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5135 return 0;
5136 }
5137 if ((error = iwn_set_timing(sc, ni)) != 0) {
5138 device_printf(sc->sc_dev,
5139 "%s: could not set timing, error %d\n", __func__, error);
5140 return error;
5141 }
5142
5143 /* Update adapter configuration. */
5144 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5145 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5146 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5147 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5148 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5149 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5150 if (ic->ic_flags & IEEE80211_F_SHSLOT)
5151 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5152 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5153 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5154 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5155 sc->rxon.cck_mask = 0;
5156 sc->rxon.ofdm_mask = 0x15;
5157 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5158 sc->rxon.cck_mask = 0x03;
5159 sc->rxon.ofdm_mask = 0;
5160 } else {
5161 /* Assume 802.11b/g. */
5162 sc->rxon.cck_mask = 0x0f;
5163 sc->rxon.ofdm_mask = 0x15;
5164 }
5165 #if 0 /* HT */
5166 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5167 sc->rxon.flags &= ~htole32(IWN_RXON_HT);
5168 if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
5169 sc->rxon.flags |= htole32(IWN_RXON_HT40U);
5170 else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
5171 sc->rxon.flags |= htole32(IWN_RXON_HT40D);
5172 else
5173 sc->rxon.flags |= htole32(IWN_RXON_HT20);
5174 sc->rxon.rxchain = htole16(
5175 IWN_RXCHAIN_VALID(3)
5176 | IWN_RXCHAIN_MIMO_COUNT(3)
5177 | IWN_RXCHAIN_IDLE_COUNT(1)
5178 | IWN_RXCHAIN_MIMO_FORCE);
5179
5180 maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU);
5181 ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
5182 } else
5183 maxrxampdu = ampdudensity = 0;
5184 #endif
5185 sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5186 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
5187 sc->rxon.chan, sc->rxon.flags);
5188 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5189 if (error != 0) {
5190 device_printf(sc->sc_dev,
5191 "%s: could not update configuration, error %d\n", __func__,
5192 error);
5193 return error;
5194 }
5195
5196 /* Configuration has changed, set TX power accordingly. */
5197 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5198 device_printf(sc->sc_dev,
5199 "%s: could not set TX power, error %d\n", __func__, error);
5200 return error;
5201 }
5202
5203 /* Fake a join to initialize the TX rate. */
5204 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
5205 iwn_newassoc(ni, 1);
5206
5207 /* Add BSS node. */
5208 memset(&node, 0, sizeof node);
5209 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5210 node.id = IWN_ID_BSS;
5211 #ifdef notyet
5212 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
5213 IWN_AMDPU_DENSITY(5)); /* 2us */
5214 #endif
5215 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
5216 error = ops->add_node(sc, &node, 1);
5217 if (error != 0) {
5218 device_printf(sc->sc_dev,
5219 "%s: could not add BSS node, error %d\n", __func__, error);
5220 return error;
5221 }
5222 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
5223 __func__, node.id);
5224 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5225 device_printf(sc->sc_dev,
5226 "%s: could not setup link quality for node %d, error %d\n",
5227 __func__, node.id, error);
5228 return error;
5229 }
5230
5231 if ((error = iwn_init_sensitivity(sc)) != 0) {
5232 device_printf(sc->sc_dev,
5233 "%s: could not set sensitivity, error %d\n", __func__,
5234 error);
5235 return error;
5236 }
5237 /* Start periodic calibration timer. */
5238 sc->calib.state = IWN_CALIB_STATE_ASSOC;
5239 sc->calib_cnt = 0;
5240 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
5241 sc);
5242
5243 /* Link LED always on while associated. */
5244 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5245 return 0;
5246 #undef MS
5247 }
5248
5249 #if 0 /* HT */
5250 /*
5251 * This function is called by the upper layer when an ADDBA request is received
5252 * from another STA and before the ADDBA response is sent.
5253 */
5254 static int
5255 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5256 uint8_t tid)
5257 {
5258 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
5259 struct iwn_softc *sc = ic->ic_softc;
5260 struct iwn_ops *ops = &sc->ops;
5261 struct iwn_node *wn = (void *)ni;
5262 struct iwn_node_info node;
5263
5264 memset(&node, 0, sizeof node);
5265 node.id = wn->id;
5266 node.control = IWN_NODE_UPDATE;
5267 node.flags = IWN_FLAG_SET_ADDBA;
5268 node.addba_tid = tid;
5269 node.addba_ssn = htole16(ba->ba_winstart);
5270 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5271 wn->id, tid, ba->ba_winstart);
5272 return ops->add_node(sc, &node, 1);
5273 }
5274
5275 /*
5276 * This function is called by the upper layer on teardown of an HT-immediate
5277 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5278 */
5279 static void
5280 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5281 uint8_t tid)
5282 {
5283 struct iwn_softc *sc = ic->ic_softc;
5284 struct iwn_ops *ops = &sc->ops;
5285 struct iwn_node *wn = (void *)ni;
5286 struct iwn_node_info node;
5287
5288 memset(&node, 0, sizeof node);
5289 node.id = wn->id;
5290 node.control = IWN_NODE_UPDATE;
5291 node.flags = IWN_FLAG_SET_DELBA;
5292 node.delba_tid = tid;
5293 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5294 (void)ops->add_node(sc, &node, 1);
5295 }
5296
5297 /*
5298 * This function is called by the upper layer when an ADDBA response is received
5299 * from another STA.
5300 */
5301 static int
5302 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5303 uint8_t tid)
5304 {
5305 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5306 struct iwn_softc *sc = ic->ic_softc;
5307 struct iwn_ops *ops = &sc->ops;
5308 struct iwn_node *wn = (void *)ni;
5309 struct iwn_node_info node;
5310 int error;
5311
5312 /* Enable TX for the specified RA/TID. */
5313 wn->disable_tid &= ~(1 << tid);
5314 memset(&node, 0, sizeof node);
5315 node.id = wn->id;
5316 node.control = IWN_NODE_UPDATE;
5317 node.flags = IWN_FLAG_SET_DISABLE_TID;
5318 node.disable_tid = htole16(wn->disable_tid);
5319 error = ops->add_node(sc, &node, 1);
5320 if (error != 0)
5321 return error;
5322
5323 if ((error = iwn_nic_lock(sc)) != 0)
5324 return error;
5325 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5326 iwn_nic_unlock(sc);
5327 return 0;
5328 }
5329
5330 static void
5331 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5332 uint8_t tid)
5333 {
5334 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5335 struct iwn_softc *sc = ic->ic_softc;
5336 struct iwn_ops *ops = &sc->ops;
5337
5338 if (iwn_nic_lock(sc) != 0)
5339 return;
5340 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5341 iwn_nic_unlock(sc);
5342 }
5343
5344 static void
5345 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5346 uint8_t tid, uint16_t ssn)
5347 {
5348 struct iwn_node *wn = (void *)ni;
5349 int qid = 7 + tid;
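	/*
	 * Aggregation queues sit above the seven default TX queues, so
	 * TID n maps onto TX queue 7 + n (the 4965 has 16 TX queues).
	 */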
5350
5351 /* Stop TX scheduler while we're changing its configuration. */
5352 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5353 IWN4965_TXQ_STATUS_CHGACT);
5354
5355 /* Assign RA/TID translation to the queue. */
5356 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5357 wn->id << 4 | tid);
5358
5359 /* Enable chain-building mode for the queue. */
5360 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5361
5362 /* Set starting sequence number from the ADDBA request. */
5363 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5364 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5365
5366 /* Set scheduler window size. */
5367 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5368 IWN_SCHED_WINSZ);
5369 /* Set scheduler frame limit. */
5370 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5371 IWN_SCHED_LIMIT << 16);
5372
5373 /* Enable interrupts for the queue. */
5374 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5375
5376 /* Mark the queue as active. */
5377 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5378 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5379 iwn_tid2fifo[tid] << 1);
5380 }
5381
5382 static void
5383 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5384 {
5385 int qid = 7 + tid;
5386
5387 /* Stop TX scheduler while we're changing its configuration. */
5388 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5389 IWN4965_TXQ_STATUS_CHGACT);
5390
5391 /* Set starting sequence number from the ADDBA request. */
5392 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5393 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5394
5395 /* Disable interrupts for the queue. */
5396 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5397
5398 /* Mark the queue as inactive. */
5399 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5400 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5401 }
5402
5403 static void
5404 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5405 uint8_t tid, uint16_t ssn)
5406 {
5407 struct iwn_node *wn = (void *)ni;
5408 int qid = 10 + tid;
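	/*
	 * As on the 4965, aggregation queues follow the default ones,
	 * but the 5000 series starts them at queue 10 (of 20 TX queues).
	 */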
5409
5410 /* Stop TX scheduler while we're changing its configuration. */
5411 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5412 IWN5000_TXQ_STATUS_CHGACT);
5413
5414 /* Assign RA/TID translation to the queue. */
5415 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5416 wn->id << 4 | tid);
5417
5418 /* Enable chain-building mode for the queue. */
5419 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5420
5421 /* Enable aggregation for the queue. */
5422 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5423
5424 /* Set starting sequence number from the ADDBA request. */
5425 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5426 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5427
5428 /* Set scheduler window size and frame limit. */
5429 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5430 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5431
5432 /* Enable interrupts for the queue. */
5433 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5434
5435 /* Mark the queue as active. */
5436 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5437 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5438 }
5439
5440 static void
5441 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5442 {
5443 int qid = 10 + tid;
5444
5445 /* Stop TX scheduler while we're changing its configuration. */
5446 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5447 IWN5000_TXQ_STATUS_CHGACT);
5448
5449 /* Disable aggregation for the queue. */
5450 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5451
5452 /* Set starting sequence number from the ADDBA request. */
5453 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5454 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5455
5456 /* Disable interrupts for the queue. */
5457 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5458
5459 /* Mark the queue as inactive. */
5460 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5461 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5462 }
5463 #endif
5464
5465 /*
5466 * Query calibration tables from the initialization firmware. We do this
5467 * only once at first boot. Called from a process context.
5468 */
5469 static int
5470 iwn5000_query_calibration(struct iwn_softc *sc)
5471 {
5472 struct iwn5000_calib_config cmd;
5473 int error;
5474
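	/*
	 * A conservative "request everything" configuration: the all-ones
	 * enable/start/send masks and flags ask the initialization firmware
	 * to run and report every calibration it supports; the saved results
	 * are later pushed to the runtime firmware by
	 * iwn5000_send_calibration().
	 */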
5475 memset(&cmd, 0, sizeof cmd);
5476 cmd.ucode.once.enable = 0xffffffff;
5477 cmd.ucode.once.start = 0xffffffff;
5478 cmd.ucode.once.send = 0xffffffff;
5479 cmd.ucode.flags = 0xffffffff;
5480 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
5481 __func__);
5482 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5483 if (error != 0)
5484 return error;
5485
5486 /* Wait at most two seconds for calibration to complete. */
5487 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5488 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
5489 return error;
5490 }
5491
5492 /*
5493 * Send calibration results to the runtime firmware. These results were
5494 * obtained on first boot from the initialization firmware.
5495 */
5496 static int
5497 iwn5000_send_calibration(struct iwn_softc *sc)
5498 {
5499 int idx, error;
5500
5501 for (idx = 0; idx < 5; idx++) {
5502 if (sc->calibcmd[idx].buf == NULL)
5503 continue; /* No results available. */
5504 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5505 "send calibration result idx=%d len=%d\n", idx,
5506 sc->calibcmd[idx].len);
5507 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
5508 sc->calibcmd[idx].len, 0);
5509 if (error != 0) {
5510 device_printf(sc->sc_dev,
5511 "%s: could not send calibration result, error %d\n",
5512 __func__, error);
5513 return error;
5514 }
5515 }
5516 return 0;
5517 }
5518
5519 static int
5520 iwn5000_send_wimax_coex(struct iwn_softc *sc)
5521 {
5522 struct iwn5000_wimax_coex wimax;
5523
5524 #ifdef notyet
5525 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5526 /* Enable WiMAX coexistence for combo adapters. */
5527 wimax.flags =
5528 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5529 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5530 IWN_WIMAX_COEX_STA_TABLE_VALID |
5531 IWN_WIMAX_COEX_ENABLE;
5532 memcpy(wimax.events, iwn6050_wimax_events,
5533 sizeof iwn6050_wimax_events);
5534 } else
5535 #endif
5536 {
5537 /* Disable WiMAX coexistence. */
5538 wimax.flags = 0;
5539 memset(wimax.events, 0, sizeof wimax.events);
5540 }
5541 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5542 __func__);
5543 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5544 }
5545
5546 static int
5547 iwn5000_crystal_calib(struct iwn_softc *sc)
5548 {
5549 struct iwn5000_phy_calib_crystal cmd;
5550
5551 memset(&cmd, 0, sizeof cmd);
5552 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5553 cmd.ngroups = 1;
5554 cmd.isvalid = 1;
5555 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
5556 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
5557 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
5558 cmd.cap_pin[0], cmd.cap_pin[1]);
5559 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5560 }
5561
5562 static int
5563 iwn5000_temp_offset_calib(struct iwn_softc *sc)
5564 {
5565 struct iwn5000_phy_calib_temp_offset cmd;
5566
5567 memset(&cmd, 0, sizeof cmd);
5568 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
5569 cmd.ngroups = 1;
5570 cmd.isvalid = 1;
5571 if (sc->eeprom_temp != 0)
5572 cmd.offset = htole16(sc->eeprom_temp);
5573 else
5574 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
5575 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
5576 le16toh(cmd.offset));
5577 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5578 }
5579
5580 /*
5581 * This function is called after the runtime firmware notifies us of its
5582 * readiness (called in a process context).
5583 */
5584 static int
5585 iwn4965_post_alive(struct iwn_softc *sc)
5586 {
5587 int error, qid;
5588
5589 if ((error = iwn_nic_lock(sc)) != 0)
5590 return error;
5591
5592 /* Clear TX scheduler state in SRAM. */
5593 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5594 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5595 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5596
5597 /* Set physical address of TX scheduler rings (1KB aligned). */
5598 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5599
5600 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5601
5602 /* Disable chain mode for all our 16 queues. */
5603 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5604
5605 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5606 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5607 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5608
5609 /* Set scheduler window size. */
5610 iwn_mem_write(sc, sc->sched_base +
5611 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5612 /* Set scheduler frame limit. */
5613 iwn_mem_write(sc, sc->sched_base +
5614 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5615 IWN_SCHED_LIMIT << 16);
5616 }
5617
5618 /* Enable interrupts for all our 16 queues. */
5619 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5620 /* Identify TX FIFO rings (0-7). */
5621 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5622
5623 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5624 for (qid = 0; qid < 7; qid++) {
5625 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5626 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5627 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5628 }
5629 iwn_nic_unlock(sc);
5630 return 0;
5631 }
5632
5633 /*
5634 * This function is called after the initialization or runtime firmware
5635 * notifies us of its readiness (called in a process context).
5636 */
5637 static int
5638 iwn5000_post_alive(struct iwn_softc *sc)
5639 {
5640 int error, qid;
5641
5642 /* Switch to using ICT interrupt mode. */
5643 iwn5000_ict_reset(sc);
5644
5645 if ((error = iwn_nic_lock(sc)) != 0)
5646 return error;
5647
5648 /* Clear TX scheduler state in SRAM. */
5649 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5650 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5651 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5652
5653 /* Set physical address of TX scheduler rings (1KB aligned). */
5654 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5655
5656 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5657
5658 	/* Enable chain mode for all queues except the command queue (bit 4). */
5659 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5660 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5661
5662 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5663 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5664 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5665
5666 iwn_mem_write(sc, sc->sched_base +
5667 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5668 /* Set scheduler window size and frame limit. */
5669 iwn_mem_write(sc, sc->sched_base +
5670 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5671 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5672 }
5673
5674 /* Enable interrupts for all our 20 queues. */
5675 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5676 /* Identify TX FIFO rings (0-7). */
5677 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5678
5679 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5680 for (qid = 0; qid < 7; qid++) {
5681 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5682 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5683 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5684 }
5685 iwn_nic_unlock(sc);
5686
5687 /* Configure WiMAX coexistence for combo adapters. */
5688 error = iwn5000_send_wimax_coex(sc);
5689 if (error != 0) {
5690 device_printf(sc->sc_dev,
5691 "%s: could not configure WiMAX coexistence, error %d\n",
5692 __func__, error);
5693 return error;
5694 }
5695 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
5696 /* Perform crystal calibration. */
5697 error = iwn5000_crystal_calib(sc);
5698 if (error != 0) {
5699 device_printf(sc->sc_dev,
5700 "%s: crystal calibration failed, error %d\n",
5701 __func__, error);
5702 return error;
5703 }
5704 }
5705 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5706 /* Query calibration from the initialization firmware. */
5707 if ((error = iwn5000_query_calibration(sc)) != 0) {
5708 device_printf(sc->sc_dev,
5709 "%s: could not query calibration, error %d\n",
5710 __func__, error);
5711 return error;
5712 }
5713 /*
5714 		 * We have the calibration results now; restart the hardware with the
5715 		 * runtime firmware (iwn_hw_init() will call back into this function).
5716 */
5717 iwn_hw_stop(sc);
5718 error = iwn_hw_init(sc);
5719 } else {
5720 /* Send calibration results to runtime firmware. */
5721 error = iwn5000_send_calibration(sc);
5722 }
5723 return error;
5724 }
5725
5726 /*
5727 * The firmware boot code is small and is intended to be copied directly into
5728 * the NIC internal memory (no DMA transfer).
5729 */
5730 static int
5731 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5732 {
5733 int error, ntries;
5734
5735 size /= sizeof (uint32_t);
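	/*
	 * The Bootstrap State Machine transfers the boot code in 32-bit
	 * words, so convert the byte count into a dword count for the
	 * IWN_BSM_WR_DWCOUNT register programmed below.
	 */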
5736
5737 if ((error = iwn_nic_lock(sc)) != 0)
5738 return error;
5739
5740 /* Copy microcode image into NIC memory. */
5741 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5742 (const uint32_t *)ucode, size);
5743
5744 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5745 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5746 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5747
5748 /* Start boot load now. */
5749 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5750
5751 /* Wait for transfer to complete. */
5752 for (ntries = 0; ntries < 1000; ntries++) {
5753 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5754 IWN_BSM_WR_CTRL_START))
5755 break;
5756 DELAY(10);
5757 }
5758 if (ntries == 1000) {
5759 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5760 __func__);
5761 iwn_nic_unlock(sc);
5762 return ETIMEDOUT;
5763 }
5764
5765 /* Enable boot after power up. */
5766 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5767
5768 iwn_nic_unlock(sc);
5769 return 0;
5770 }
5771
5772 static int
5773 iwn4965_load_firmware(struct iwn_softc *sc)
5774 {
5775 struct iwn_fw_info *fw = &sc->fw;
5776 struct iwn_dma_info *dma = &sc->fw_dma;
5777 int error;
5778
5779 /* Copy initialization sections into pre-allocated DMA-safe memory. */
5780 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5781 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5782 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5783 fw->init.text, fw->init.textsz);
5784 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5785
5786 /* Tell adapter where to find initialization sections. */
5787 if ((error = iwn_nic_lock(sc)) != 0)
5788 return error;
5789 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5790 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5791 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5792 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5793 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5794 iwn_nic_unlock(sc);
5795
5796 /* Load firmware boot code. */
5797 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5798 if (error != 0) {
5799 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5800 __func__);
5801 return error;
5802 }
5803 /* Now press "execute". */
5804 IWN_WRITE(sc, IWN_RESET, 0);
5805
5806 /* Wait at most one second for first alive notification. */
5807 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
5808 device_printf(sc->sc_dev,
5809 "%s: timeout waiting for adapter to initialize, error %d\n",
5810 __func__, error);
5811 return error;
5812 }
5813
5814 /* Retrieve current temperature for initial TX power calibration. */
5815 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5816 sc->temp = iwn4965_get_temperature(sc);
5817
5818 /* Copy runtime sections into pre-allocated DMA-safe memory. */
5819 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5820 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5821 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5822 fw->main.text, fw->main.textsz);
5823 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5824
5825 /* Tell adapter where to find runtime sections. */
5826 if ((error = iwn_nic_lock(sc)) != 0)
5827 return error;
5828 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5829 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5830 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5831 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5832 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5833 IWN_FW_UPDATED | fw->main.textsz);
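	/*
	 * Unlike the initial load above, the runtime text size is or'ed
	 * with IWN_FW_UPDATED, presumably to tell the BSM that the DRAM
	 * image now holds the runtime sections it should load.
	 */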
5834 iwn_nic_unlock(sc);
5835
5836 return 0;
5837 }
5838
5839 static int
5840 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5841 const uint8_t *section, int size)
5842 {
5843 struct iwn_dma_info *dma = &sc->fw_dma;
5844 int error;
5845
5846 /* Copy firmware section into pre-allocated DMA-safe memory. */
5847 memcpy(dma->vaddr, section, size);
5848 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5849
5850 if ((error = iwn_nic_lock(sc)) != 0)
5851 return error;
5852
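	/*
	 * Program the service DMA channel: pause it, set the SRAM
	 * destination address, describe a single transmit buffer (low and
	 * high physical address bits plus the byte count), then re-enable
	 * the channel below to kick off the transfer.
	 */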
5853 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5854 IWN_FH_TX_CONFIG_DMA_PAUSE);
5855
5856 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5857 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5858 IWN_LOADDR(dma->paddr));
5859 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5860 IWN_HIADDR(dma->paddr) << 28 | size);
5861 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5862 IWN_FH_TXBUF_STATUS_TBNUM(1) |
5863 IWN_FH_TXBUF_STATUS_TBIDX(1) |
5864 IWN_FH_TXBUF_STATUS_TFBD_VALID);
5865
5866 /* Kick Flow Handler to start DMA transfer. */
5867 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5868 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5869
5870 iwn_nic_unlock(sc);
5871
5872 /* Wait at most five seconds for FH DMA transfer to complete. */
5873 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
5874 }
5875
5876 static int
5877 iwn5000_load_firmware(struct iwn_softc *sc)
5878 {
5879 struct iwn_fw_part *fw;
5880 int error;
5881
5882 /* Load the initialization firmware on first boot only. */
5883 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5884 &sc->fw.main : &sc->fw.init;
5885
5886 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5887 fw->text, fw->textsz);
5888 if (error != 0) {
5889 device_printf(sc->sc_dev,
5890 "%s: could not load firmware %s section, error %d\n",
5891 __func__, ".text", error);
5892 return error;
5893 }
5894 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5895 fw->data, fw->datasz);
5896 if (error != 0) {
5897 device_printf(sc->sc_dev,
5898 "%s: could not load firmware %s section, error %d\n",
5899 __func__, ".data", error);
5900 return error;
5901 }
5902
5903 /* Now press "execute". */
5904 IWN_WRITE(sc, IWN_RESET, 0);
5905 return 0;
5906 }
5907
5908 /*
5909 * Extract text and data sections from a legacy firmware image.
5910 */
5911 static int
5912 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5913 {
5914 const uint32_t *ptr;
5915 size_t hdrlen = 24;
5916 uint32_t rev;
5917
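	/*
	 * A legacy image begins with a 24-byte header: a 32-bit API
	 * revision word followed by five 32-bit section sizes (runtime
	 * text/data, init text/data and boot text).  Version >=3 headers
	 * carry an extra build-number word, accounted for below.
	 */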
5918 ptr = (const uint32_t *)fw->data;
5919 rev = le32toh(*ptr++);
5920
5921 /* Check firmware API version. */
5922 if (IWN_FW_API(rev) <= 1) {
5923 device_printf(sc->sc_dev,
5924 "%s: bad firmware, need API version >=2\n", __func__);
5925 return EINVAL;
5926 }
5927 if (IWN_FW_API(rev) >= 3) {
5928 /* Skip build number (version 2 header). */
5929 hdrlen += 4;
5930 ptr++;
5931 }
5932 if (fw->size < hdrlen) {
5933 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
5934 __func__, fw->size);
5935 return EINVAL;
5936 }
5937 fw->main.textsz = le32toh(*ptr++);
5938 fw->main.datasz = le32toh(*ptr++);
5939 fw->init.textsz = le32toh(*ptr++);
5940 fw->init.datasz = le32toh(*ptr++);
5941 fw->boot.textsz = le32toh(*ptr++);
5942
5943 /* Check that all firmware sections fit. */
5944 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5945 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5946 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
5947 __func__, fw->size);
5948 return EINVAL;
5949 }
5950
5951 /* Get pointers to firmware sections. */
5952 fw->main.text = (const uint8_t *)ptr;
5953 fw->main.data = fw->main.text + fw->main.textsz;
5954 fw->init.text = fw->main.data + fw->main.datasz;
5955 fw->init.data = fw->init.text + fw->init.textsz;
5956 fw->boot.text = fw->init.data + fw->init.datasz;
5957 return 0;
5958 }
5959
5960 /*
5961 * Extract text and data sections from a TLV firmware image.
5962 */
5963 static int
5964 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5965 uint16_t alt)
5966 {
5967 const struct iwn_fw_tlv_hdr *hdr;
5968 const struct iwn_fw_tlv *tlv;
5969 const uint8_t *ptr, *end;
5970 uint64_t altmask;
5971 uint32_t len, tmp;
5972
5973 if (fw->size < sizeof (*hdr)) {
5974 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
5975 __func__, fw->size);
5976 return EINVAL;
5977 }
5978 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5979 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5980 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
5981 __func__, le32toh(hdr->signature));
5982 return EINVAL;
5983 }
5984 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
5985 le32toh(hdr->build));
5986
5987 /*
5988 * Select the closest supported alternative that is less than
5989 * or equal to the specified one.
5990 */
5991 altmask = le64toh(hdr->altmask);
5992 while (alt > 0 && !(altmask & (1ULL << alt)))
5993 alt--; /* Downgrade. */
5994 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
5995
5996 ptr = (const uint8_t *)(hdr + 1);
5997 end = (const uint8_t *)(fw->data + fw->size);
5998
5999 /* Parse type-length-value fields. */
6000 while (ptr + sizeof (*tlv) <= end) {
6001 tlv = (const struct iwn_fw_tlv *)ptr;
6002 len = le32toh(tlv->len);
6003
6004 ptr += sizeof (*tlv);
6005 if (ptr + len > end) {
6006 device_printf(sc->sc_dev,
6007 "%s: firmware too short: %zu bytes\n", __func__,
6008 fw->size);
6009 return EINVAL;
6010 }
6011 /* Skip other alternatives. */
6012 if (tlv->alt != 0 && tlv->alt != htole16(alt))
6013 goto next;
6014
6015 switch (le16toh(tlv->type)) {
6016 case IWN_FW_TLV_MAIN_TEXT:
6017 fw->main.text = ptr;
6018 fw->main.textsz = len;
6019 break;
6020 case IWN_FW_TLV_MAIN_DATA:
6021 fw->main.data = ptr;
6022 fw->main.datasz = len;
6023 break;
6024 case IWN_FW_TLV_INIT_TEXT:
6025 fw->init.text = ptr;
6026 fw->init.textsz = len;
6027 break;
6028 case IWN_FW_TLV_INIT_DATA:
6029 fw->init.data = ptr;
6030 fw->init.datasz = len;
6031 break;
6032 case IWN_FW_TLV_BOOT_TEXT:
6033 fw->boot.text = ptr;
6034 fw->boot.textsz = len;
6035 break;
6036 case IWN_FW_TLV_ENH_SENS:
6037 if (!len)
6038 sc->sc_flags |= IWN_FLAG_ENH_SENS;
6039 break;
6040 case IWN_FW_TLV_PHY_CALIB:
6041 tmp = htole32(*ptr);
6042 if (tmp < 253) {
6043 sc->reset_noise_gain = tmp;
6044 sc->noise_gain = tmp + 1;
6045 }
6046 break;
6047 default:
6048 DPRINTF(sc, IWN_DEBUG_RESET,
6049 "TLV type %d not handled\n", le16toh(tlv->type));
6050 break;
6051 }
6052 next: /* TLV fields are 32-bit aligned. */
6053 ptr += (len + 3) & ~3;
6054 }
6055 return 0;
6056 }
6057
6058 static int
6059 iwn_read_firmware(struct iwn_softc *sc)
6060 {
6061 struct iwn_fw_info *fw = &sc->fw;
6062 int error;
6063
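	/*
	 * firmware_get() may sleep (e.g. while the firmware module is
	 * being loaded), so the driver lock is dropped across it.
	 */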
6064 IWN_UNLOCK(sc);
6065
6066 memset(fw, 0, sizeof (*fw));
6067
6068 /* Read firmware image from filesystem. */
6069 sc->fw_fp = firmware_get(sc->fwname);
6070 if (sc->fw_fp == NULL) {
6071 device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
6072 __func__, sc->fwname);
6073 IWN_LOCK(sc);
6074 return EINVAL;
6075 }
6076 IWN_LOCK(sc);
6077
6078 fw->size = sc->fw_fp->datasize;
6079 fw->data = (const uint8_t *)sc->fw_fp->data;
6080 if (fw->size < sizeof (uint32_t)) {
6081 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6082 __func__, fw->size);
6083 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6084 sc->fw_fp = NULL;
6085 return EINVAL;
6086 }
6087
6088 /* Retrieve text and data sections. */
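	/*
	 * The first 32-bit word discriminates the two formats: legacy
	 * images start with a nonzero API revision word while TLV images
	 * start with a zero word ahead of the IWN_FW_SIGNATURE checked in
	 * iwn_read_firmware_tlv().
	 */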
6089 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
6090 error = iwn_read_firmware_leg(sc, fw);
6091 else
6092 error = iwn_read_firmware_tlv(sc, fw, 1);
6093 if (error != 0) {
6094 device_printf(sc->sc_dev,
6095 "%s: could not read firmware sections, error %d\n",
6096 __func__, error);
6097 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6098 sc->fw_fp = NULL;
6099 return error;
6100 }
6101
6102 /* Make sure text and data sections fit in hardware memory. */
6103 if (fw->main.textsz > sc->fw_text_maxsz ||
6104 fw->main.datasz > sc->fw_data_maxsz ||
6105 fw->init.textsz > sc->fw_text_maxsz ||
6106 fw->init.datasz > sc->fw_data_maxsz ||
6107 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6108 (fw->boot.textsz & 3) != 0) {
6109 device_printf(sc->sc_dev, "%s: firmware sections too large\n",
6110 __func__);
6111 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6112 sc->fw_fp = NULL;
6113 return EINVAL;
6114 }
6115
6116 /* We can proceed with loading the firmware. */
6117 return 0;
6118 }
6119
6120 static int
6121 iwn_clock_wait(struct iwn_softc *sc)
6122 {
6123 int ntries;
6124
6125 /* Set "initialization complete" bit. */
6126 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6127
6128 /* Wait for clock stabilization. */
6129 for (ntries = 0; ntries < 2500; ntries++) {
6130 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6131 return 0;
6132 DELAY(10);
6133 }
6134 device_printf(sc->sc_dev,
6135 "%s: timeout waiting for clock stabilization\n", __func__);
6136 return ETIMEDOUT;
6137 }
6138
6139 static int
6140 iwn_apm_init(struct iwn_softc *sc)
6141 {
6142 uint32_t reg;
6143 int error;
6144
6145 /* Disable L0s exit timer (NMI bug workaround). */
6146 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6147 /* Don't wait for ICH L0s (ICH bug workaround). */
6148 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6149
6150 /* Set FH wait threshold to max (HW bug under stress workaround). */
6151 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6152
6153 /* Enable HAP INTA to move adapter from L1a to L0s. */
6154 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6155
6156 /* Retrieve PCIe Active State Power Management (ASPM). */
6157 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6158 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6159 if (reg & 0x02) /* L1 Entry enabled. */
6160 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6161 else
6162 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6163
6164 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6165 sc->hw_type <= IWN_HW_REV_TYPE_1000)
6166 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6167
6168 /* Wait for clock stabilization before accessing prph. */
6169 if ((error = iwn_clock_wait(sc)) != 0)
6170 return error;
6171
6172 if ((error = iwn_nic_lock(sc)) != 0)
6173 return error;
6174 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6175 /* Enable DMA and BSM (Bootstrap State Machine). */
6176 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6177 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6178 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6179 } else {
6180 /* Enable DMA. */
6181 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6182 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6183 }
6184 DELAY(20);
6185 /* Disable L1-Active. */
6186 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6187 iwn_nic_unlock(sc);
6188
6189 return 0;
6190 }
6191
6192 static void
6193 iwn_apm_stop_master(struct iwn_softc *sc)
6194 {
6195 int ntries;
6196
6197 /* Stop busmaster DMA activity. */
6198 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6199 for (ntries = 0; ntries < 100; ntries++) {
6200 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6201 return;
6202 DELAY(10);
6203 }
6204 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
6205 }
6206
6207 static void
6208 iwn_apm_stop(struct iwn_softc *sc)
6209 {
6210 iwn_apm_stop_master(sc);
6211
6212 /* Reset the entire device. */
6213 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6214 DELAY(10);
6215 /* Clear "initialization complete" bit. */
6216 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6217 }
6218
6219 static int
6220 iwn4965_nic_config(struct iwn_softc *sc)
6221 {
6222 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6223 /*
6224 * I don't believe this to be correct but this is what the
6225 * vendor driver is doing. Probably the bits should not be
6226 * shifted in IWN_RFCFG_*.
6227 */
6228 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6229 IWN_RFCFG_TYPE(sc->rfcfg) |
6230 IWN_RFCFG_STEP(sc->rfcfg) |
6231 IWN_RFCFG_DASH(sc->rfcfg));
6232 }
6233 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6234 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6235 return 0;
6236 }
6237
6238 static int
6239 iwn5000_nic_config(struct iwn_softc *sc)
6240 {
6241 uint32_t tmp;
6242 int error;
6243
6244 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6245 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6246 IWN_RFCFG_TYPE(sc->rfcfg) |
6247 IWN_RFCFG_STEP(sc->rfcfg) |
6248 IWN_RFCFG_DASH(sc->rfcfg));
6249 }
6250 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6251 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6252
6253 if ((error = iwn_nic_lock(sc)) != 0)
6254 return error;
6255 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6256
6257 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6258 /*
6259 * Select first Switching Voltage Regulator (1.32V) to
6260 * solve a stability issue related to noisy DC2DC line
6261 * in the silicon of 1000 Series.
6262 */
6263 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6264 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6265 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6266 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6267 }
6268 iwn_nic_unlock(sc);
6269
6270 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6271 /* Use internal power amplifier only. */
6272 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6273 }
6274 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6275 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6276 /* Indicate that ROM calibration version is >=6. */
6277 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6278 }
6279 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6280 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6281 return 0;
6282 }
6283
6284 /*
6285 * Take NIC ownership over Intel Active Management Technology (AMT).
6286 */
6287 static int
6288 iwn_hw_prepare(struct iwn_softc *sc)
6289 {
6290 int ntries;
6291
6292 /* Check if hardware is ready. */
6293 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6294 for (ntries = 0; ntries < 5; ntries++) {
6295 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6296 IWN_HW_IF_CONFIG_NIC_READY)
6297 return 0;
6298 DELAY(10);
6299 }
6300
6301 /* Hardware not ready, force into ready state. */
6302 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6303 for (ntries = 0; ntries < 15000; ntries++) {
6304 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6305 IWN_HW_IF_CONFIG_PREPARE_DONE))
6306 break;
6307 DELAY(10);
6308 }
6309 if (ntries == 15000)
6310 return ETIMEDOUT;
6311
6312 /* Hardware should be ready now. */
6313 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6314 for (ntries = 0; ntries < 5; ntries++) {
6315 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6316 IWN_HW_IF_CONFIG_NIC_READY)
6317 return 0;
6318 DELAY(10);
6319 }
6320 return ETIMEDOUT;
6321 }
6322
6323 static int
6324 iwn_hw_init(struct iwn_softc *sc)
6325 {
6326 struct iwn_ops *ops = &sc->ops;
6327 int error, chnl, qid;
6328
6329 /* Clear pending interrupts. */
6330 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6331
6332 if ((error = iwn_apm_init(sc)) != 0) {
6333 device_printf(sc->sc_dev,
6334 "%s: could not power ON adapter, error %d\n", __func__,
6335 error);
6336 return error;
6337 }
6338
6339 /* Select VMAIN power source. */
6340 if ((error = iwn_nic_lock(sc)) != 0)
6341 return error;
6342 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6343 iwn_nic_unlock(sc);
6344
6345 /* Perform adapter-specific initialization. */
6346 if ((error = ops->nic_config(sc)) != 0)
6347 return error;
6348
6349 /* Initialize RX ring. */
6350 if ((error = iwn_nic_lock(sc)) != 0)
6351 return error;
6352 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6353 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6354 /* Set physical address of RX ring (256-byte aligned). */
6355 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6356 /* Set physical address of RX status (16-byte aligned). */
6357 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6358 /* Enable RX. */
6359 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6360 IWN_FH_RX_CONFIG_ENA |
6361 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
6362 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
6363 IWN_FH_RX_CONFIG_SINGLE_FRAME |
6364 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6365 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6366 iwn_nic_unlock(sc);
6367 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
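	/*
	 * The RX write pointer appears to be required to stay a multiple
	 * of eight (hence the & ~7); the ring is handed over with all but
	 * the last few descriptors marked available.
	 */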
6368
6369 if ((error = iwn_nic_lock(sc)) != 0)
6370 return error;
6371
6372 /* Initialize TX scheduler. */
6373 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6374
6375 /* Set physical address of "keep warm" page (16-byte aligned). */
6376 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6377
6378 /* Initialize TX rings. */
6379 for (qid = 0; qid < sc->ntxqs; qid++) {
6380 struct iwn_tx_ring *txq = &sc->txq[qid];
6381
6382 /* Set physical address of TX ring (256-byte aligned). */
6383 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6384 txq->desc_dma.paddr >> 8);
6385 }
6386 iwn_nic_unlock(sc);
6387
6388 /* Enable DMA channels. */
6389 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6390 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6391 IWN_FH_TX_CONFIG_DMA_ENA |
6392 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6393 }
6394
6395 /* Clear "radio off" and "commands blocked" bits. */
6396 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6397 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6398
6399 /* Clear pending interrupts. */
6400 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6401 /* Enable interrupt coalescing. */
6402 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6403 /* Enable interrupts. */
6404 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6405
6406 /* _Really_ make sure "radio off" bit is cleared! */
6407 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6408 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6409
6410 /* Enable shadow registers. */
6411 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
6412 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
6413
6414 if ((error = ops->load_firmware(sc)) != 0) {
6415 device_printf(sc->sc_dev,
6416 "%s: could not load firmware, error %d\n", __func__,
6417 error);
6418 return error;
6419 }
6420 /* Wait at most one second for firmware alive notification. */
6421 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6422 device_printf(sc->sc_dev,
6423 "%s: timeout waiting for adapter to initialize, error %d\n",
6424 __func__, error);
6425 return error;
6426 }
6427 /* Do post-firmware initialization. */
6428 return ops->post_alive(sc);
6429 }
6430
6431 static void
6432 iwn_hw_stop(struct iwn_softc *sc)
6433 {
6434 int chnl, qid, ntries;
6435
6436 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6437
6438 /* Disable interrupts. */
6439 IWN_WRITE(sc, IWN_INT_MASK, 0);
6440 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6441 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6442 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6443
6444 /* Make sure we no longer hold the NIC lock. */
6445 iwn_nic_unlock(sc);
6446
6447 /* Stop TX scheduler. */
6448 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6449
6450 /* Stop all DMA channels. */
6451 if (iwn_nic_lock(sc) == 0) {
6452 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6453 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6454 for (ntries = 0; ntries < 200; ntries++) {
6455 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6456 IWN_FH_TX_STATUS_IDLE(chnl))
6457 break;
6458 DELAY(10);
6459 }
6460 }
6461 iwn_nic_unlock(sc);
6462 }
6463
6464 /* Stop RX ring. */
6465 iwn_reset_rx_ring(sc, &sc->rxq);
6466
6467 /* Reset all TX rings. */
6468 for (qid = 0; qid < sc->ntxqs; qid++)
6469 iwn_reset_tx_ring(sc, &sc->txq[qid]);
6470
6471 if (iwn_nic_lock(sc) == 0) {
6472 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6473 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6474 iwn_nic_unlock(sc);
6475 }
6476 DELAY(5);
6477 /* Power OFF adapter. */
6478 iwn_apm_stop(sc);
6479 }
6480
6481 static void
6482 iwn_radio_on(void *arg0, int pending)
6483 {
6484 struct iwn_softc *sc = arg0;
6485 struct ifnet *ifp = sc->sc_ifp;
6486 struct ieee80211com *ic = ifp->if_l2com;
6487 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6488
6489 if (vap != NULL) {
6490 iwn_init(sc);
6491 ieee80211_init(vap);
6492 }
6493 }
6494
6495 static void
6496 iwn_radio_off(void *arg0, int pending)
6497 {
6498 struct iwn_softc *sc = arg0;
6499 struct ifnet *ifp = sc->sc_ifp;
6500 struct ieee80211com *ic = ifp->if_l2com;
6501 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6502
6503 iwn_stop(sc);
6504 if (vap != NULL)
6505 ieee80211_stop(vap);
6506
6507 /* Enable interrupts to get RF toggle notification. */
6508 IWN_LOCK(sc);
6509 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6510 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6511 IWN_UNLOCK(sc);
6512 }
6513
6514 static void
6515 iwn_init_locked(struct iwn_softc *sc)
6516 {
6517 struct ifnet *ifp = sc->sc_ifp;
6518 int error;
6519
6520 IWN_LOCK_ASSERT(sc);
6521
6522 if ((error = iwn_hw_prepare(sc)) != 0) {
6523 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6524 __func__, error);
6525 goto fail;
6526 }
6527
6528 /* Initialize interrupt mask to default value. */
6529 sc->int_mask = IWN_INT_MASK_DEF;
6530 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6531
6532 /* Check that the radio is not disabled by hardware switch. */
6533 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6534 device_printf(sc->sc_dev,
6535 "radio is disabled by hardware switch\n");
6536 /* Enable interrupts to get RF toggle notifications. */
6537 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6538 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6539 return;
6540 }
6541
6542 /* Read firmware images from the filesystem. */
6543 if ((error = iwn_read_firmware(sc)) != 0) {
6544 device_printf(sc->sc_dev,
6545 "%s: could not read firmware, error %d\n", __func__,
6546 error);
6547 goto fail;
6548 }
6549
6550 /* Initialize hardware and upload firmware. */
6551 error = iwn_hw_init(sc);
6552 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6553 sc->fw_fp = NULL;
6554 if (error != 0) {
6555 device_printf(sc->sc_dev,
6556 "%s: could not initialize hardware, error %d\n", __func__,
6557 error);
6558 goto fail;
6559 }
6560
6561 /* Configure adapter now that it is ready. */
6562 if ((error = iwn_config(sc)) != 0) {
6563 device_printf(sc->sc_dev,
6564 "%s: could not configure device, error %d\n", __func__,
6565 error);
6566 goto fail;
6567 }
6568
6569 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6570 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6571
6572 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
6573 return;
6574
6575 fail: iwn_stop_locked(sc);
6576 }
6577
6578 static void
6579 iwn_init(void *arg)
6580 {
6581 struct iwn_softc *sc = arg;
6582 struct ifnet *ifp = sc->sc_ifp;
6583 struct ieee80211com *ic = ifp->if_l2com;
6584
6585 IWN_LOCK(sc);
6586 iwn_init_locked(sc);
6587 IWN_UNLOCK(sc);
6588
6589 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6590 ieee80211_start_all(ic);
6591 }
6592
6593 static void
6594 iwn_stop_locked(struct iwn_softc *sc)
6595 {
6596 struct ifnet *ifp = sc->sc_ifp;
6597
6598 IWN_LOCK_ASSERT(sc);
6599
6600 sc->sc_tx_timer = 0;
6601 callout_stop(&sc->watchdog_to);
6602 callout_stop(&sc->calib_to);
6603 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6604
6605 /* Power OFF hardware. */
6606 iwn_hw_stop(sc);
6607 }
6608
6609 static void
6610 iwn_stop(struct iwn_softc *sc)
6611 {
6612 IWN_LOCK(sc);
6613 iwn_stop_locked(sc);
6614 IWN_UNLOCK(sc);
6615 }
6616
6617 /*
6618 * Callback from net80211 to start a scan.
6619 */
6620 static void
6621 iwn_scan_start(struct ieee80211com *ic)
6622 {
6623 struct ifnet *ifp = ic->ic_ifp;
6624 struct iwn_softc *sc = ifp->if_softc;
6625
6626 IWN_LOCK(sc);
6627 /* make the link LED blink while we're scanning */
6628 iwn_set_led(sc, IWN_LED_LINK, 20, 2);
6629 IWN_UNLOCK(sc);
6630 }
6631
6632 /*
6633 * Callback from net80211 to terminate a scan.
6634 */
6635 static void
6636 iwn_scan_end(struct ieee80211com *ic)
6637 {
6638 struct ifnet *ifp = ic->ic_ifp;
6639 struct iwn_softc *sc = ifp->if_softc;
6640 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6641
6642 IWN_LOCK(sc);
6643 if (vap->iv_state == IEEE80211_S_RUN) {
6644 /* Set link LED to ON status if we are associated */
6645 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
6646 }
6647 IWN_UNLOCK(sc);
6648 }
6649
6650 /*
6651 * Callback from net80211 to force a channel change.
6652 */
6653 static void
6654 iwn_set_channel(struct ieee80211com *ic)
6655 {
6656 const struct ieee80211_channel *c = ic->ic_curchan;
6657 struct ifnet *ifp = ic->ic_ifp;
6658 struct iwn_softc *sc = ifp->if_softc;
6659 int error;
6660
6661 IWN_LOCK(sc);
6662 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
6663 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
6664 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
6665 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
6666
6667 /*
6668 * Only need to set the channel in Monitor mode. AP scanning and auth
6669 * are already taken care of by their respective firmware commands.
6670 */
6671 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6672 error = iwn_config(sc);
6673 if (error != 0)
6674 device_printf(sc->sc_dev,
6675 "%s: error %d settting channel\n", __func__, error);
6676 }
6677 IWN_UNLOCK(sc);
6678 }
6679
6680 /*
6681 * Callback from net80211 to start scanning of the current channel.
6682 */
6683 static void
6684 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6685 {
6686 struct ieee80211vap *vap = ss->ss_vap;
6687 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6688 int error;
6689
6690 IWN_LOCK(sc);
6691 error = iwn_scan(sc);
6692 IWN_UNLOCK(sc);
6693 if (error != 0)
6694 ieee80211_cancel_scan(vap);
6695 }
6696
6697 /*
6698 * Callback from net80211 to handle the minimum dwell time being met.
6699 * The intent is to terminate the scan but we just let the firmware
6700 * notify us when it's finished as we have no safe way to abort it.
6701 */
6702 static void
6703 iwn_scan_mindwell(struct ieee80211_scan_state *ss)
6704 {
6705 /* NB: don't try to abort scan; wait for firmware to finish */
6706 }
6707
6708 static void
6709 iwn_hw_reset(void *arg0, int pending)
6710 {
6711 struct iwn_softc *sc = arg0;
6712 struct ifnet *ifp = sc->sc_ifp;
6713 struct ieee80211com *ic = ifp->if_l2com;
6714
6715 iwn_stop(sc);
6716 iwn_init(sc);
6717 ieee80211_notify_radio(ic, 1);
6718 }