1 /*-
2  * Copyright (c) 2007-2009
3  *	Damien Bergamini <damien.bergamini@free.fr>
4  * Copyright (c) 2008
5  *	Benjamin Close <benjsc@FreeBSD.org>
6  * Copyright (c) 2008 Sam Leffler, Errno Consulting
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23  * adapters.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #include "opt_wlan.h"
30 
31 #include <sys/param.h>
32 #include <sys/sockio.h>
33 #include <sys/sysctl.h>
34 #include <sys/mbuf.h>
35 #include <sys/kernel.h>
36 #include <sys/socket.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/bus.h>
40 #include <sys/rman.h>
41 #include <sys/endian.h>
42 #include <sys/firmware.h>
43 #include <sys/limits.h>
44 #include <sys/module.h>
45 #include <sys/queue.h>
46 #include <sys/taskqueue.h>
47 
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <machine/clock.h>
51 
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 
55 #include <net/bpf.h>
56 #include <net/if.h>
57 #include <net/if_arp.h>
58 #include <net/ethernet.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <netinet/if_ether.h>
67 #include <netinet/ip.h>
68 
69 #include <net80211/ieee80211_var.h>
70 #include <net80211/ieee80211_radiotap.h>
71 #include <net80211/ieee80211_regdomain.h>
72 #include <net80211/ieee80211_ratectl.h>
73 
74 #include <dev/iwn/if_iwnreg.h>
75 #include <dev/iwn/if_iwnvar.h>
76 
77 struct iwn_ident {
78 	uint16_t	vendor;
79 	uint16_t	device;
80 	const char	*name;
81 };
82 
83 static const struct iwn_ident iwn_ident_table[] = {
84 	{ 0x8086, 0x0082, "Intel Centrino Advanced-N 6205"		},
85 	{ 0x8086, 0x0083, "Intel Centrino Wireless-N 1000"		},
86 	{ 0x8086, 0x0084, "Intel Centrino Wireless-N 1000"		},
87 	{ 0x8086, 0x0085, "Intel Centrino Advanced-N 6205"		},
88 	{ 0x8086, 0x0087, "Intel Centrino Advanced-N + WiMAX 6250"	},
89 	{ 0x8086, 0x0089, "Intel Centrino Advanced-N + WiMAX 6250"	},
90 	{ 0x8086, 0x008a, "Intel Centrino Wireless-N 1030"		},
91 	{ 0x8086, 0x008b, "Intel Centrino Wireless-N 1030"		},
92 	{ 0x8086, 0x0090, "Intel Centrino Advanced-N 6230"		},
93 	{ 0x8086, 0x0091, "Intel Centrino Advanced-N 6230"		},
94 	{ 0x8086, 0x0885, "Intel Centrino Wireless-N + WiMAX 6150"	},
95 	{ 0x8086, 0x0886, "Intel Centrino Wireless-N + WiMAX 6150"	},
96 	{ 0x8086, 0x0896, "Intel Centrino Wireless-N 130"		},
97 	{ 0x8086, 0x0887, "Intel Centrino Wireless-N 130"		},
98 	{ 0x8086, 0x08ae, "Intel Centrino Wireless-N 100"		},
99 	{ 0x8086, 0x08af, "Intel Centrino Wireless-N 100"		},
100 	{ 0x8086, 0x4229, "Intel Wireless WiFi Link 4965"		},
101 	{ 0x8086, 0x422b, "Intel Centrino Ultimate-N 6300"		},
102 	{ 0x8086, 0x422c, "Intel Centrino Advanced-N 6200"		},
103 	{ 0x8086, 0x422d, "Intel Wireless WiFi Link 4965"		},
104 	{ 0x8086, 0x4230, "Intel Wireless WiFi Link 4965"		},
105 	{ 0x8086, 0x4232, "Intel WiFi Link 5100"			},
106 	{ 0x8086, 0x4233, "Intel Wireless WiFi Link 4965"		},
107 	{ 0x8086, 0x4235, "Intel Ultimate N WiFi Link 5300"		},
108 	{ 0x8086, 0x4236, "Intel Ultimate N WiFi Link 5300"		},
109 	{ 0x8086, 0x4237, "Intel WiFi Link 5100"			},
110 	{ 0x8086, 0x4238, "Intel Centrino Ultimate-N 6300"		},
111 	{ 0x8086, 0x4239, "Intel Centrino Advanced-N 6200"		},
112 	{ 0x8086, 0x423a, "Intel WiMAX/WiFi Link 5350"			},
113 	{ 0x8086, 0x423b, "Intel WiMAX/WiFi Link 5350"			},
114 	{ 0x8086, 0x423c, "Intel WiMAX/WiFi Link 5150"			},
115 	{ 0x8086, 0x423d, "Intel WiMAX/WiFi Link 5150"			},
116 	{ 0, 0, NULL }
117 };
118 
119 static int	iwn_probe(device_t);
120 static int	iwn_attach(device_t);
121 static int	iwn4965_attach(struct iwn_softc *, uint16_t);
122 static int	iwn5000_attach(struct iwn_softc *, uint16_t);
123 static void	iwn_radiotap_attach(struct iwn_softc *);
124 static void	iwn_sysctlattach(struct iwn_softc *);
125 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
126 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
127 		    const uint8_t [IEEE80211_ADDR_LEN],
128 		    const uint8_t [IEEE80211_ADDR_LEN]);
129 static void	iwn_vap_delete(struct ieee80211vap *);
130 static int	iwn_detach(device_t);
131 static int	iwn_shutdown(device_t);
132 static int	iwn_suspend(device_t);
133 static int	iwn_resume(device_t);
134 static int	iwn_nic_lock(struct iwn_softc *);
135 static int	iwn_eeprom_lock(struct iwn_softc *);
136 static int	iwn_init_otprom(struct iwn_softc *);
137 static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
138 static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
139 static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
140 		    void **, bus_size_t, bus_size_t);
141 static void	iwn_dma_contig_free(struct iwn_dma_info *);
142 static int	iwn_alloc_sched(struct iwn_softc *);
143 static void	iwn_free_sched(struct iwn_softc *);
144 static int	iwn_alloc_kw(struct iwn_softc *);
145 static void	iwn_free_kw(struct iwn_softc *);
146 static int	iwn_alloc_ict(struct iwn_softc *);
147 static void	iwn_free_ict(struct iwn_softc *);
148 static int	iwn_alloc_fwmem(struct iwn_softc *);
149 static void	iwn_free_fwmem(struct iwn_softc *);
150 static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
151 static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
152 static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
153 static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
154 		    int);
155 static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
156 static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
157 static void	iwn5000_ict_reset(struct iwn_softc *);
158 static int	iwn_read_eeprom(struct iwn_softc *,
159 		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
160 static void	iwn4965_read_eeprom(struct iwn_softc *);
161 static void	iwn4965_print_power_group(struct iwn_softc *, int);
162 static void	iwn5000_read_eeprom(struct iwn_softc *);
163 static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
164 static void	iwn_read_eeprom_band(struct iwn_softc *, int);
165 static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
166 static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
167 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
168 		    struct ieee80211_channel *);
169 static int	iwn_setregdomain(struct ieee80211com *,
170 		    struct ieee80211_regdomain *, int,
171 		    struct ieee80211_channel[]);
172 static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
173 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
174 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
175 static void	iwn_newassoc(struct ieee80211_node *, int);
176 static int	iwn_media_change(struct ifnet *);
177 static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
178 static void	iwn_calib_timeout(void *);
179 static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
180 		    struct iwn_rx_data *);
181 static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
182 		    struct iwn_rx_data *);
183 static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
184 		    struct iwn_rx_data *);
185 static void	iwn5000_rx_calib_results(struct iwn_softc *,
186 		    struct iwn_rx_desc *, struct iwn_rx_data *);
187 static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
188 		    struct iwn_rx_data *);
189 static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
190 		    struct iwn_rx_data *);
191 static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
192 		    struct iwn_rx_data *);
193 static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
194 		    uint8_t);
195 static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
196 static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
197 static void	iwn_notif_intr(struct iwn_softc *);
198 static void	iwn_wakeup_intr(struct iwn_softc *);
199 static void	iwn_rftoggle_intr(struct iwn_softc *);
200 static void	iwn_fatal_intr(struct iwn_softc *);
201 static void	iwn_intr(void *);
202 static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
203 		    uint16_t);
204 static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
205 		    uint16_t);
206 #ifdef notyet
207 static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
208 #endif
209 static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
210 		    struct ieee80211_node *);
211 static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
212 		    struct ieee80211_node *,
213 		    const struct ieee80211_bpf_params *params);
214 static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
215 		    const struct ieee80211_bpf_params *);
216 static void	iwn_start(struct ifnet *);
217 static void	iwn_start_locked(struct ifnet *);
218 static void	iwn_watchdog(void *);
219 static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
220 static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
221 static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
222 		    int);
223 static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
224 		    int);
225 static int	iwn_set_link_quality(struct iwn_softc *,
226 		    struct ieee80211_node *);
227 static int	iwn_add_broadcast_node(struct iwn_softc *, int);
228 static int	iwn_updateedca(struct ieee80211com *);
229 static void	iwn_update_mcast(struct ifnet *);
230 static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
231 static int	iwn_set_critical_temp(struct iwn_softc *);
232 static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
233 static void	iwn4965_power_calibration(struct iwn_softc *, int);
234 static int	iwn4965_set_txpower(struct iwn_softc *,
235 		    struct ieee80211_channel *, int);
236 static int	iwn5000_set_txpower(struct iwn_softc *,
237 		    struct ieee80211_channel *, int);
238 static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
239 static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
240 static int	iwn_get_noise(const struct iwn_rx_general_stats *);
241 static int	iwn4965_get_temperature(struct iwn_softc *);
242 static int	iwn5000_get_temperature(struct iwn_softc *);
243 static int	iwn_init_sensitivity(struct iwn_softc *);
244 static void	iwn_collect_noise(struct iwn_softc *,
245 		    const struct iwn_rx_general_stats *);
246 static int	iwn4965_init_gains(struct iwn_softc *);
247 static int	iwn5000_init_gains(struct iwn_softc *);
248 static int	iwn4965_set_gains(struct iwn_softc *);
249 static int	iwn5000_set_gains(struct iwn_softc *);
250 static void	iwn_tune_sensitivity(struct iwn_softc *,
251 		    const struct iwn_rx_stats *);
252 static int	iwn_send_sensitivity(struct iwn_softc *);
253 static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
254 static int	iwn_send_btcoex(struct iwn_softc *);
255 static int	iwn_send_advanced_btcoex(struct iwn_softc *);
256 static int	iwn5000_runtime_calib(struct iwn_softc *);
257 static int	iwn_config(struct iwn_softc *);
258 static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
259 static int	iwn_scan(struct iwn_softc *);
260 static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
261 static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
262 static int	iwn_ampdu_rx_start(struct ieee80211_node *,
263 		    struct ieee80211_rx_ampdu *, int, int, int);
264 static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
265 		    struct ieee80211_rx_ampdu *);
266 static int	iwn_addba_request(struct ieee80211_node *,
267 		    struct ieee80211_tx_ampdu *, int, int, int);
268 static int	iwn_addba_response(struct ieee80211_node *,
269 		    struct ieee80211_tx_ampdu *, int, int, int);
270 static int	iwn_ampdu_tx_start(struct ieee80211com *,
271 		    struct ieee80211_node *, uint8_t);
272 static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
273 		    struct ieee80211_tx_ampdu *);
274 static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
275 		    struct ieee80211_node *, int, uint8_t, uint16_t);
276 static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
277 		    uint8_t, uint16_t);
278 static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
279 		    struct ieee80211_node *, int, uint8_t, uint16_t);
280 static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
281 		    uint8_t, uint16_t);
282 static int	iwn5000_query_calibration(struct iwn_softc *);
283 static int	iwn5000_send_calibration(struct iwn_softc *);
284 static int	iwn5000_send_wimax_coex(struct iwn_softc *);
285 static int	iwn5000_crystal_calib(struct iwn_softc *);
286 static int	iwn5000_temp_offset_calib(struct iwn_softc *);
287 static int	iwn4965_post_alive(struct iwn_softc *);
288 static int	iwn5000_post_alive(struct iwn_softc *);
289 static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
290 		    int);
291 static int	iwn4965_load_firmware(struct iwn_softc *);
292 static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
293 		    const uint8_t *, int);
294 static int	iwn5000_load_firmware(struct iwn_softc *);
295 static int	iwn_read_firmware_leg(struct iwn_softc *,
296 		    struct iwn_fw_info *);
297 static int	iwn_read_firmware_tlv(struct iwn_softc *,
298 		    struct iwn_fw_info *, uint16_t);
299 static int	iwn_read_firmware(struct iwn_softc *);
300 static int	iwn_clock_wait(struct iwn_softc *);
301 static int	iwn_apm_init(struct iwn_softc *);
302 static void	iwn_apm_stop_master(struct iwn_softc *);
303 static void	iwn_apm_stop(struct iwn_softc *);
304 static int	iwn4965_nic_config(struct iwn_softc *);
305 static int	iwn5000_nic_config(struct iwn_softc *);
306 static int	iwn_hw_prepare(struct iwn_softc *);
307 static int	iwn_hw_init(struct iwn_softc *);
308 static void	iwn_hw_stop(struct iwn_softc *);
309 static void	iwn_radio_on(void *, int);
310 static void	iwn_radio_off(void *, int);
311 static void	iwn_init_locked(struct iwn_softc *);
312 static void	iwn_init(void *);
313 static void	iwn_stop_locked(struct iwn_softc *);
314 static void	iwn_stop(struct iwn_softc *);
315 static void	iwn_scan_start(struct ieee80211com *);
316 static void	iwn_scan_end(struct ieee80211com *);
317 static void	iwn_set_channel(struct ieee80211com *);
318 static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
319 static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
320 static void	iwn_hw_reset(void *, int);
321 
322 #define IWN_DEBUG
323 #ifdef IWN_DEBUG
324 enum {
325 	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
326 	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
327 	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
328 	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
329 	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
330 	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
331 	IWN_DEBUG_BEACON 	= 0x00000040,	/* beacon handling */
332 	IWN_DEBUG_WATCHDOG 	= 0x00000080,	/* watchdog timeout */
333 	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
334 	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
335 	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
336 	IWN_DEBUG_LED		= 0x00000800,	/* led management */
337 	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
338 	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
339 	IWN_DEBUG_ANY		= 0xffffffff
340 };
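/*
 * The flags above may be OR'd together; sc_debug is exported read/write
 * through the "debug" sysctl installed by iwn_sysctlattach() (typically
 * dev.iwn.<unit>.debug), so tracing can be toggled at runtime.
 */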
341 
342 #define DPRINTF(sc, m, fmt, ...) do {			\
343 	if (sc->sc_debug & (m))				\
344 		printf(fmt, __VA_ARGS__);		\
345 } while (0)
346 
347 static const char *
348 iwn_intr_str(uint8_t cmd)
349 {
350 	switch (cmd) {
351 	/* Notifications */
352 	case IWN_UC_READY:		return "UC_READY";
353 	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
354 	case IWN_TX_DONE:		return "TX_DONE";
355 	case IWN_START_SCAN:		return "START_SCAN";
356 	case IWN_STOP_SCAN:		return "STOP_SCAN";
357 	case IWN_RX_STATISTICS:		return "RX_STATS";
358 	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
359 	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
360 	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
361 	case IWN_RX_PHY:		return "RX_PHY";
362 	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
363 	case IWN_RX_DONE:		return "RX_DONE";
364 
365 	/* Command Notifications */
366 	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
367 	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
368 	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
369 	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
370 	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
371 	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
372 	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
373 	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
374 	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
375 	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
376 	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
377 	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
378 	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
379 	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
380 	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
381 	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
382 	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
383 	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
384 	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
385 	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
386 	}
387 	return "UNKNOWN INTR NOTIF/CMD";
388 }
389 #else
390 #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
391 #endif
392 
393 static device_method_t iwn_methods[] = {
394 	/* Device interface */
395 	DEVMETHOD(device_probe,		iwn_probe),
396 	DEVMETHOD(device_attach,	iwn_attach),
397 	DEVMETHOD(device_detach,	iwn_detach),
398 	DEVMETHOD(device_shutdown,	iwn_shutdown),
399 	DEVMETHOD(device_suspend,	iwn_suspend),
400 	DEVMETHOD(device_resume,	iwn_resume),
401 	{ 0, 0 }
402 };
403 
404 static driver_t iwn_driver = {
405 	"iwn",
406 	iwn_methods,
407 	sizeof(struct iwn_softc)
408 };
409 static devclass_t iwn_devclass;
410 
411 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
412 
413 MODULE_VERSION(iwn, 1);
414 
415 MODULE_DEPEND(iwn, firmware, 1, 1, 1);
416 MODULE_DEPEND(iwn, pci, 1, 1, 1);
417 MODULE_DEPEND(iwn, wlan, 1, 1, 1);
418 
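/* Probe: match the PCI vendor/device IDs against iwn_ident_table. */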
419 static int
420 iwn_probe(device_t dev)
421 {
422 	const struct iwn_ident *ident;
423 
424 	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
425 		if (pci_get_vendor(dev) == ident->vendor &&
426 		    pci_get_device(dev) == ident->device) {
427 			device_set_desc(dev, ident->name);
428 			return 0;
429 		}
430 	}
431 	return ENXIO;
432 }
433 
434 static int
435 iwn_attach(device_t dev)
436 {
437 	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
438 	struct ieee80211com *ic;
439 	struct ifnet *ifp;
440 	uint32_t reg;
441 	int i, error, result;
442 	uint8_t macaddr[IEEE80211_ADDR_LEN];
443 
444 	sc->sc_dev = dev;
445 
446 	/*
447 	 * Get the offset of the PCI Express Capability Structure in PCI
448 	 * Configuration Space.
449 	 */
450 	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
451 	if (error != 0) {
452 		device_printf(dev, "PCIe capability structure not found!\n");
453 		return error;
454 	}
455 
456 	/* Clear device-specific "PCI retry timeout" register (41h). */
457 	pci_write_config(dev, 0x41, 0, 1);
458 
459 	/* Hardware bug workaround: make sure the INTx disable bit is clear. */
460 	reg = pci_read_config(dev, PCIR_COMMAND, 1);
461 	if (reg & PCIM_CMD_INTxDIS) {
462 		DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
463 		    __func__);
464 		reg &= ~PCIM_CMD_INTxDIS;
465 		pci_write_config(dev, PCIR_COMMAND, reg, 1);
466 	}
467 
468 	/* Enable bus-mastering. */
469 	pci_enable_busmaster(dev);
470 
471 	sc->mem_rid = PCIR_BAR(0);
472 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
473 	    RF_ACTIVE);
474 	if (sc->mem == NULL) {
475 		device_printf(dev, "can't map mem space\n");
476 		error = ENOMEM;
477 		return error;
478 	}
479 	sc->sc_st = rman_get_bustag(sc->mem);
480 	sc->sc_sh = rman_get_bushandle(sc->mem);
481 
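	/* Prefer a single MSI vector; fall back to legacy INTx (rid 0). */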
482 	sc->irq_rid = 0;
483 	if ((result = pci_msi_count(dev)) == 1 &&
484 	    pci_alloc_msi(dev, &result) == 0)
485 		sc->irq_rid = 1;
486 	/* Install interrupt handler. */
487 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
488 	    RF_ACTIVE | RF_SHAREABLE);
489 	if (sc->irq == NULL) {
490 		device_printf(dev, "can't map interrupt\n");
491 		error = ENOMEM;
492 		goto fail;
493 	}
494 
495 	IWN_LOCK_INIT(sc);
496 
497 	/* Read hardware revision and attach. */
498 	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
499 	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
500 		error = iwn4965_attach(sc, pci_get_device(dev));
501 	else
502 		error = iwn5000_attach(sc, pci_get_device(dev));
503 	if (error != 0) {
504 		device_printf(dev, "could not attach device, error %d\n",
505 		    error);
506 		goto fail;
507 	}
508 
509 	if ((error = iwn_hw_prepare(sc)) != 0) {
510 		device_printf(dev, "hardware not ready, error %d\n", error);
511 		goto fail;
512 	}
513 
514 	/* Allocate DMA memory for firmware transfers. */
515 	if ((error = iwn_alloc_fwmem(sc)) != 0) {
516 		device_printf(dev,
517 		    "could not allocate memory for firmware, error %d\n",
518 		    error);
519 		goto fail;
520 	}
521 
522 	/* Allocate "Keep Warm" page. */
523 	if ((error = iwn_alloc_kw(sc)) != 0) {
524 		device_printf(dev,
525 		    "could not allocate keep warm page, error %d\n", error);
526 		goto fail;
527 	}
528 
529 	/* Allocate ICT table for 5000 Series. */
530 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
531 	    (error = iwn_alloc_ict(sc)) != 0) {
532 		device_printf(dev, "could not allocate ICT table, error %d\n",
533 		    error);
534 		goto fail;
535 	}
536 
537 	/* Allocate TX scheduler "rings". */
538 	if ((error = iwn_alloc_sched(sc)) != 0) {
539 		device_printf(dev,
540 		    "could not allocate TX scheduler rings, error %d\n", error);
541 		goto fail;
542 	}
543 
544 	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
545 	for (i = 0; i < sc->ntxqs; i++) {
546 		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
547 			device_printf(dev,
548 			    "could not allocate TX ring %d, error %d\n", i,
549 			    error);
550 			goto fail;
551 		}
552 	}
553 
554 	/* Allocate RX ring. */
555 	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
556 		device_printf(dev, "could not allocate RX ring, error %d\n",
557 		    error);
558 		goto fail;
559 	}
560 
561 	/* Clear pending interrupts. */
562 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
563 
564 	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
565 	if (ifp == NULL) {
566 		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOMEM;
567 		goto fail;
568 	}
569 
570 	ic = ifp->if_l2com;
571 	ic->ic_ifp = ifp;
572 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
573 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
574 
575 	/* Set device capabilities. */
576 	ic->ic_caps =
577 		  IEEE80211_C_STA		/* station mode supported */
578 		| IEEE80211_C_MONITOR		/* monitor mode supported */
579 		| IEEE80211_C_BGSCAN		/* background scanning */
580 		| IEEE80211_C_TXPMGT		/* tx power management */
581 		| IEEE80211_C_SHSLOT		/* short slot time supported */
582 		| IEEE80211_C_WPA
583 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
584 #if 0
585 		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
586 #endif
587 		| IEEE80211_C_WME		/* WME */
588 		;
589 
590 	/* Read MAC address, channels, etc from EEPROM. */
591 	if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
592 		device_printf(dev, "could not read EEPROM, error %d\n",
593 		    error);
594 		goto fail;
595 	}
596 
597 	/* Count the number of available chains. */
598 	sc->ntxchains =
599 	    ((sc->txchainmask >> 2) & 1) +
600 	    ((sc->txchainmask >> 1) & 1) +
601 	    ((sc->txchainmask >> 0) & 1);
602 	sc->nrxchains =
603 	    ((sc->rxchainmask >> 2) & 1) +
604 	    ((sc->rxchainmask >> 1) & 1) +
605 	    ((sc->rxchainmask >> 0) & 1);
606 	if (bootverbose) {
607 		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
608 		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
609 		    macaddr, ":");
610 	}
611 
612 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
613 		ic->ic_rxstream = sc->nrxchains;
614 		ic->ic_txstream = sc->ntxchains;
615 		ic->ic_htcaps =
616 			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
617 			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
618 			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
619 			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
620 #ifdef notyet
621 			| IEEE80211_HTCAP_GREENFIELD
622 #if IWN_RBUF_SIZE == 8192
623 			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
624 #else
625 			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
626 #endif
627 #endif
628 			/* s/w capabilities */
629 			| IEEE80211_HTC_HT		/* HT operation */
630 			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
631 #ifdef notyet
632 			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
633 #endif
634 			;
635 	}
636 
637 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
638 	ifp->if_softc = sc;
639 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
640 	ifp->if_init = iwn_init;
641 	ifp->if_ioctl = iwn_ioctl;
642 	ifp->if_start = iwn_start;
643 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
644 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
645 	IFQ_SET_READY(&ifp->if_snd);
646 
647 	ieee80211_ifattach(ic, macaddr);
648 	ic->ic_vap_create = iwn_vap_create;
649 	ic->ic_vap_delete = iwn_vap_delete;
650 	ic->ic_raw_xmit = iwn_raw_xmit;
651 	ic->ic_node_alloc = iwn_node_alloc;
652 	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
653 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
654 	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
655 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
656 	sc->sc_addba_request = ic->ic_addba_request;
657 	ic->ic_addba_request = iwn_addba_request;
658 	sc->sc_addba_response = ic->ic_addba_response;
659 	ic->ic_addba_response = iwn_addba_response;
660 	sc->sc_addba_stop = ic->ic_addba_stop;
661 	ic->ic_addba_stop = iwn_ampdu_tx_stop;
662 	ic->ic_newassoc = iwn_newassoc;
663 	ic->ic_wme.wme_update = iwn_updateedca;
664 	ic->ic_update_mcast = iwn_update_mcast;
665 	ic->ic_scan_start = iwn_scan_start;
666 	ic->ic_scan_end = iwn_scan_end;
667 	ic->ic_set_channel = iwn_set_channel;
668 	ic->ic_scan_curchan = iwn_scan_curchan;
669 	ic->ic_scan_mindwell = iwn_scan_mindwell;
670 	ic->ic_setregdomain = iwn_setregdomain;
671 
672 	iwn_radiotap_attach(sc);
673 
674 	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
675 	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
676 	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
677 	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
678 	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
679 
680 	iwn_sysctlattach(sc);
681 
682 	/*
683 	 * Hook our interrupt after all initialization is complete.
684 	 */
685 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
686 	    NULL, iwn_intr, sc, &sc->sc_ih);
687 	if (error != 0) {
688 		device_printf(dev, "can't establish interrupt, error %d\n",
689 		    error);
690 		goto fail;
691 	}
692 
693 	if (bootverbose)
694 		ieee80211_announce(ic);
695 	return 0;
696 fail:
697 	iwn_detach(dev);
698 	return error;
699 }
700 
701 static int
702 iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
703 {
704 	struct iwn_ops *ops = &sc->ops;
705 
706 	ops->load_firmware = iwn4965_load_firmware;
707 	ops->read_eeprom = iwn4965_read_eeprom;
708 	ops->post_alive = iwn4965_post_alive;
709 	ops->nic_config = iwn4965_nic_config;
710 	ops->update_sched = iwn4965_update_sched;
711 	ops->get_temperature = iwn4965_get_temperature;
712 	ops->get_rssi = iwn4965_get_rssi;
713 	ops->set_txpower = iwn4965_set_txpower;
714 	ops->init_gains = iwn4965_init_gains;
715 	ops->set_gains = iwn4965_set_gains;
716 	ops->add_node = iwn4965_add_node;
717 	ops->tx_done = iwn4965_tx_done;
718 	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
719 	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
720 	sc->ntxqs = IWN4965_NTXQUEUES;
721 	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
722 	sc->ndmachnls = IWN4965_NDMACHNLS;
723 	sc->broadcast_id = IWN4965_ID_BROADCAST;
724 	sc->rxonsz = IWN4965_RXONSZ;
725 	sc->schedsz = IWN4965_SCHEDSZ;
726 	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
727 	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
728 	sc->fwsz = IWN4965_FWSZ;
729 	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
730 	sc->limits = &iwn4965_sensitivity_limits;
731 	sc->fwname = "iwn4965fw";
732 	/* Override chain masks; ROM is known to be broken. */
733 	sc->txchainmask = IWN_ANT_AB;
734 	sc->rxchainmask = IWN_ANT_ABC;
735 
736 	return 0;
737 }
738 
739 static int
740 iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
741 {
742 	struct iwn_ops *ops = &sc->ops;
743 
744 	ops->load_firmware = iwn5000_load_firmware;
745 	ops->read_eeprom = iwn5000_read_eeprom;
746 	ops->post_alive = iwn5000_post_alive;
747 	ops->nic_config = iwn5000_nic_config;
748 	ops->update_sched = iwn5000_update_sched;
749 	ops->get_temperature = iwn5000_get_temperature;
750 	ops->get_rssi = iwn5000_get_rssi;
751 	ops->set_txpower = iwn5000_set_txpower;
752 	ops->init_gains = iwn5000_init_gains;
753 	ops->set_gains = iwn5000_set_gains;
754 	ops->add_node = iwn5000_add_node;
755 	ops->tx_done = iwn5000_tx_done;
756 	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
757 	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
758 	sc->ntxqs = IWN5000_NTXQUEUES;
759 	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
760 	sc->ndmachnls = IWN5000_NDMACHNLS;
761 	sc->broadcast_id = IWN5000_ID_BROADCAST;
762 	sc->rxonsz = IWN5000_RXONSZ;
763 	sc->schedsz = IWN5000_SCHEDSZ;
764 	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
765 	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
766 	sc->fwsz = IWN5000_FWSZ;
767 	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
768 	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
769 	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
770 
771 	switch (sc->hw_type) {
772 	case IWN_HW_REV_TYPE_5100:
773 		sc->limits = &iwn5000_sensitivity_limits;
774 		sc->fwname = "iwn5000fw";
775 		/* Override chain masks; ROM is known to be broken. */
776 		sc->txchainmask = IWN_ANT_B;
777 		sc->rxchainmask = IWN_ANT_AB;
778 		break;
779 	case IWN_HW_REV_TYPE_5150:
780 		sc->limits = &iwn5150_sensitivity_limits;
781 		sc->fwname = "iwn5150fw";
782 		break;
783 	case IWN_HW_REV_TYPE_5300:
784 	case IWN_HW_REV_TYPE_5350:
785 		sc->limits = &iwn5000_sensitivity_limits;
786 		sc->fwname = "iwn5000fw";
787 		break;
788 	case IWN_HW_REV_TYPE_1000:
789 		sc->limits = &iwn1000_sensitivity_limits;
790 		sc->fwname = "iwn1000fw";
791 		break;
792 	case IWN_HW_REV_TYPE_6000:
793 		sc->limits = &iwn6000_sensitivity_limits;
794 		sc->fwname = "iwn6000fw";
795 		if (pid == 0x422c || pid == 0x4239) {
796 			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
797 			/* Override chain masks; ROM is known to be broken. */
798 			sc->txchainmask = IWN_ANT_BC;
799 			sc->rxchainmask = IWN_ANT_BC;
800 		}
801 		break;
802 	case IWN_HW_REV_TYPE_6050:
803 		sc->limits = &iwn6000_sensitivity_limits;
804 		sc->fwname = "iwn6050fw";
805 		/* Override chain masks; ROM is known to be broken. */
806 		sc->txchainmask = IWN_ANT_AB;
807 		sc->rxchainmask = IWN_ANT_AB;
808 		break;
809 	case IWN_HW_REV_TYPE_6005:
810 		sc->limits = &iwn6000_sensitivity_limits;
811 		if (pid != 0x0082 && pid != 0x0085) {
812 			sc->fwname = "iwn6000g2bfw";
813 			sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
814 		} else
815 			sc->fwname = "iwn6000g2afw";
816 		break;
817 	default:
818 		device_printf(sc->sc_dev, "adapter type %d not supported\n",
819 		    sc->hw_type);
820 		return ENOTSUP;
821 	}
822 	return 0;
823 }
824 
825 /*
826  * Attach the interface to 802.11 radiotap.
827  */
828 static void
829 iwn_radiotap_attach(struct iwn_softc *sc)
830 {
831 	struct ifnet *ifp = sc->sc_ifp;
832 	struct ieee80211com *ic = ifp->if_l2com;
833 
834 	ieee80211_radiotap_attach(ic,
835 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
836 		IWN_TX_RADIOTAP_PRESENT,
837 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
838 		IWN_RX_RADIOTAP_PRESENT);
839 }
840 
841 static void
842 iwn_sysctlattach(struct iwn_softc *sc)
843 {
844 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
845 	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
846 
847 #ifdef IWN_DEBUG
848 	sc->sc_debug = 0;
849 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
850 	    "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
851 #endif
852 }
853 
854 static struct ieee80211vap *
855 iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
856     enum ieee80211_opmode opmode, int flags,
857     const uint8_t bssid[IEEE80211_ADDR_LEN],
858     const uint8_t mac[IEEE80211_ADDR_LEN])
859 {
860 	struct iwn_vap *ivp;
861 	struct ieee80211vap *vap;
862 
863 	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
864 		return NULL;
865 	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
866 	    M_80211_VAP, M_NOWAIT | M_ZERO);
867 	if (ivp == NULL)
868 		return NULL;
869 	vap = &ivp->iv_vap;
870 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
871 	vap->iv_bmissthreshold = 10;		/* override default */
872 	/* Override with driver methods. */
873 	ivp->iv_newstate = vap->iv_newstate;
874 	vap->iv_newstate = iwn_newstate;
875 
876 	ieee80211_ratectl_init(vap);
877 	/* Complete setup. */
878 	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
879 	ic->ic_opmode = opmode;
880 	return vap;
881 }
882 
883 static void
884 iwn_vap_delete(struct ieee80211vap *vap)
885 {
886 	struct iwn_vap *ivp = IWN_VAP(vap);
887 
888 	ieee80211_ratectl_deinit(vap);
889 	ieee80211_vap_detach(vap);
890 	free(ivp, M_80211_VAP);
891 }
892 
893 static int
894 iwn_detach(device_t dev)
895 {
896 	struct iwn_softc *sc = device_get_softc(dev);
897 	struct ifnet *ifp = sc->sc_ifp;
898 	struct ieee80211com *ic;
899 	int qid;
900 
901 	if (ifp != NULL) {
902 		ic = ifp->if_l2com;
903 
904 		ieee80211_draintask(ic, &sc->sc_reinit_task);
905 		ieee80211_draintask(ic, &sc->sc_radioon_task);
906 		ieee80211_draintask(ic, &sc->sc_radiooff_task);
907 
908 		iwn_stop(sc);
909 		callout_drain(&sc->watchdog_to);
910 		callout_drain(&sc->calib_to);
911 		ieee80211_ifdetach(ic);
912 	}
913 
914 	/* Uninstall interrupt handler. */
915 	if (sc->irq != NULL) {
916 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
917 		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
918 		if (sc->irq_rid == 1)
919 			pci_release_msi(dev);
920 	}
921 
922 	/* Free DMA resources. */
923 	iwn_free_rx_ring(sc, &sc->rxq);
924 	for (qid = 0; qid < sc->ntxqs; qid++)
925 		iwn_free_tx_ring(sc, &sc->txq[qid]);
926 	iwn_free_sched(sc);
927 	iwn_free_kw(sc);
928 	if (sc->ict != NULL)
929 		iwn_free_ict(sc);
930 	iwn_free_fwmem(sc);
931 
932 	if (sc->mem != NULL)
933 		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
934 
935 	if (ifp != NULL)
936 		if_free(ifp);
937 
938 	IWN_LOCK_DESTROY(sc);
939 	return 0;
940 }
941 
942 static int
943 iwn_shutdown(device_t dev)
944 {
945 	struct iwn_softc *sc = device_get_softc(dev);
946 
947 	iwn_stop(sc);
948 	return 0;
949 }
950 
951 static int
952 iwn_suspend(device_t dev)
953 {
954 	struct iwn_softc *sc = device_get_softc(dev);
955 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
956 
957 	ieee80211_suspend_all(ic);
958 	return 0;
959 }
960 
961 static int
962 iwn_resume(device_t dev)
963 {
964 	struct iwn_softc *sc = device_get_softc(dev);
965 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
966 
967 	/* Clear device-specific "PCI retry timeout" register (41h). */
968 	pci_write_config(dev, 0x41, 0, 1);
969 
970 	ieee80211_resume_all(ic);
971 	return 0;
972 }
973 
974 static int
975 iwn_nic_lock(struct iwn_softc *sc)
976 {
977 	int ntries;
978 
979 	/* Request exclusive access to NIC. */
980 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
981 
982 	/* Spin until we actually get the lock. */
983 	for (ntries = 0; ntries < 1000; ntries++) {
984 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
985 		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
986 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
987 			return 0;
988 		DELAY(10);
989 	}
990 	return ETIMEDOUT;
991 }
992 
993 static __inline void
994 iwn_nic_unlock(struct iwn_softc *sc)
995 {
996 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
997 }
998 
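/*
 * Peripheral ("prph") registers are reached indirectly through the
 * PRPH address/data port registers; callers normally hold the NIC lock
 * (iwn_nic_lock()) around these accesses.
 */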
999 static __inline uint32_t
1000 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1001 {
1002 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1003 	IWN_BARRIER_READ_WRITE(sc);
1004 	return IWN_READ(sc, IWN_PRPH_RDATA);
1005 }
1006 
1007 static __inline void
1008 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1009 {
1010 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1011 	IWN_BARRIER_WRITE(sc);
1012 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1013 }
1014 
1015 static __inline void
1016 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1017 {
1018 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1019 }
1020 
1021 static __inline void
1022 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1023 {
1024 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1025 }
1026 
1027 static __inline void
1028 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1029     const uint32_t *data, int count)
1030 {
1031 	for (; count > 0; count--, data++, addr += 4)
1032 		iwn_prph_write(sc, addr, *data);
1033 }
1034 
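/*
 * NIC internal memory (SRAM) is likewise accessed indirectly, through
 * the MEM address/data port registers.
 */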
1035 static __inline uint32_t
1036 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1037 {
1038 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1039 	IWN_BARRIER_READ_WRITE(sc);
1040 	return IWN_READ(sc, IWN_MEM_RDATA);
1041 }
1042 
1043 static __inline void
1044 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1045 {
1046 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1047 	IWN_BARRIER_WRITE(sc);
1048 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1049 }
1050 
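/*
 * Write a 16-bit value into NIC memory with a 32-bit read-modify-write
 * of the containing word.
 */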
1051 static __inline void
1052 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1053 {
1054 	uint32_t tmp;
1055 
1056 	tmp = iwn_mem_read(sc, addr & ~3);
1057 	if (addr & 3)
1058 		tmp = (tmp & 0x0000ffff) | data << 16;
1059 	else
1060 		tmp = (tmp & 0xffff0000) | data;
1061 	iwn_mem_write(sc, addr & ~3, tmp);
1062 }
1063 
1064 static __inline void
1065 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1066     int count)
1067 {
1068 	for (; count > 0; count--, addr += 4)
1069 		*data++ = iwn_mem_read(sc, addr);
1070 }
1071 
1072 static __inline void
1073 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1074     int count)
1075 {
1076 	for (; count > 0; count--, addr += 4)
1077 		iwn_mem_write(sc, addr, val);
1078 }
1079 
1080 static int
1081 iwn_eeprom_lock(struct iwn_softc *sc)
1082 {
1083 	int i, ntries;
1084 
1085 	for (i = 0; i < 100; i++) {
1086 		/* Request exclusive access to EEPROM. */
1087 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1088 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1089 
1090 		/* Spin until we actually get the lock. */
1091 		for (ntries = 0; ntries < 100; ntries++) {
1092 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1093 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1094 				return 0;
1095 			DELAY(10);
1096 		}
1097 	}
1098 	return ETIMEDOUT;
1099 }
1100 
1101 static __inline void
1102 iwn_eeprom_unlock(struct iwn_softc *sc)
1103 {
1104 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1105 }
1106 
1107 /*
1108  * Initialize access by host to One Time Programmable ROM.
1109  * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1110  */
1111 static int
1112 iwn_init_otprom(struct iwn_softc *sc)
1113 {
1114 	uint16_t prev, base, next;
1115 	int count, error;
1116 
1117 	/* Wait for clock stabilization before accessing prph. */
1118 	if ((error = iwn_clock_wait(sc)) != 0)
1119 		return error;
1120 
1121 	if ((error = iwn_nic_lock(sc)) != 0)
1122 		return error;
1123 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1124 	DELAY(5);
1125 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1126 	iwn_nic_unlock(sc);
1127 
1128 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1129 	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1130 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1131 		    IWN_RESET_LINK_PWR_MGMT_DIS);
1132 	}
1133 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1134 	/* Clear ECC status. */
1135 	IWN_SETBITS(sc, IWN_OTP_GP,
1136 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1137 
1138 	/*
1139 	 * Find the block before the last block (which contains the EEPROM image)
1140 	 * for HW without OTP shadow RAM.
1141 	 */
1142 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1143 		/* Switch to absolute addressing mode. */
1144 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1145 		base = prev = 0;
1146 		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1147 			error = iwn_read_prom_data(sc, base, &next, 2);
1148 			if (error != 0)
1149 				return error;
1150 			if (next == 0)	/* End of linked-list. */
1151 				break;
1152 			prev = base;
1153 			base = le16toh(next);
1154 		}
1155 		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1156 			return EIO;
1157 		/* Skip "next" word. */
1158 		sc->prom_base = prev + 1;
1159 	}
1160 	return 0;
1161 }
1162 
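/*
 * Read "count" bytes from the EEPROM/OTPROM.  "addr" is a 16-bit word
 * index to which sc->prom_base is added; the ROM is read one 16-bit
 * word at a time.
 */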
1163 static int
1164 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1165 {
1166 	uint8_t *out = data;
1167 	uint32_t val, tmp;
1168 	int ntries;
1169 
1170 	addr += sc->prom_base;
1171 	for (; count > 0; count -= 2, addr++) {
1172 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1173 		for (ntries = 0; ntries < 10; ntries++) {
1174 			val = IWN_READ(sc, IWN_EEPROM);
1175 			if (val & IWN_EEPROM_READ_VALID)
1176 				break;
1177 			DELAY(5);
1178 		}
1179 		if (ntries == 10) {
1180 			device_printf(sc->sc_dev,
1181 			    "timeout reading ROM at 0x%x\n", addr);
1182 			return ETIMEDOUT;
1183 		}
1184 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1185 			/* OTPROM, check for ECC errors. */
1186 			tmp = IWN_READ(sc, IWN_OTP_GP);
1187 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1188 				device_printf(sc->sc_dev,
1189 				    "OTPROM ECC error at 0x%x\n", addr);
1190 				return EIO;
1191 			}
1192 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1193 				/* Correctable ECC error, clear bit. */
1194 				IWN_SETBITS(sc, IWN_OTP_GP,
1195 				    IWN_OTP_GP_ECC_CORR_STTS);
1196 			}
1197 		}
1198 		*out++ = val >> 16;
1199 		if (count > 1)
1200 			*out++ = val >> 24;
1201 	}
1202 	return 0;
1203 }
1204 
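/* bus_dmamap_load() callback: record the bus address of the single segment. */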
1205 static void
1206 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1207 {
1208 	if (error != 0)
1209 		return;
1210 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1211 	*(bus_addr_t *)arg = segs[0].ds_addr;
1212 }
1213 
1214 static int
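/*
 * Allocate a physically contiguous, DMA-safe buffer of "size" bytes
 * aligned on an "alignment" boundary, load it, and optionally return
 * its kernel virtual address through "kvap".
 */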
1215 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1216     void **kvap, bus_size_t size, bus_size_t alignment)
1217 {
1218 	int error;
1219 
1220 	dma->tag = NULL;
1221 	dma->size = size;
1222 
1223 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1224 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1225 	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1226 	if (error != 0)
1227 		goto fail;
1228 
1229 	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1230 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1231 	if (error != 0)
1232 		goto fail;
1233 
1234 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1235 	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1236 	if (error != 0)
1237 		goto fail;
1238 
1239 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1240 
1241 	if (kvap != NULL)
1242 		*kvap = dma->vaddr;
1243 
1244 	return 0;
1245 
1246 fail:	iwn_dma_contig_free(dma);
1247 	return error;
1248 }
1249 
1250 static void
1251 iwn_dma_contig_free(struct iwn_dma_info *dma)
1252 {
1253 	if (dma->map != NULL) {
1254 		if (dma->vaddr != NULL) {
1255 			bus_dmamap_sync(dma->tag, dma->map,
1256 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1257 			bus_dmamap_unload(dma->tag, dma->map);
1258 			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1259 			dma->vaddr = NULL;
1260 		}
1261 		bus_dmamap_destroy(dma->tag, dma->map);
1262 		dma->map = NULL;
1263 	}
1264 	if (dma->tag != NULL) {
1265 		bus_dma_tag_destroy(dma->tag);
1266 		dma->tag = NULL;
1267 	}
1268 }
1269 
1270 static int
1271 iwn_alloc_sched(struct iwn_softc *sc)
1272 {
1273 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1274 	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1275 	    sc->schedsz, 1024);
1276 }
1277 
1278 static void
1279 iwn_free_sched(struct iwn_softc *sc)
1280 {
1281 	iwn_dma_contig_free(&sc->sched_dma);
1282 }
1283 
1284 static int
1285 iwn_alloc_kw(struct iwn_softc *sc)
1286 {
1287 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1288 	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1289 }
1290 
1291 static void
1292 iwn_free_kw(struct iwn_softc *sc)
1293 {
1294 	iwn_dma_contig_free(&sc->kw_dma);
1295 }
1296 
1297 static int
1298 iwn_alloc_ict(struct iwn_softc *sc)
1299 {
1300 	/* ICT table must be aligned on a 4KB boundary. */
1301 	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1302 	    IWN_ICT_SIZE, 4096);
1303 }
1304 
1305 static void
1306 iwn_free_ict(struct iwn_softc *sc)
1307 {
1308 	iwn_dma_contig_free(&sc->ict_dma);
1309 }
1310 
1311 static int
1312 iwn_alloc_fwmem(struct iwn_softc *sc)
1313 {
1314 	/* Must be aligned on a 16-byte boundary. */
1315 	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1316 }
1317 
1318 static void
1319 iwn_free_fwmem(struct iwn_softc *sc)
1320 {
1321 	iwn_dma_contig_free(&sc->fw_dma);
1322 }
1323 
1324 static int
1325 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1326 {
1327 	bus_size_t size;
1328 	int i, error;
1329 
1330 	ring->cur = 0;
1331 
1332 	/* Allocate RX descriptors (256-byte aligned). */
1333 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1334 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1335 	    size, 256);
1336 	if (error != 0) {
1337 		device_printf(sc->sc_dev,
1338 		    "%s: could not allocate RX ring DMA memory, error %d\n",
1339 		    __func__, error);
1340 		goto fail;
1341 	}
1342 
1343 	/* Allocate RX status area (16-byte aligned). */
1344 	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1345 	    sizeof (struct iwn_rx_status), 16);
1346 	if (error != 0) {
1347 		device_printf(sc->sc_dev,
1348 		    "%s: could not allocate RX status DMA memory, error %d\n",
1349 		    __func__, error);
1350 		goto fail;
1351 	}
1352 
1353 	/* Create RX buffer DMA tag. */
1354 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1355 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1356 	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1357 	    &ring->data_dmat);
1358 	if (error != 0) {
1359 		device_printf(sc->sc_dev,
1360 		    "%s: could not create RX buf DMA tag, error %d\n",
1361 		    __func__, error);
1362 		goto fail;
1363 	}
1364 
1365 	/*
1366 	 * Allocate and map RX buffers.
1367 	 */
1368 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1369 		struct iwn_rx_data *data = &ring->data[i];
1370 		bus_addr_t paddr;
1371 
1372 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1373 		if (error != 0) {
1374 			device_printf(sc->sc_dev,
1375 			    "%s: could not create RX buf DMA map, error %d\n",
1376 			    __func__, error);
1377 			goto fail;
1378 		}
1379 
1380 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1381 		    IWN_RBUF_SIZE);
1382 		if (data->m == NULL) {
1383 			device_printf(sc->sc_dev,
1384 			    "%s: could not allocate RX mbuf\n", __func__);
1385 			error = ENOBUFS;
1386 			goto fail;
1387 		}
1388 
1389 		error = bus_dmamap_load(ring->data_dmat, data->map,
1390 		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1391 		    &paddr, BUS_DMA_NOWAIT);
1392 		if (error != 0 && error != EFBIG) {
1393 			device_printf(sc->sc_dev,
1394 			    "%s: could not map mbuf, error %d\n", __func__,
1395 			    error);
1396 			goto fail;
1397 		}
1398 
1399 		/* Set physical address of RX buffer (256-byte aligned). */
1400 		ring->desc[i] = htole32(paddr >> 8);
1401 	}
1402 
1403 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1404 	    BUS_DMASYNC_PREWRITE);
1405 
1406 	return 0;
1407 
1408 fail:	iwn_free_rx_ring(sc, ring);
1409 	return error;
1410 }
1411 
1412 static void
1413 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1414 {
1415 	int ntries;
1416 
1417 	if (iwn_nic_lock(sc) == 0) {
1418 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1419 		for (ntries = 0; ntries < 1000; ntries++) {
1420 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1421 			    IWN_FH_RX_STATUS_IDLE)
1422 				break;
1423 			DELAY(10);
1424 		}
1425 		iwn_nic_unlock(sc);
1426 	}
1427 	ring->cur = 0;
1428 	sc->last_rx_valid = 0;
1429 }
1430 
1431 static void
1432 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1433 {
1434 	int i;
1435 
1436 	iwn_dma_contig_free(&ring->desc_dma);
1437 	iwn_dma_contig_free(&ring->stat_dma);
1438 
1439 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1440 		struct iwn_rx_data *data = &ring->data[i];
1441 
1442 		if (data->m != NULL) {
1443 			bus_dmamap_sync(ring->data_dmat, data->map,
1444 			    BUS_DMASYNC_POSTREAD);
1445 			bus_dmamap_unload(ring->data_dmat, data->map);
1446 			m_freem(data->m);
1447 			data->m = NULL;
1448 		}
1449 		if (data->map != NULL)
1450 			bus_dmamap_destroy(ring->data_dmat, data->map);
1451 	}
1452 	if (ring->data_dmat != NULL) {
1453 		bus_dma_tag_destroy(ring->data_dmat);
1454 		ring->data_dmat = NULL;
1455 	}
1456 }
1457 
1458 static int
1459 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1460 {
1461 	bus_addr_t paddr;
1462 	bus_size_t size;
1463 	int i, error;
1464 
1465 	ring->qid = qid;
1466 	ring->queued = 0;
1467 	ring->cur = 0;
1468 
1469 	/* Allocate TX descriptors (256-byte aligned). */
1470 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1471 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1472 	    size, 256);
1473 	if (error != 0) {
1474 		device_printf(sc->sc_dev,
1475 		    "%s: could not allocate TX ring DMA memory, error %d\n",
1476 		    __func__, error);
1477 		goto fail;
1478 	}
1479 
1480 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1481 	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1482 	    size, 4);
1483 	if (error != 0) {
1484 		device_printf(sc->sc_dev,
1485 		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1486 		    __func__, error);
1487 		goto fail;
1488 	}
1489 
1490 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1491 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1492 	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1493 	    &ring->data_dmat);
1494 	if (error != 0) {
1495 		device_printf(sc->sc_dev,
1496 		    "%s: could not create TX buf DMA tag, error %d\n",
1497 		    __func__, error);
1498 		goto fail;
1499 	}
1500 
1501 	paddr = ring->cmd_dma.paddr;
1502 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1503 		struct iwn_tx_data *data = &ring->data[i];
1504 
1505 		data->cmd_paddr = paddr;
1506 		data->scratch_paddr = paddr + 12;
1507 		paddr += sizeof (struct iwn_tx_cmd);
1508 
1509 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1510 		if (error != 0) {
1511 			device_printf(sc->sc_dev,
1512 			    "%s: could not create TX buf DMA map, error %d\n",
1513 			    __func__, error);
1514 			goto fail;
1515 		}
1516 	}
1517 	return 0;
1518 
1519 fail:	iwn_free_tx_ring(sc, ring);
1520 	return error;
1521 }
1522 
1523 static void
1524 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1525 {
1526 	int i;
1527 
1528 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1529 		struct iwn_tx_data *data = &ring->data[i];
1530 
1531 		if (data->m != NULL) {
1532 			bus_dmamap_sync(ring->data_dmat, data->map,
1533 			    BUS_DMASYNC_POSTWRITE);
1534 			bus_dmamap_unload(ring->data_dmat, data->map);
1535 			m_freem(data->m);
1536 			data->m = NULL;
1537 		}
1538 	}
1539 	/* Clear TX descriptors. */
1540 	memset(ring->desc, 0, ring->desc_dma.size);
1541 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1542 	    BUS_DMASYNC_PREWRITE);
1543 	sc->qfullmsk &= ~(1 << ring->qid);
1544 	ring->queued = 0;
1545 	ring->cur = 0;
1546 }
1547 
1548 static void
1549 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1550 {
1551 	int i;
1552 
1553 	iwn_dma_contig_free(&ring->desc_dma);
1554 	iwn_dma_contig_free(&ring->cmd_dma);
1555 
1556 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1557 		struct iwn_tx_data *data = &ring->data[i];
1558 
1559 		if (data->m != NULL) {
1560 			bus_dmamap_sync(ring->data_dmat, data->map,
1561 			    BUS_DMASYNC_POSTWRITE);
1562 			bus_dmamap_unload(ring->data_dmat, data->map);
1563 			m_freem(data->m);
1564 		}
1565 		if (data->map != NULL)
1566 			bus_dmamap_destroy(ring->data_dmat, data->map);
1567 	}
1568 	if (ring->data_dmat != NULL) {
1569 		bus_dma_tag_destroy(ring->data_dmat);
1570 		ring->data_dmat = NULL;
1571 	}
1572 }
1573 
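/*
 * On the 5000 series and newer, interrupt causes can be read from an
 * in-memory ICT (interrupt cause table) instead of the INT register;
 * reset and re-enable that mechanism here.
 */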
1574 static void
1575 iwn5000_ict_reset(struct iwn_softc *sc)
1576 {
1577 	/* Disable interrupts. */
1578 	IWN_WRITE(sc, IWN_INT_MASK, 0);
1579 
1580 	/* Reset ICT table. */
1581 	memset(sc->ict, 0, IWN_ICT_SIZE);
1582 	sc->ict_cur = 0;
1583 
1584 	/* Set physical address of ICT table (4KB aligned). */
1585 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1586 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1587 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1588 
1589 	/* Enable periodic RX interrupt. */
1590 	sc->int_mask |= IWN_INT_RX_PERIODIC;
1591 	/* Switch to ICT interrupt mode in driver. */
1592 	sc->sc_flags |= IWN_FLAG_USE_ICT;
1593 
1594 	/* Re-enable interrupts. */
1595 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1596 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1597 }
1598 
1599 static int
1600 iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1601 {
1602 	struct iwn_ops *ops = &sc->ops;
1603 	uint16_t val;
1604 	int error;
1605 
1606 	/* Check whether adapter has an EEPROM or an OTPROM. */
1607 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1608 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1609 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1610 	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1611 	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1612 
1613 	/* Adapter has to be powered on for EEPROM access to work. */
1614 	if ((error = iwn_apm_init(sc)) != 0) {
1615 		device_printf(sc->sc_dev,
1616 		    "%s: could not power ON adapter, error %d\n", __func__,
1617 		    error);
1618 		return error;
1619 	}
1620 
1621 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1622 		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1623 		return EIO;
1624 	}
1625 	if ((error = iwn_eeprom_lock(sc)) != 0) {
1626 		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
1627 		    __func__, error);
1628 		return error;
1629 	}
1630 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1631 		if ((error = iwn_init_otprom(sc)) != 0) {
1632 			device_printf(sc->sc_dev,
1633 			    "%s: could not initialize OTPROM, error %d\n",
1634 			    __func__, error);
1635 			return error;
1636 		}
1637 	}
1638 
1639 	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1640 	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
1641 	/* Check if HT support is bonded out. */
1642 	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1643 		sc->sc_flags |= IWN_FLAG_HAS_11N;
1644 
1645 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1646 	sc->rfcfg = le16toh(val);
1647 	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1648 	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1649 	if (sc->txchainmask == 0)
1650 		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1651 	if (sc->rxchainmask == 0)
1652 		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1653 
1654 	/* Read MAC address. */
1655 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1656 
1657 	/* Read adapter-specific information from EEPROM. */
1658 	ops->read_eeprom(sc);
1659 
1660 	iwn_apm_stop(sc);	/* Power OFF adapter. */
1661 
1662 	iwn_eeprom_unlock(sc);
1663 	return 0;
1664 }
1665 
1666 static void
1667 iwn4965_read_eeprom(struct iwn_softc *sc)
1668 {
1669 	uint32_t addr;
1670 	uint16_t val;
1671 	int i;
1672 
1673 	/* Read regulatory domain (4 ASCII characters). */
1674 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1675 
1676 	/* Read the list of authorized channels (20MHz ones only). */
1677 	for (i = 0; i < 7; i++) {
1678 		addr = iwn4965_regulatory_bands[i];
1679 		iwn_read_eeprom_channels(sc, i, addr);
1680 	}
1681 
1682 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1683 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1684 	sc->maxpwr2GHz = val & 0xff;
1685 	sc->maxpwr5GHz = val >> 8;
1686 	/* Check that EEPROM values are within valid range. */
1687 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1688 		sc->maxpwr5GHz = 38;
1689 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1690 		sc->maxpwr2GHz = 38;
1691 	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1692 	    sc->maxpwr2GHz, sc->maxpwr5GHz);
1693 
1694 	/* Read samples for each TX power group. */
1695 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1696 	    sizeof sc->bands);
1697 
1698 	/* Read voltage at which samples were taken. */
1699 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1700 	sc->eeprom_voltage = (int16_t)le16toh(val);
1701 	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1702 	    sc->eeprom_voltage);
1703 
1704 #ifdef IWN_DEBUG
1705 	/* Print samples. */
1706 	if (sc->sc_debug & IWN_DEBUG_ANY) {
1707 		for (i = 0; i < IWN_NBANDS; i++)
1708 			iwn4965_print_power_group(sc, i);
1709 	}
1710 #endif
1711 }
1712 
1713 #ifdef IWN_DEBUG
1714 static void
1715 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1716 {
1717 	struct iwn4965_eeprom_band *band = &sc->bands[i];
1718 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1719 	int j, c;
1720 
1721 	printf("===band %d===\n", i);
1722 	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1723 	printf("chan1 num=%d\n", chans[0].num);
1724 	for (c = 0; c < 2; c++) {
1725 		for (j = 0; j < IWN_NSAMPLES; j++) {
1726 			printf("chain %d, sample %d: temp=%d gain=%d "
1727 			    "power=%d pa_det=%d\n", c, j,
1728 			    chans[0].samples[c][j].temp,
1729 			    chans[0].samples[c][j].gain,
1730 			    chans[0].samples[c][j].power,
1731 			    chans[0].samples[c][j].pa_det);
1732 		}
1733 	}
1734 	printf("chan2 num=%d\n", chans[1].num);
1735 	for (c = 0; c < 2; c++) {
1736 		for (j = 0; j < IWN_NSAMPLES; j++) {
1737 			printf("chain %d, sample %d: temp=%d gain=%d "
1738 			    "power=%d pa_det=%d\n", c, j,
1739 			    chans[1].samples[c][j].temp,
1740 			    chans[1].samples[c][j].gain,
1741 			    chans[1].samples[c][j].power,
1742 			    chans[1].samples[c][j].pa_det);
1743 		}
1744 	}
1745 }
1746 #endif
1747 
1748 static void
1749 iwn5000_read_eeprom(struct iwn_softc *sc)
1750 {
1751 	struct iwn5000_eeprom_calib_hdr hdr;
1752 	int32_t volt;
1753 	uint32_t base, addr;
1754 	uint16_t val;
1755 	int i;
1756 
1757 	/* Read regulatory domain (4 ASCII characters). */
1758 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1759 	base = le16toh(val);
1760 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1761 	    sc->eeprom_domain, 4);
1762 
1763 	/* Read the list of authorized channels (20MHz ones only). */
1764 	for (i = 0; i < 7; i++) {
1765 		if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1766 			addr = base + iwn6000_regulatory_bands[i];
1767 		else
1768 			addr = base + iwn5000_regulatory_bands[i];
1769 		iwn_read_eeprom_channels(sc, i, addr);
1770 	}
1771 
1772 	/* Read enhanced TX power information for 6000 Series. */
1773 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1774 		iwn_read_eeprom_enhinfo(sc);
1775 
1776 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1777 	base = le16toh(val);
1778 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1779 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1780 	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
1781 	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
1782 	sc->calib_ver = hdr.version;
1783 
1784 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1785 		/* Compute temperature offset. */
1786 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1787 		sc->eeprom_temp = le16toh(val);
1788 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1789 		volt = le16toh(val);
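		/*
		 * NB: the -5 divisor below is assumed to be the 5150
		 * voltage-to-temperature compensation coefficient from the
		 * vendor calibration data (Linux iwlwifi uses the same value).
		 */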
1790 		sc->temp_off = sc->eeprom_temp - (volt / -5);
1791 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1792 		    sc->eeprom_temp, volt, sc->temp_off);
1793 	} else {
1794 		/* Read crystal calibration. */
1795 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1796 		    &sc->eeprom_crystal, sizeof (uint32_t));
1797 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1798 		    le32toh(sc->eeprom_crystal));
1799 	}
1800 }
1801 
1802 /*
1803  * Translate EEPROM flags to net80211.
1804  */
1805 static uint32_t
1806 iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1807 {
1808 	uint32_t nflags;
1809 
1810 	nflags = 0;
1811 	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1812 		nflags |= IEEE80211_CHAN_PASSIVE;
1813 	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1814 		nflags |= IEEE80211_CHAN_NOADHOC;
1815 	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1816 		nflags |= IEEE80211_CHAN_DFS;
1817 		/* XXX apparently IBSS may still be marked */
1818 		nflags |= IEEE80211_CHAN_NOADHOC;
1819 	}
1820 
1821 	return nflags;
1822 }
1823 
1824 static void
1825 iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1826 {
1827 	struct ifnet *ifp = sc->sc_ifp;
1828 	struct ieee80211com *ic = ifp->if_l2com;
1829 	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1830 	const struct iwn_chan_band *band = &iwn_bands[n];
1831 	struct ieee80211_channel *c;
1832 	uint8_t chan;
1833 	int i, nflags;
1834 
1835 	for (i = 0; i < band->nchan; i++) {
1836 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1837 			DPRINTF(sc, IWN_DEBUG_RESET,
1838 			    "skip chan %d flags 0x%x maxpwr %d\n",
1839 			    band->chan[i], channels[i].flags,
1840 			    channels[i].maxpwr);
1841 			continue;
1842 		}
1843 		chan = band->chan[i];
1844 		nflags = iwn_eeprom_channel_flags(&channels[i]);
1845 
1846 		c = &ic->ic_channels[ic->ic_nchans++];
1847 		c->ic_ieee = chan;
1848 		c->ic_maxregpower = channels[i].maxpwr;
1849 		c->ic_maxpower = 2*c->ic_maxregpower;
1850 
1851 		if (n == 0) {	/* 2GHz band */
1852 			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
1853 			/* G implies that B is also supported. */
1854 			c->ic_flags = IEEE80211_CHAN_B | nflags;
1855 			c = &ic->ic_channels[ic->ic_nchans++];
1856 			c[0] = c[-1];
1857 			c->ic_flags = IEEE80211_CHAN_G | nflags;
1858 		} else {	/* 5GHz band */
1859 			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
1860 			c->ic_flags = IEEE80211_CHAN_A | nflags;
1861 		}
1862 
1863 		/* Save maximum allowed TX power for this channel. */
1864 		sc->maxpwr[chan] = channels[i].maxpwr;
1865 
1866 		DPRINTF(sc, IWN_DEBUG_RESET,
1867 		    "add chan %d flags 0x%x maxpwr %d\n", chan,
1868 		    channels[i].flags, channels[i].maxpwr);
1869 
1870 		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1871 			/* add HT20, HT40 added separately */
1872 			c = &ic->ic_channels[ic->ic_nchans++];
1873 			c[0] = c[-1];
1874 			c->ic_flags |= IEEE80211_CHAN_HT20;
1875 		}
1876 	}
1877 }
1878 
1879 static void
1880 iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1881 {
1882 	struct ifnet *ifp = sc->sc_ifp;
1883 	struct ieee80211com *ic = ifp->if_l2com;
1884 	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1885 	const struct iwn_chan_band *band = &iwn_bands[n];
1886 	struct ieee80211_channel *c, *cent, *extc;
1887 	uint8_t chan;
1888 	int i, nflags;
1889 
1890 	if (!(sc->sc_flags & IWN_FLAG_HAS_11N))
1891 		return;
1892 
1893 	for (i = 0; i < band->nchan; i++) {
1894 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1895 			DPRINTF(sc, IWN_DEBUG_RESET,
1896 			    "skip chan %d flags 0x%x maxpwr %d\n",
1897 			    band->chan[i], channels[i].flags,
1898 			    channels[i].maxpwr);
1899 			continue;
1900 		}
1901 		chan = band->chan[i];
1902 		nflags = iwn_eeprom_channel_flags(&channels[i]);
1903 
1904 		/*
1905 		 * Each entry defines an HT40 channel pair; find the
1906 		 * center channel, then the extension channel above.
1907 		 */
1908 		cent = ieee80211_find_channel_byieee(ic, chan,
1909 		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
1910 		if (cent == NULL) {	/* XXX shouldn't happen */
1911 			device_printf(sc->sc_dev,
1912 			    "%s: no entry for channel %d\n", __func__, chan);
1913 			continue;
1914 		}
1915 		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1916 		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
1917 		if (extc == NULL) {
1918 			DPRINTF(sc, IWN_DEBUG_RESET,
1919 			    "%s: skip chan %d, extension channel not found\n",
1920 			    __func__, chan);
1921 			continue;
1922 		}
1923 
1924 		DPRINTF(sc, IWN_DEBUG_RESET,
1925 		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
1926 		    chan, channels[i].flags, channels[i].maxpwr);
1927 
1928 		c = &ic->ic_channels[ic->ic_nchans++];
1929 		c[0] = cent[0];
1930 		c->ic_extieee = extc->ic_ieee;
1931 		c->ic_flags &= ~IEEE80211_CHAN_HT;
1932 		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
1933 		c = &ic->ic_channels[ic->ic_nchans++];
1934 		c[0] = extc[0];
1935 		c->ic_extieee = cent->ic_ieee;
1936 		c->ic_flags &= ~IEEE80211_CHAN_HT;
1937 		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
1938 	}
1939 }
1940 
1941 static void
1942 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1943 {
1944 	struct ifnet *ifp = sc->sc_ifp;
1945 	struct ieee80211com *ic = ifp->if_l2com;
1946 
1947 	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1948 	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1949 
1950 	if (n < 5)
1951 		iwn_read_eeprom_band(sc, n);
1952 	else
1953 		iwn_read_eeprom_ht40(sc, n);
1954 	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1955 }
1956 
1957 static struct iwn_eeprom_chan *
1958 iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
1959 {
1960 	int band, chan, i, j;
1961 
1962 	if (IEEE80211_IS_CHAN_HT40(c)) {
1963 		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
1964 		if (IEEE80211_IS_CHAN_HT40D(c))
1965 			chan = c->ic_extieee;
1966 		else
1967 			chan = c->ic_ieee;
1968 		for (i = 0; i < iwn_bands[band].nchan; i++) {
1969 			if (iwn_bands[band].chan[i] == chan)
1970 				return &sc->eeprom_channels[band][i];
1971 		}
1972 	} else {
1973 		for (j = 0; j < 5; j++) {
1974 			for (i = 0; i < iwn_bands[j].nchan; i++) {
1975 				if (iwn_bands[j].chan[i] == c->ic_ieee)
1976 					return &sc->eeprom_channels[j][i];
1977 			}
1978 		}
1979 	}
1980 	return NULL;
1981 }
1982 
1983 /*
1984  * Enforce flags read from EEPROM.
1985  */
1986 static int
1987 iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1988     int nchan, struct ieee80211_channel chans[])
1989 {
1990 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
1991 	int i;
1992 
1993 	for (i = 0; i < nchan; i++) {
1994 		struct ieee80211_channel *c = &chans[i];
1995 		struct iwn_eeprom_chan *channel;
1996 
1997 		channel = iwn_find_eeprom_channel(sc, c);
1998 		if (channel == NULL) {
1999 			if_printf(ic->ic_ifp,
2000 			    "%s: invalid channel %u freq %u/0x%x\n",
2001 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2002 			return EINVAL;
2003 		}
2004 		c->ic_flags |= iwn_eeprom_channel_flags(channel);
2005 	}
2006 
2007 	return 0;
2008 }
2009 
2010 static void
2011 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2012 {
2013 	struct iwn_eeprom_enhinfo enhinfo[35];
2014 	struct ifnet *ifp = sc->sc_ifp;
2015 	struct ieee80211com *ic = ifp->if_l2com;
2016 	struct ieee80211_channel *c;
2017 	uint16_t val, base;
2018 	int8_t maxpwr;
2019 	uint8_t flags;
2020 	int i, j;
2021 
2022 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2023 	base = le16toh(val);
2024 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2025 	    enhinfo, sizeof enhinfo);
2026 
2027 	for (i = 0; i < nitems(enhinfo); i++) {
2028 		flags = enhinfo[i].flags;
2029 		if (!(flags & IWN_ENHINFO_VALID))
2030 			continue;	/* Skip invalid entries. */
2031 
2032 		maxpwr = 0;
2033 		if (sc->txchainmask & IWN_ANT_A)
2034 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2035 		if (sc->txchainmask & IWN_ANT_B)
2036 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2037 		if (sc->txchainmask & IWN_ANT_C)
2038 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2039 		if (sc->ntxchains == 2)
2040 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2041 		else if (sc->ntxchains == 3)
2042 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2043 
2044 		for (j = 0; j < ic->ic_nchans; j++) {
2045 			c = &ic->ic_channels[j];
2046 			if ((flags & IWN_ENHINFO_5GHZ)) {
2047 				if (!IEEE80211_IS_CHAN_A(c))
2048 					continue;
2049 			} else if ((flags & IWN_ENHINFO_OFDM)) {
2050 				if (!IEEE80211_IS_CHAN_G(c))
2051 					continue;
2052 			} else if (!IEEE80211_IS_CHAN_B(c))
2053 				continue;
2054 			if ((flags & IWN_ENHINFO_HT40)) {
2055 				if (!IEEE80211_IS_CHAN_HT40(c))
2056 					continue;
2057 			} else {
2058 				if (IEEE80211_IS_CHAN_HT40(c))
2059 					continue;
2060 			}
2061 			if (enhinfo[i].chan != 0 &&
2062 			    enhinfo[i].chan != c->ic_ieee)
2063 				continue;
2064 
2065 			DPRINTF(sc, IWN_DEBUG_RESET,
2066 			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2067 			    c->ic_flags, maxpwr / 2);
2068 			c->ic_maxregpower = maxpwr / 2;
2069 			c->ic_maxpower = maxpwr;
2070 		}
2071 	}
2072 }
2073 
2074 static struct ieee80211_node *
2075 iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2076 {
2077 	return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2078 }
2079 
2080 static __inline int
2081 rate2plcp(int rate)
2082 {
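	/*
	 * Map a net80211 rate (in 0.5Mb/s units) to the PLCP signal value
	 * expected by the firmware: OFDM rates use the standard 802.11a
	 * RATE codes, CCK rates are expressed in 0.1Mb/s units.
	 */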
2083 	switch (rate & 0xff) {
2084 	case 12:	return 0xd;
2085 	case 18:	return 0xf;
2086 	case 24:	return 0x5;
2087 	case 36:	return 0x7;
2088 	case 48:	return 0x9;
2089 	case 72:	return 0xb;
2090 	case 96:	return 0x1;
2091 	case 108:	return 0x3;
2092 	case 2:		return 10;
2093 	case 4:		return 20;
2094 	case 11:	return 55;
2095 	case 22:	return 110;
2096 	}
2097 	return 0;
2098 }
2099 
2100 static void
2101 iwn_newassoc(struct ieee80211_node *ni, int isnew)
2102 {
2103 #define	RV(v)	((v) & IEEE80211_RATE_VAL)
2104 	struct ieee80211com *ic = ni->ni_ic;
2105 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2106 	struct iwn_node *wn = (void *)ni;
2107 	uint8_t txant1, txant2;
2108 	int i, plcp, rate, ridx;
2109 
2110 	/* Use the first valid TX antenna. */
2111 	txant1 = IWN_LSB(sc->txchainmask);
2112 	txant2 = IWN_LSB(sc->txchainmask & ~txant1);
2113 
2114 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
2115 		ridx = ni->ni_rates.rs_nrates - 1;
2116 		for (i = ni->ni_htrates.rs_nrates - 1; i >= 0; i--) {
2117 			plcp = RV(ni->ni_htrates.rs_rates[i]) | IWN_RFLAG_MCS;
2118 			if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2119 				plcp |= IWN_RFLAG_HT40;
2120 				if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2121 					plcp |= IWN_RFLAG_SGI;
2122 			} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
2123 				plcp |= IWN_RFLAG_SGI;
2124 			if (RV(ni->ni_htrates.rs_rates[i]) > 7)
2125 				plcp |= IWN_RFLAG_ANT(txant1 | txant2);
2126 			else
2127 				plcp |= IWN_RFLAG_ANT(txant1);
2128 			if (ridx >= 0) {
2129 				rate = RV(ni->ni_rates.rs_rates[ridx]);
2130 				wn->ridx[rate] = plcp;
2131 			}
2132 			wn->ridx[IEEE80211_RATE_MCS | i] = plcp;
2133 			ridx--;
2134 		}
2135 	} else {
2136 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
2137 			rate = RV(ni->ni_rates.rs_rates[i]);
2138 			plcp = rate2plcp(rate);
2139 			ridx = ic->ic_rt->rateCodeToIndex[rate];
2140 			if (ridx < IWN_RIDX_OFDM6 &&
2141 			    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2142 				plcp |= IWN_RFLAG_CCK;
2143 			plcp |= IWN_RFLAG_ANT(txant1);
2144 			wn->ridx[rate] = htole32(plcp);
2145 		}
2146 	}
2147 #undef	RV
2148 }
2149 
2150 static int
2151 iwn_media_change(struct ifnet *ifp)
2152 {
2153 	int error;
2154 
2155 	error = ieee80211_media_change(ifp);
2156 	/* NB: only the fixed rate can change and that doesn't need a reset */
2157 	return (error == ENETRESET ? 0 : error);
2158 }
2159 
2160 static int
2161 iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2162 {
2163 	struct iwn_vap *ivp = IWN_VAP(vap);
2164 	struct ieee80211com *ic = vap->iv_ic;
2165 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2166 	int error = 0;
2167 
2168 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2169 	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2170 
2171 	IEEE80211_UNLOCK(ic);
2172 	IWN_LOCK(sc);
2173 	callout_stop(&sc->calib_to);
2174 
2175 	switch (nstate) {
2176 	case IEEE80211_S_ASSOC:
2177 		if (vap->iv_state != IEEE80211_S_RUN)
2178 			break;
2179 		/* FALLTHROUGH */
2180 	case IEEE80211_S_AUTH:
2181 		if (vap->iv_state == IEEE80211_S_AUTH)
2182 			break;
2183 
2184 		/*
2185 		 * !AUTH -> AUTH transition requires state reset to handle
2186 		 * reassociations correctly.
2187 		 */
2188 		sc->rxon.associd = 0;
2189 		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
2190 		sc->calib.state = IWN_CALIB_STATE_INIT;
2191 
2192 		if ((error = iwn_auth(sc, vap)) != 0) {
2193 			device_printf(sc->sc_dev,
2194 			    "%s: could not move to auth state\n", __func__);
2195 		}
2196 		break;
2197 
2198 	case IEEE80211_S_RUN:
2199 		/*
2200 		 * RUN -> RUN transition; just restart the timers.
2201 		 */
2202 		if (vap->iv_state == IEEE80211_S_RUN) {
2203 			sc->calib_cnt = 0;
2204 			break;
2205 		}
2206 
2207 		/*
2208 		 * !RUN -> RUN requires setting the association id
2209 		 * which is done with a firmware cmd.  We also defer
2210 		 * starting the timers until that work is done.
2211 		 */
2212 		if ((error = iwn_run(sc, vap)) != 0) {
2213 			device_printf(sc->sc_dev,
2214 			    "%s: could not move to run state\n", __func__);
2215 		}
2216 		break;
2217 
2218 	case IEEE80211_S_INIT:
2219 		sc->calib.state = IWN_CALIB_STATE_INIT;
2220 		break;
2221 
2222 	default:
2223 		break;
2224 	}
2225 	IWN_UNLOCK(sc);
2226 	IEEE80211_LOCK(ic);
2227 	if (error != 0)
2228 		return error;
2229 	return ivp->iv_newstate(vap, nstate, arg);
2230 }
2231 
2232 static void
2233 iwn_calib_timeout(void *arg)
2234 {
2235 	struct iwn_softc *sc = arg;
2236 
2237 	IWN_LOCK_ASSERT(sc);
2238 
2239 	/* Force automatic TX power calibration every 60 secs. */
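	/* The callout below fires every 500ms, so 120 callouts = 60 seconds. */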
2240 	if (++sc->calib_cnt >= 120) {
2241 		uint32_t flags = 0;
2242 
2243 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2244 		    "sending request for statistics");
2245 		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2246 		    sizeof flags, 1);
2247 		sc->calib_cnt = 0;
2248 	}
2249 	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2250 	    sc);
2251 }
2252 
2253 /*
2254  * Process an RX_PHY firmware notification.  This is usually immediately
2255  * followed by an MPDU_RX_DONE notification.
2256  */
2257 static void
2258 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2259     struct iwn_rx_data *data)
2260 {
2261 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2262 
2263 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2264 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2265 
2266 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2267 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2268 	sc->last_rx_valid = 1;
2269 }
2270 
2271 /*
2272  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2273  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2274  */
2275 static void
2276 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2277     struct iwn_rx_data *data)
2278 {
2279 	struct iwn_ops *ops = &sc->ops;
2280 	struct ifnet *ifp = sc->sc_ifp;
2281 	struct ieee80211com *ic = ifp->if_l2com;
2282 	struct iwn_rx_ring *ring = &sc->rxq;
2283 	struct ieee80211_frame *wh;
2284 	struct ieee80211_node *ni;
2285 	struct mbuf *m, *m1;
2286 	struct iwn_rx_stat *stat;
2287 	caddr_t head;
2288 	bus_addr_t paddr;
2289 	uint32_t flags;
2290 	int error, len, rssi, nf;
2291 
2292 	if (desc->type == IWN_MPDU_RX_DONE) {
2293 		/* Check for prior RX_PHY notification. */
2294 		if (!sc->last_rx_valid) {
2295 			DPRINTF(sc, IWN_DEBUG_ANY,
2296 			    "%s: missing RX_PHY\n", __func__);
2297 			return;
2298 		}
2299 		stat = &sc->last_rx_stat;
2300 	} else
2301 		stat = (struct iwn_rx_stat *)(desc + 1);
2302 
2303 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2304 
2305 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2306 		device_printf(sc->sc_dev,
2307 		    "%s: invalid RX statistic header, len %d\n", __func__,
2308 		    stat->cfg_phy_len);
2309 		return;
2310 	}
2311 	if (desc->type == IWN_MPDU_RX_DONE) {
2312 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2313 		head = (caddr_t)(mpdu + 1);
2314 		len = le16toh(mpdu->len);
2315 	} else {
2316 		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2317 		len = le16toh(stat->len);
2318 	}
2319 
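	/*
	 * A 32-bit status word follows the frame data; it carries the
	 * FCS status checked below.
	 */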
2320 	flags = le32toh(*(uint32_t *)(head + len));
2321 
2322 	/* Discard frames with a bad FCS early. */
2323 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2324 		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2325 		    __func__, flags);
2326 		ifp->if_ierrors++;
2327 		return;
2328 	}
2329 	/* Discard frames that are too short. */
2330 	if (len < sizeof (*wh)) {
2331 		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2332 		    __func__, len);
2333 		ifp->if_ierrors++;
2334 		return;
2335 	}
2336 
2337 	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2338 	if (m1 == NULL) {
2339 		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2340 		    __func__);
2341 		ifp->if_ierrors++;
2342 		return;
2343 	}
2344 	bus_dmamap_unload(ring->data_dmat, data->map);
2345 
2346 	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2347 	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2348 	if (error != 0 && error != EFBIG) {
2349 		device_printf(sc->sc_dev,
2350 		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2351 		m_freem(m1);
2352 
2353 		/* Try to reload the old mbuf. */
2354 		error = bus_dmamap_load(ring->data_dmat, data->map,
2355 		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
2356 		    &paddr, BUS_DMA_NOWAIT);
2357 		if (error != 0 && error != EFBIG) {
2358 			panic("%s: could not load old RX mbuf", __func__);
2359 		}
2360 		/* Physical address may have changed. */
2361 		ring->desc[ring->cur] = htole32(paddr >> 8);
2362 		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
2363 		    BUS_DMASYNC_PREWRITE);
2364 		ifp->if_ierrors++;
2365 		return;
2366 	}
2367 
2368 	m = data->m;
2369 	data->m = m1;
2370 	/* Update RX descriptor. */
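	/* NB: the hardware takes RX buffer addresses in 256-byte units (>> 8). */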
2371 	ring->desc[ring->cur] = htole32(paddr >> 8);
2372 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2373 	    BUS_DMASYNC_PREWRITE);
2374 
2375 	/* Finalize mbuf. */
2376 	m->m_pkthdr.rcvif = ifp;
2377 	m->m_data = head;
2378 	m->m_pkthdr.len = m->m_len = len;
2379 
2380 	/* Grab a reference to the source node. */
2381 	wh = mtod(m, struct ieee80211_frame *);
2382 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2383 	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2384 	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2385 
2386 	rssi = ops->get_rssi(sc, stat);
2387 
2388 	if (ieee80211_radiotap_active(ic)) {
2389 		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2390 
2391 		tap->wr_flags = 0;
2392 		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2393 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2394 		tap->wr_dbm_antsignal = (int8_t)rssi;
2395 		tap->wr_dbm_antnoise = (int8_t)nf;
2396 		tap->wr_tsft = stat->tstamp;
2397 		switch (stat->rate) {
2398 		/* CCK rates. */
2399 		case  10: tap->wr_rate =   2; break;
2400 		case  20: tap->wr_rate =   4; break;
2401 		case  55: tap->wr_rate =  11; break;
2402 		case 110: tap->wr_rate =  22; break;
2403 		/* OFDM rates. */
2404 		case 0xd: tap->wr_rate =  12; break;
2405 		case 0xf: tap->wr_rate =  18; break;
2406 		case 0x5: tap->wr_rate =  24; break;
2407 		case 0x7: tap->wr_rate =  36; break;
2408 		case 0x9: tap->wr_rate =  48; break;
2409 		case 0xb: tap->wr_rate =  72; break;
2410 		case 0x1: tap->wr_rate =  96; break;
2411 		case 0x3: tap->wr_rate = 108; break;
2412 		/* Unknown rate: should not happen. */
2413 		default:  tap->wr_rate =   0;
2414 		}
2415 	}
2416 
2417 	IWN_UNLOCK(sc);
2418 
2419 	/* Send the frame to the 802.11 layer. */
2420 	if (ni != NULL) {
2421 		if (ni->ni_flags & IEEE80211_NODE_HT)
2422 			m->m_flags |= M_AMPDU;
2423 		(void)ieee80211_input(ni, m, rssi - nf, nf);
2424 		/* Node is no longer needed. */
2425 		ieee80211_free_node(ni);
2426 	} else
2427 		(void)ieee80211_input_all(ic, m, rssi - nf, nf);
2428 
2429 	IWN_LOCK(sc);
2430 }
2431 
2432 /* Process an incoming Compressed BlockAck. */
2433 static void
2434 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2435     struct iwn_rx_data *data)
2436 {
2437 	struct iwn_ops *ops = &sc->ops;
2438 	struct ifnet *ifp = sc->sc_ifp;
2439 	struct iwn_node *wn;
2440 	struct ieee80211_node *ni;
2441 	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2442 	struct iwn_tx_ring *txq;
2443 	struct iwn_tx_data *txdata;
2444 	struct ieee80211_tx_ampdu *tap;
2445 	struct mbuf *m;
2446 	uint64_t bitmap;
2447 	uint16_t ssn;
2448 	uint8_t tid;
2449 	int ackfailcnt = 0, i, lastidx, qid, *res, shift;
2450 
2451 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2452 
2453 	qid = le16toh(ba->qid);
2454 	txq = &sc->txq[ba->qid];
2455 	tap = sc->qid2tap[ba->qid];
2456 	tid = tap->txa_tid;
2457 	wn = (void *)tap->txa_ni;
2458 
2459 	res = NULL;
2460 	ssn = 0;
2461 	if (!IEEE80211_AMPDU_RUNNING(tap)) {
2462 		res = tap->txa_private;
2463 		ssn = tap->txa_start & 0xfff;
2464 	}
2465 
2466 	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
2467 		txdata = &txq->data[txq->read];
2468 
2469 		/* Unmap and free mbuf. */
2470 		bus_dmamap_sync(txq->data_dmat, txdata->map,
2471 		    BUS_DMASYNC_POSTWRITE);
2472 		bus_dmamap_unload(txq->data_dmat, txdata->map);
2473 		m = txdata->m, txdata->m = NULL;
2474 		ni = txdata->ni, txdata->ni = NULL;
2475 
2476 		KASSERT(ni != NULL, ("no node"));
2477 		KASSERT(m != NULL, ("no mbuf"));
2478 
2479 		if (m->m_flags & M_TXCB)
2480 			ieee80211_process_callback(ni, m, 1);
2481 
2482 		m_freem(m);
2483 		ieee80211_free_node(ni);
2484 
2485 		txq->queued--;
2486 		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2487 	}
2488 
2489 	if (txq->queued == 0 && res != NULL) {
2490 		iwn_nic_lock(sc);
2491 		ops->ampdu_tx_stop(sc, qid, tid, ssn);
2492 		iwn_nic_unlock(sc);
2493 		sc->qid2tap[qid] = NULL;
2494 		free(res, M_DEVBUF);
2495 		return;
2496 	}
2497 
2498 	if (wn->agg[tid].bitmap == 0)
2499 		return;
2500 
2501 	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
2502 	if (shift < 0)
2503 		shift += 0x100;
2504 
2505 	if (wn->agg[tid].nframes > (64 - shift))
2506 		return;
2507 
2508 	ni = tap->txa_ni;
2509 	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
2510 	for (i = 0; bitmap; i++) {
2511 		if ((bitmap & 1) == 0) {
2512 			ifp->if_oerrors++;
2513 			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2514 			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2515 		} else {
2516 			ifp->if_opackets++;
2517 			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2518 			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2519 		}
2520 		bitmap >>= 1;
2521 	}
2522 }
2523 
2524 /*
2525  * Process a CALIBRATION_RESULT notification sent by the initialization
2526  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2527  */
2528 static void
2529 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2530     struct iwn_rx_data *data)
2531 {
2532 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2533 	int len, idx = -1;
2534 
2535 	/* Runtime firmware should not send such a notification. */
2536 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2537 		return;
2538 
2539 	len = (le32toh(desc->len) & 0x3fff) - 4;
2540 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2541 
2542 	switch (calib->code) {
2543 	case IWN5000_PHY_CALIB_DC:
2544 		if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
2545 		    (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2546 		     sc->hw_type >= IWN_HW_REV_TYPE_6000) &&
2547 		     sc->hw_type != IWN_HW_REV_TYPE_6050)
2548 			idx = 0;
2549 		break;
2550 	case IWN5000_PHY_CALIB_LO:
2551 		idx = 1;
2552 		break;
2553 	case IWN5000_PHY_CALIB_TX_IQ:
2554 		idx = 2;
2555 		break;
2556 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2557 		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2558 		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2559 			idx = 3;
2560 		break;
2561 	case IWN5000_PHY_CALIB_BASE_BAND:
2562 		idx = 4;
2563 		break;
2564 	}
2565 	if (idx == -1)	/* Ignore other results. */
2566 		return;
2567 
2568 	/* Save calibration result. */
2569 	if (sc->calibcmd[idx].buf != NULL)
2570 		free(sc->calibcmd[idx].buf, M_DEVBUF);
2571 	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2572 	if (sc->calibcmd[idx].buf == NULL) {
2573 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2574 		    "not enough memory for calibration result %d\n",
2575 		    calib->code);
2576 		return;
2577 	}
2578 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2579 	    "saving calibration result code=%d len=%d\n", calib->code, len);
2580 	sc->calibcmd[idx].len = len;
2581 	memcpy(sc->calibcmd[idx].buf, calib, len);
2582 }
2583 
2584 /*
2585  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2586  * The latter is sent by the firmware after each received beacon.
2587  */
2588 static void
2589 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2590     struct iwn_rx_data *data)
2591 {
2592 	struct iwn_ops *ops = &sc->ops;
2593 	struct ifnet *ifp = sc->sc_ifp;
2594 	struct ieee80211com *ic = ifp->if_l2com;
2595 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2596 	struct iwn_calib_state *calib = &sc->calib;
2597 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2598 	int temp;
2599 
2600 	/* Ignore statistics received during a scan. */
2601 	if (vap->iv_state != IEEE80211_S_RUN ||
2602 	    (ic->ic_flags & IEEE80211_F_SCAN))
2603 		return;
2604 
2605 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2606 
2607 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
2608 	    __func__, desc->type);
2609 	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2610 
2611 	/* Test if temperature has changed. */
2612 	if (stats->general.temp != sc->rawtemp) {
2613 		/* Convert "raw" temperature to degC. */
2614 		sc->rawtemp = stats->general.temp;
2615 		temp = ops->get_temperature(sc);
2616 		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2617 		    __func__, temp);
2618 
2619 		/* Update TX power if need be (4965AGN only). */
2620 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2621 			iwn4965_power_calibration(sc, temp);
2622 	}
2623 
2624 	if (desc->type != IWN_BEACON_STATISTICS)
2625 		return;	/* Reply to a statistics request. */
2626 
2627 	sc->noise = iwn_get_noise(&stats->rx.general);
2628 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2629 
2630 	/* Test that RSSI and noise are present in stats report. */
2631 	if (le32toh(stats->rx.general.flags) != 1) {
2632 		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2633 		    "received statistics without RSSI");
2634 		return;
2635 	}
2636 
2637 	if (calib->state == IWN_CALIB_STATE_ASSOC)
2638 		iwn_collect_noise(sc, &stats->rx.general);
2639 	else if (calib->state == IWN_CALIB_STATE_RUN)
2640 		iwn_tune_sensitivity(sc, &stats->rx);
2641 }
2642 
2643 /*
2644  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2645  * and 5000 adapters use different, incompatible TX status formats.
2646  */
2647 static void
2648 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2649     struct iwn_rx_data *data)
2650 {
2651 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2652 	struct iwn_tx_ring *ring;
2653 	int qid;
2654 
2655 	qid = desc->qid & 0xf;
2656 	ring = &sc->txq[qid];
2657 
2658 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2659 	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2660 	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2661 	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2662 	    le32toh(stat->status));
2663 
2664 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2665 	if (qid >= sc->firstaggqueue) {
2666 		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2667 		    &stat->status);
2668 	} else {
2669 		iwn_tx_done(sc, desc, stat->ackfailcnt,
2670 		    le32toh(stat->status) & 0xff);
2671 	}
2672 }
2673 
2674 static void
2675 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2676     struct iwn_rx_data *data)
2677 {
2678 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2679 	struct iwn_tx_ring *ring;
2680 	int qid;
2681 
2682 	qid = desc->qid & 0xf;
2683 	ring = &sc->txq[qid];
2684 
2685 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2686 	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2687 	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2688 	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2689 	    le32toh(stat->status));
2690 
2691 #ifdef notyet
2692 	/* Reset TX scheduler slot. */
2693 	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2694 #endif
2695 
2696 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2697 	if (qid >= sc->firstaggqueue) {
2698 		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2699 		    &stat->status);
2700 	} else {
2701 		iwn_tx_done(sc, desc, stat->ackfailcnt,
2702 		    le16toh(stat->status) & 0xff);
2703 	}
2704 }
2705 
2706 /*
2707  * Adapter-independent backend for TX_DONE firmware notifications.
2708  */
2709 static void
2710 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2711     uint8_t status)
2712 {
2713 	struct ifnet *ifp = sc->sc_ifp;
2714 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2715 	struct iwn_tx_data *data = &ring->data[desc->idx];
2716 	struct mbuf *m;
2717 	struct ieee80211_node *ni;
2718 	struct ieee80211vap *vap;
2719 
2720 	KASSERT(data->ni != NULL, ("no node"));
2721 
2722 	/* Unmap and free mbuf. */
2723 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2724 	bus_dmamap_unload(ring->data_dmat, data->map);
2725 	m = data->m, data->m = NULL;
2726 	ni = data->ni, data->ni = NULL;
2727 	vap = ni->ni_vap;
2728 
2729 	if (m->m_flags & M_TXCB) {
2730 		/*
2731 		 * Channels marked for "radar" require traffic to be received
2732 		 * to unlock before we can transmit.  Until traffic is seen
2733 		 * any attempt to transmit is returned immediately with status
2734 		 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
2735  * happen on the first authenticate after scanning.  To work around
2736 		 * this we ignore a failure of this sort in AUTH state so the
2737 		 * 802.11 layer will fall back to using a timeout to wait for
2738 		 * the AUTH reply.  This allows the firmware time to see
2739 		 * traffic so a subsequent retry of AUTH succeeds.  It's
2740 		 * unclear why the firmware does not maintain state for
2741 		 * channels recently visited as this would allow immediate
2742 		 * use of the channel after a scan (where we see traffic).
2743 		 */
2744 		if (status == IWN_TX_FAIL_TX_LOCKED &&
2745 		    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2746 			ieee80211_process_callback(ni, m, 0);
2747 		else
2748 			ieee80211_process_callback(ni, m,
2749 			    (status & IWN_TX_FAIL) != 0);
2750 	}
2751 
2752 	/*
2753 	 * Update rate control statistics for the node.
2754 	 */
2755 	if (status & IWN_TX_FAIL) {
2756 		ifp->if_oerrors++;
2757 		ieee80211_ratectl_tx_complete(vap, ni,
2758 		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2759 	} else {
2760 		ifp->if_opackets++;
2761 		ieee80211_ratectl_tx_complete(vap, ni,
2762 		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2763 	}
2764 	m_freem(m);
2765 	ieee80211_free_node(ni);
2766 
2767 	sc->sc_tx_timer = 0;
2768 	if (--ring->queued < IWN_TX_RING_LOMARK) {
2769 		sc->qfullmsk &= ~(1 << ring->qid);
2770 		if (sc->qfullmsk == 0 &&
2771 		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2772 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2773 			iwn_start_locked(ifp);
2774 		}
2775 	}
2776 }
2777 
2778 /*
2779  * Process a "command done" firmware notification.  This is where we wakeup
2780  * Process a "command done" firmware notification.  This is where we wake up
2781  */
2782 static void
2783 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2784 {
2785 	struct iwn_tx_ring *ring = &sc->txq[4];
2786 	struct iwn_tx_data *data;
2787 
2788 	if ((desc->qid & 0xf) != 4)
2789 		return;	/* Not a command ack. */
2790 
2791 	data = &ring->data[desc->idx];
2792 
2793 	/* If the command was mapped in an mbuf, free it. */
2794 	if (data->m != NULL) {
2795 		bus_dmamap_sync(ring->data_dmat, data->map,
2796 		    BUS_DMASYNC_POSTWRITE);
2797 		bus_dmamap_unload(ring->data_dmat, data->map);
2798 		m_freem(data->m);
2799 		data->m = NULL;
2800 	}
2801 	wakeup(&ring->desc[desc->idx]);
2802 }
2803 
2804 static void
2805 iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
2806     void *stat)
2807 {
2808 	struct iwn_ops *ops = &sc->ops;
2809 	struct ifnet *ifp = sc->sc_ifp;
2810 	struct iwn_tx_ring *ring = &sc->txq[qid];
2811 	struct iwn_tx_data *data;
2812 	struct mbuf *m;
2813 	struct iwn_node *wn;
2814 	struct ieee80211_node *ni;
2815 	struct ieee80211_tx_ampdu *tap;
2816 	uint64_t bitmap;
2817 	uint32_t *status = stat;
2818 	uint16_t *aggstatus = stat;
2819 	uint16_t ssn;
2820 	uint8_t tid;
2821 	int bit, i, lastidx, *res, seqno, shift, start;
2822 
2823 #ifdef NOT_YET
2824 	if (nframes == 1) {
2825 		if ((*status & 0xff) != 1 && (*status & 0xff) != 2)
2826 			printf("ieee80211_send_bar()\n");
2827 	}
2828 #endif
2829 
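	/*
	 * Build a bitmap of the frames that were ACKed, relative to the
	 * first frame of the aggregate; the start index and bitmap are
	 * shifted to handle sequence number wrap-around.
	 */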
2830 	bitmap = 0;
2831 	start = idx;
2832 	for (i = 0; i < nframes; i++) {
2833 		if (le16toh(aggstatus[i * 2]) & 0xc)
2834 			continue;
2835 
2836 		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
2837 		bit = idx - start;
2838 		shift = 0;
2839 		if (bit >= 64) {
2840 			shift = 0x100 - idx + start;
2841 			bit = 0;
2842 			start = idx;
2843 		} else if (bit <= -64)
2844 			bit = 0x100 - start + idx;
2845 		else if (bit < 0) {
2846 			shift = start - idx;
2847 			start = idx;
2848 			bit = 0;
2849 		}
2850 		bitmap = bitmap << shift;
2851 		bitmap |= 1ULL << bit;
2852 	}
2853 	tap = sc->qid2tap[qid];
2854 	tid = tap->txa_tid;
2855 	wn = (void *)tap->txa_ni;
2856 	wn->agg[tid].bitmap = bitmap;
2857 	wn->agg[tid].startidx = start;
2858 	wn->agg[tid].nframes = nframes;
2859 
2860 	res = NULL;
2861 	ssn = 0;
2862 	if (!IEEE80211_AMPDU_RUNNING(tap)) {
2863 		res = tap->txa_private;
2864 		ssn = tap->txa_start & 0xfff;
2865 	}
2866 
2867 	seqno = le32toh(*(status + nframes)) & 0xfff;
2868 	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
2869 		data = &ring->data[ring->read];
2870 
2871 		/* Unmap and free mbuf. */
2872 		bus_dmamap_sync(ring->data_dmat, data->map,
2873 		    BUS_DMASYNC_POSTWRITE);
2874 		bus_dmamap_unload(ring->data_dmat, data->map);
2875 		m = data->m, data->m = NULL;
2876 		ni = data->ni, data->ni = NULL;
2877 
2878 		KASSERT(ni != NULL, ("no node"));
2879 		KASSERT(m != NULL, ("no mbuf"));
2880 
2881 		if (m->m_flags & M_TXCB)
2882 			ieee80211_process_callback(ni, m, 1);
2883 
2884 		m_freem(m);
2885 		ieee80211_free_node(ni);
2886 
2887 		ring->queued--;
2888 		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
2889 	}
2890 
2891 	if (ring->queued == 0 && res != NULL) {
2892 		iwn_nic_lock(sc);
2893 		ops->ampdu_tx_stop(sc, qid, tid, ssn);
2894 		iwn_nic_unlock(sc);
2895 		sc->qid2tap[qid] = NULL;
2896 		free(res, M_DEVBUF);
2897 		return;
2898 	}
2899 
2900 	sc->sc_tx_timer = 0;
2901 	if (ring->queued < IWN_TX_RING_LOMARK) {
2902 		sc->qfullmsk &= ~(1 << ring->qid);
2903 		if (sc->qfullmsk == 0 &&
2904 		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2905 			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2906 			iwn_start_locked(ifp);
2907 		}
2908 	}
2909 }
2910 
2911 /*
2912  * Process an INT_FH_RX or INT_SW_RX interrupt.
2913  */
2914 static void
2915 iwn_notif_intr(struct iwn_softc *sc)
2916 {
2917 	struct iwn_ops *ops = &sc->ops;
2918 	struct ifnet *ifp = sc->sc_ifp;
2919 	struct ieee80211com *ic = ifp->if_l2com;
2920 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2921 	uint16_t hw;
2922 
2923 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2924 	    BUS_DMASYNC_POSTREAD);
2925 
2926 	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2927 	while (sc->rxq.cur != hw) {
2928 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2929 		struct iwn_rx_desc *desc;
2930 
2931 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2932 		    BUS_DMASYNC_POSTREAD);
2933 		desc = mtod(data->m, struct iwn_rx_desc *);
2934 
2935 		DPRINTF(sc, IWN_DEBUG_RECV,
2936 		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2937 		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
2938 		    desc->type, iwn_intr_str(desc->type),
2939 		    le16toh(desc->len));
2940 
2941 		if (!(desc->qid & 0x80))	/* Reply to a command. */
2942 			iwn_cmd_done(sc, desc);
2943 
2944 		switch (desc->type) {
2945 		case IWN_RX_PHY:
2946 			iwn_rx_phy(sc, desc, data);
2947 			break;
2948 
2949 		case IWN_RX_DONE:		/* 4965AGN only. */
2950 		case IWN_MPDU_RX_DONE:
2951 			/* An 802.11 frame has been received. */
2952 			iwn_rx_done(sc, desc, data);
2953 			break;
2954 
2955 		case IWN_RX_COMPRESSED_BA:
2956 			/* A Compressed BlockAck has been received. */
2957 			iwn_rx_compressed_ba(sc, desc, data);
2958 			break;
2959 
2960 		case IWN_TX_DONE:
2961 			/* An 802.11 frame has been transmitted. */
2962 			ops->tx_done(sc, desc, data);
2963 			break;
2964 
2965 		case IWN_RX_STATISTICS:
2966 		case IWN_BEACON_STATISTICS:
2967 			iwn_rx_statistics(sc, desc, data);
2968 			break;
2969 
2970 		case IWN_BEACON_MISSED:
2971 		{
2972 			struct iwn_beacon_missed *miss =
2973 			    (struct iwn_beacon_missed *)(desc + 1);
2974 			int misses;
2975 
2976 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2977 			    BUS_DMASYNC_POSTREAD);
2978 			misses = le32toh(miss->consecutive);
2979 
2980 			DPRINTF(sc, IWN_DEBUG_STATE,
2981 			    "%s: beacons missed %d/%d\n", __func__,
2982 			    misses, le32toh(miss->total));
2983 			/*
2984 			 * If more than 5 consecutive beacons are missed,
2985 			 * reinitialize the sensitivity state machine.
2986 			 */
2987 			if (vap->iv_state == IEEE80211_S_RUN &&
2988 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
2989 				if (misses > 5)
2990 					(void)iwn_init_sensitivity(sc);
2991 				if (misses >= vap->iv_bmissthreshold) {
2992 					IWN_UNLOCK(sc);
2993 					ieee80211_beacon_miss(ic);
2994 					IWN_LOCK(sc);
2995 				}
2996 			}
2997 			break;
2998 		}
2999 		case IWN_UC_READY:
3000 		{
3001 			struct iwn_ucode_info *uc =
3002 			    (struct iwn_ucode_info *)(desc + 1);
3003 
3004 			/* The microcontroller is ready. */
3005 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3006 			    BUS_DMASYNC_POSTREAD);
3007 			DPRINTF(sc, IWN_DEBUG_RESET,
3008 			    "microcode alive notification version=%d.%d "
3009 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3010 			    uc->subtype, le32toh(uc->valid));
3011 
3012 			if (le32toh(uc->valid) != 1) {
3013 				device_printf(sc->sc_dev,
3014 				    "microcontroller initialization failed\n");
3015 				break;
3016 			}
3017 			if (uc->subtype == IWN_UCODE_INIT) {
3018 				/* Save microcontroller report. */
3019 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3020 			}
3021 			/* Save the address of the error log in SRAM. */
3022 			sc->errptr = le32toh(uc->errptr);
3023 			break;
3024 		}
3025 		case IWN_STATE_CHANGED:
3026 		{
3027 			uint32_t *status = (uint32_t *)(desc + 1);
3028 
3029 			/*
3030 			 * State change allows hardware switch change to be
3031 			 * noted. However, we handle this in iwn_intr as we
3032 			 * get both the enable and disable interrupts.
3033 			 */
3034 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3035 			    BUS_DMASYNC_POSTREAD);
3036 			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
3037 			    le32toh(*status));
3038 			break;
3039 		}
3040 		case IWN_START_SCAN:
3041 		{
3042 			struct iwn_start_scan *scan =
3043 			    (struct iwn_start_scan *)(desc + 1);
3044 
3045 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3046 			    BUS_DMASYNC_POSTREAD);
3047 			DPRINTF(sc, IWN_DEBUG_ANY,
3048 			    "%s: scanning channel %d status %x\n",
3049 			    __func__, scan->chan, le32toh(scan->status));
3050 			break;
3051 		}
3052 		case IWN_STOP_SCAN:
3053 		{
3054 			struct iwn_stop_scan *scan =
3055 			    (struct iwn_stop_scan *)(desc + 1);
3056 
3057 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3058 			    BUS_DMASYNC_POSTREAD);
3059 			DPRINTF(sc, IWN_DEBUG_STATE,
3060 			    "scan finished nchan=%d status=%d chan=%d\n",
3061 			    scan->nchan, scan->status, scan->chan);
3062 
3063 			IWN_UNLOCK(sc);
3064 			ieee80211_scan_next(vap);
3065 			IWN_LOCK(sc);
3066 			break;
3067 		}
3068 		case IWN5000_CALIBRATION_RESULT:
3069 			iwn5000_rx_calib_results(sc, desc, data);
3070 			break;
3071 
3072 		case IWN5000_CALIBRATION_DONE:
3073 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3074 			wakeup(sc);
3075 			break;
3076 		}
3077 
3078 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3079 	}
3080 
3081 	/* Tell the firmware what we have processed. */
3082 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
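	/* NB: the RX write pointer is kept a multiple of 8, hence the ~7 mask. */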
3083 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3084 }
3085 
3086 /*
3087  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3088  * from power-down sleep mode.
3089  */
3090 static void
3091 iwn_wakeup_intr(struct iwn_softc *sc)
3092 {
3093 	int qid;
3094 
3095 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3096 	    __func__);
3097 
3098 	/* Wakeup RX and TX rings. */
3099 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3100 	for (qid = 0; qid < sc->ntxqs; qid++) {
3101 		struct iwn_tx_ring *ring = &sc->txq[qid];
3102 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3103 	}
3104 }
3105 
3106 static void
3107 iwn_rftoggle_intr(struct iwn_softc *sc)
3108 {
3109 	struct ifnet *ifp = sc->sc_ifp;
3110 	struct ieee80211com *ic = ifp->if_l2com;
3111 	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3112 
3113 	IWN_LOCK_ASSERT(sc);
3114 
3115 	device_printf(sc->sc_dev, "RF switch: radio %s\n",
3116 	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3117 	if (tmp & IWN_GP_CNTRL_RFKILL)
3118 		ieee80211_runtask(ic, &sc->sc_radioon_task);
3119 	else
3120 		ieee80211_runtask(ic, &sc->sc_radiooff_task);
3121 }
3122 
3123 /*
3124  * Dump the error log of the firmware when a firmware panic occurs.  Although
3125  * we can't debug the firmware because it is neither open source nor free,
3126  * the log can help us identify certain classes of problems.
3127  */
3128 static void
3129 iwn_fatal_intr(struct iwn_softc *sc)
3130 {
3131 	struct iwn_fw_dump dump;
3132 	int i;
3133 
3134 	IWN_LOCK_ASSERT(sc);
3135 
3136 	/* Force a complete recalibration on next init. */
3137 	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3138 
3139 	/* Check that the error log address is valid. */
3140 	if (sc->errptr < IWN_FW_DATA_BASE ||
3141 	    sc->errptr + sizeof (dump) >
3142 	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3143 		printf("%s: bad firmware error log address 0x%08x\n", __func__,
3144 		    sc->errptr);
3145 		return;
3146 	}
3147 	if (iwn_nic_lock(sc) != 0) {
3148 		printf("%s: could not read firmware error log\n", __func__);
3149 		return;
3150 	}
3151 	/* Read firmware error log from SRAM. */
3152 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3153 	    sizeof (dump) / sizeof (uint32_t));
3154 	iwn_nic_unlock(sc);
3155 
3156 	if (dump.valid == 0) {
3157 		printf("%s: firmware error log is empty\n", __func__);
3158 		return;
3159 	}
3160 	printf("firmware error log:\n");
3161 	printf("  error type      = \"%s\" (0x%08X)\n",
3162 	    (dump.id < nitems(iwn_fw_errmsg)) ?
3163 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3164 	    dump.id);
3165 	printf("  program counter = 0x%08X\n", dump.pc);
3166 	printf("  source line     = 0x%08X\n", dump.src_line);
3167 	printf("  error data      = 0x%08X%08X\n",
3168 	    dump.error_data[0], dump.error_data[1]);
3169 	printf("  branch link     = 0x%08X%08X\n",
3170 	    dump.branch_link[0], dump.branch_link[1]);
3171 	printf("  interrupt link  = 0x%08X%08X\n",
3172 	    dump.interrupt_link[0], dump.interrupt_link[1]);
3173 	printf("  time            = %u\n", dump.time[0]);
3174 
3175 	/* Dump driver status (TX and RX rings) while we're here. */
3176 	printf("driver status:\n");
3177 	for (i = 0; i < sc->ntxqs; i++) {
3178 		struct iwn_tx_ring *ring = &sc->txq[i];
3179 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3180 		    i, ring->qid, ring->cur, ring->queued);
3181 	}
3182 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
3183 }
3184 
3185 static void
3186 iwn_intr(void *arg)
3187 {
3188 	struct iwn_softc *sc = arg;
3189 	struct ifnet *ifp = sc->sc_ifp;
3190 	uint32_t r1, r2, tmp;
3191 
3192 	IWN_LOCK(sc);
3193 
3194 	/* Disable interrupts. */
3195 	IWN_WRITE(sc, IWN_INT_MASK, 0);
3196 
3197 	/* Read interrupts from ICT (fast) or from registers (slow). */
3198 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3199 		tmp = 0;
3200 		while (sc->ict[sc->ict_cur] != 0) {
3201 			tmp |= sc->ict[sc->ict_cur];
3202 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3203 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3204 		}
3205 		tmp = le32toh(tmp);
3206 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3207 			tmp = 0;
3208 		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
3209 			tmp |= 0x8000;
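		/*
		 * ICT entries pack the interrupt causes into 16 bits; expand
		 * them back to the layout of the INT register (the high byte
		 * moves to bits 24-31).
		 */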
3210 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3211 		r2 = 0;	/* Unused. */
3212 	} else {
3213 		r1 = IWN_READ(sc, IWN_INT);
3214 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3215 			goto done;	/* Hardware gone! */
3216 		r2 = IWN_READ(sc, IWN_FH_INT);
3217 	}
3218 
3219 	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
3220 
3221 	if (r1 == 0 && r2 == 0)
3222 		goto done;	/* Interrupt not for us. */
3223 
3224 	/* Acknowledge interrupts. */
3225 	IWN_WRITE(sc, IWN_INT, r1);
3226 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3227 		IWN_WRITE(sc, IWN_FH_INT, r2);
3228 
3229 	if (r1 & IWN_INT_RF_TOGGLED) {
3230 		iwn_rftoggle_intr(sc);
3231 		goto done;
3232 	}
3233 	if (r1 & IWN_INT_CT_REACHED) {
3234 		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
3235 		    __func__);
3236 	}
3237 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3238 		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
3239 		    __func__);
3240 		/* Dump firmware error log and stop. */
3241 		iwn_fatal_intr(sc);
3242 		ifp->if_flags &= ~IFF_UP;
3243 		iwn_stop_locked(sc);
3244 		goto done;
3245 	}
3246 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3247 	    (r2 & IWN_FH_INT_RX)) {
3248 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3249 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3250 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3251 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3252 			    IWN_INT_PERIODIC_DIS);
3253 			iwn_notif_intr(sc);
3254 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3255 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3256 				    IWN_INT_PERIODIC_ENA);
3257 			}
3258 		} else
3259 			iwn_notif_intr(sc);
3260 	}
3261 
3262 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3263 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3264 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3265 		wakeup(sc);	/* FH DMA transfer completed. */
3266 	}
3267 
3268 	if (r1 & IWN_INT_ALIVE)
3269 		wakeup(sc);	/* Firmware is alive. */
3270 
3271 	if (r1 & IWN_INT_WAKEUP)
3272 		iwn_wakeup_intr(sc);
3273 
3274 done:
3275 	/* Re-enable interrupts. */
3276 	if (ifp->if_flags & IFF_UP)
3277 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3278 
3279 	IWN_UNLOCK(sc);
3280 }
3281 
3282 /*
3283  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3284  * 5000 adapters use a slightly different format).
3285  */
3286 static void
3287 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3288     uint16_t len)
3289 {
3290 	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3291 
3292 	*w = htole16(len + 8);
3293 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3294 	    BUS_DMASYNC_PREWRITE);
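	/*
	 * The first IWN_SCHED_WINSZ entries are apparently mirrored past the
	 * end of the ring so the scheduler always sees a contiguous window.
	 */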
3295 	if (idx < IWN_SCHED_WINSZ) {
3296 		*(w + IWN_TX_RING_COUNT) = *w;
3297 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3298 		    BUS_DMASYNC_PREWRITE);
3299 	}
3300 }
3301 
3302 static void
3303 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3304     uint16_t len)
3305 {
3306 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3307 
3308 	*w = htole16(id << 12 | (len + 8));
3309 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3310 	    BUS_DMASYNC_PREWRITE);
3311 	if (idx < IWN_SCHED_WINSZ) {
3312 		*(w + IWN_TX_RING_COUNT) = *w;
3313 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3314 		    BUS_DMASYNC_PREWRITE);
3315 	}
3316 }
3317 
3318 #ifdef notyet
3319 static void
3320 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3321 {
3322 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3323 
3324 	*w = (*w & htole16(0xf000)) | htole16(1);
3325 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3326 	    BUS_DMASYNC_PREWRITE);
3327 	if (idx < IWN_SCHED_WINSZ) {
3328 		*(w + IWN_TX_RING_COUNT) = *w;
3329 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3330 		    BUS_DMASYNC_PREWRITE);
3331 	}
3332 }
3333 #endif
3334 
3335 static int
3336 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3337 {
3338 	struct iwn_ops *ops = &sc->ops;
3339 	const struct ieee80211_txparam *tp;
3340 	struct ieee80211vap *vap = ni->ni_vap;
3341 	struct ieee80211com *ic = ni->ni_ic;
3342 	struct iwn_node *wn = (void *)ni;
3343 	struct iwn_tx_ring *ring;
3344 	struct iwn_tx_desc *desc;
3345 	struct iwn_tx_data *data;
3346 	struct iwn_tx_cmd *cmd;
3347 	struct iwn_cmd_data *tx;
3348 	struct ieee80211_frame *wh;
3349 	struct ieee80211_key *k = NULL;
3350 	struct mbuf *m1;
3351 	uint32_t flags;
3352 	uint16_t qos;
3353 	u_int hdrlen;
3354 	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3355 	uint8_t tid, ridx, txant, type;
3356 	int ac, i, totlen, error, pad, nsegs = 0, rate;
3357 
3358 	IWN_LOCK_ASSERT(sc);
3359 
3360 	wh = mtod(m, struct ieee80211_frame *);
3361 	hdrlen = ieee80211_anyhdrsize(wh);
3362 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3363 
3364 	/* Select EDCA Access Category and TX ring for this frame. */
3365 	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3366 		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
3367 		tid = qos & IEEE80211_QOS_TID;
3368 	} else {
3369 		qos = 0;
3370 		tid = 0;
3371 	}
3372 	ac = M_WME_GETAC(m);
3373 	if (m->m_flags & M_AMPDU_MPDU) {
3374 		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
3375 
3376 		if (!IEEE80211_AMPDU_RUNNING(tap)) {
3377 			m_freem(m);
3378 			return EINVAL;
3379 		}
3380 
3381 		ac = *(int *)tap->txa_private;
3382 		*(uint16_t *)wh->i_seq =
3383 		    htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
3384 		ni->ni_txseqs[tid]++;
3385 	}
3386 	ring = &sc->txq[ac];
3387 	desc = &ring->desc[ring->cur];
3388 	data = &ring->data[ring->cur];
3389 
3390 	/* Choose a TX rate index. */
3391 	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
3392 	if (type == IEEE80211_FC0_TYPE_MGT)
3393 		rate = tp->mgmtrate;
3394 	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3395 		rate = tp->mcastrate;
3396 	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
3397 		rate = tp->ucastrate;
3398 	else {
3399 		/* XXX pass pktlen */
3400 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3401 		rate = ni->ni_txrate;
3402 	}
3403 	ridx = ic->ic_rt->rateCodeToIndex[rate];
3404 
3405 	/* Encrypt the frame if need be. */
3406 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3407 		/* Retrieve key for TX. */
3408 		k = ieee80211_crypto_encap(ni, m);
3409 		if (k == NULL) {
3410 			m_freem(m);
3411 			return ENOBUFS;
3412 		}
3413 		/* 802.11 header may have moved. */
3414 		wh = mtod(m, struct ieee80211_frame *);
3415 	}
3416 	totlen = m->m_pkthdr.len;
3417 
3418 	if (ieee80211_radiotap_active_vap(vap)) {
3419 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3420 
3421 		tap->wt_flags = 0;
3422 		tap->wt_rate = rate;
3423 		if (k != NULL)
3424 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3425 
3426 		ieee80211_radiotap_tx(vap, m);
3427 	}
3428 
3429 	/* Prepare TX firmware command. */
3430 	cmd = &ring->cmd[ring->cur];
3431 	cmd->code = IWN_CMD_TX_DATA;
3432 	cmd->flags = 0;
3433 	cmd->qid = ring->qid;
3434 	cmd->idx = ring->cur;
3435 
3436 	tx = (struct iwn_cmd_data *)cmd->data;
3437 	/* NB: No need to clear tx, all fields are reinitialized here. */
3438 	tx->scratch = 0;	/* clear "scratch" area */
3439 
3440 	flags = 0;
3441 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3442 		/* Unicast frame, check if an ACK is expected. */
3443 		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
3444 		    IEEE80211_QOS_ACKPOLICY_NOACK)
3445 			flags |= IWN_TX_NEED_ACK;
3446 	}
3447 	if ((wh->i_fc[0] &
3448 	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3449 	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3450 		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
3451 
3452 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3453 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3454 
3455 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3456 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3457 		/* NB: Group frames are sent using CCK in 802.11b/g. */
3458 		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
3459 			flags |= IWN_TX_NEED_RTS;
3460 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3461 		    ridx >= IWN_RIDX_OFDM6) {
3462 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3463 				flags |= IWN_TX_NEED_CTS;
3464 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3465 				flags |= IWN_TX_NEED_RTS;
3466 		}
3467 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3468 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3469 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3470 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3471 				flags |= IWN_TX_NEED_PROTECTION;
3472 			} else
3473 				flags |= IWN_TX_FULL_TXOP;
3474 		}
3475 	}
3476 
3477 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3478 	    type != IEEE80211_FC0_TYPE_DATA)
3479 		tx->id = sc->broadcast_id;
3480 	else
3481 		tx->id = wn->id;
3482 
3483 	if (type == IEEE80211_FC0_TYPE_MGT) {
3484 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3485 
3486 		/* Tell HW to set timestamp in probe responses. */
3487 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3488 			flags |= IWN_TX_INSERT_TSTAMP;
3489 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3490 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3491 			tx->timeout = htole16(3);
3492 		else
3493 			tx->timeout = htole16(2);
3494 	} else
3495 		tx->timeout = htole16(0);
3496 
3497 	if (hdrlen & 3) {
3498 		/* First segment length must be a multiple of 4. */
3499 		flags |= IWN_TX_NEED_PADDING;
3500 		pad = 4 - (hdrlen & 3);
3501 	} else
3502 		pad = 0;
3503 
3504 	tx->len = htole16(totlen);
3505 	tx->tid = tid;
3506 	tx->rts_ntries = 60;
3507 	tx->data_ntries = 15;
3508 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3509 	tx->rate = wn->ridx[rate];
3510 	if (tx->id == sc->broadcast_id) {
3511 		/* Group or management frame. */
3512 		tx->linkq = 0;
3513 		/* XXX Alternate between antenna A and B? */
3514 		txant = IWN_LSB(sc->txchainmask);
3515 		tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3516 	} else {
3517 		tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
3518 		flags |= IWN_TX_LINKQ;	/* enable MRR */
3519 	}
3520 	/* Set physical address of "scratch area". */
3521 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3522 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3523 
3524 	/* Copy 802.11 header in TX command. */
3525 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3526 
3527 	/* Trim 802.11 header. */
3528 	m_adj(m, hdrlen);
3529 	tx->security = 0;
3530 	tx->flags = htole32(flags);
3531 
3532 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3533 	    &nsegs, BUS_DMA_NOWAIT);
3534 	if (error != 0) {
3535 		if (error != EFBIG) {
3536 			device_printf(sc->sc_dev,
3537 			    "%s: can't map mbuf (error %d)\n", __func__, error);
3538 			m_freem(m);
3539 			return error;
3540 		}
3541 		/* Too many DMA segments, linearize mbuf. */
3542 		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3543 		if (m1 == NULL) {
3544 			device_printf(sc->sc_dev,
3545 			    "%s: could not defrag mbuf\n", __func__);
3546 			m_freem(m);
3547 			return ENOBUFS;
3548 		}
3549 		m = m1;
3550 
3551 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3552 		    segs, &nsegs, BUS_DMA_NOWAIT);
3553 		if (error != 0) {
3554 			device_printf(sc->sc_dev,
3555 			    "%s: can't map mbuf (error %d)\n", __func__, error);
3556 			m_freem(m);
3557 			return error;
3558 		}
3559 	}
3560 
3561 	data->m = m;
3562 	data->ni = ni;
3563 
3564 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3565 	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3566 
3567 	/* Fill TX descriptor. */
3568 	desc->nsegs = 1;
3569 	if (m->m_len != 0)
3570 		desc->nsegs += nsegs;
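	/*
	 * Each "len" field below packs the upper bits of the 36-bit DMA
	 * address (IWN_HIADDR) in its low 4 bits and the segment byte
	 * count in the remaining bits, shifted left by 4.
	 */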
3571 	/* First DMA segment is used by the TX command. */
3572 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3573 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3574 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3575 	/* Other DMA segments are for data payload. */
3576 	seg = &segs[0];
3577 	for (i = 1; i <= nsegs; i++) {
3578 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3579 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3580 		    seg->ds_len << 4);
3581 		seg++;
3582 	}
3583 
3584 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3585 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3586 	    BUS_DMASYNC_PREWRITE);
3587 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3588 	    BUS_DMASYNC_PREWRITE);
3589 
3590 	/* Update TX scheduler. */
3591 	if (ring->qid >= sc->firstaggqueue)
3592 		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3593 
3594 	/* Kick TX ring. */
3595 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3596 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3597 
3598 	/* Mark TX ring as full if we reach a certain threshold. */
3599 	if (++ring->queued > IWN_TX_RING_HIMARK)
3600 		sc->qfullmsk |= 1 << ring->qid;
3601 
3602 	return 0;
3603 }
3604 
3605 static int
3606 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3607     struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
3608 {
3609 	struct iwn_ops *ops = &sc->ops;
3610 	struct ifnet *ifp = sc->sc_ifp;
3611 	struct ieee80211vap *vap = ni->ni_vap;
3612 	struct ieee80211com *ic = ifp->if_l2com;
3613 	struct iwn_tx_cmd *cmd;
3614 	struct iwn_cmd_data *tx;
3615 	struct ieee80211_frame *wh;
3616 	struct iwn_tx_ring *ring;
3617 	struct iwn_tx_desc *desc;
3618 	struct iwn_tx_data *data;
3619 	struct mbuf *m1;
3620 	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3621 	uint32_t flags;
3622 	u_int hdrlen;
3623 	int ac, totlen, error, pad, nsegs = 0, i, rate;
3624 	uint8_t ridx, type, txant;
3625 
3626 	IWN_LOCK_ASSERT(sc);
3627 
3628 	wh = mtod(m, struct ieee80211_frame *);
3629 	hdrlen = ieee80211_anyhdrsize(wh);
3630 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3631 
3632 	ac = params->ibp_pri & 3;
3633 
3634 	ring = &sc->txq[ac];
3635 	desc = &ring->desc[ring->cur];
3636 	data = &ring->data[ring->cur];
3637 
3638 	/* Choose a TX rate index. */
3639 	rate = params->ibp_rate0;
3640 	ridx = ic->ic_rt->rateCodeToIndex[rate];
3641 	if (ridx == (uint8_t)-1) {
3642 		/* XXX fall back to mcast/mgmt rate? */
3643 		m_freem(m);
3644 		return EINVAL;
3645 	}
3646 
3647 	totlen = m->m_pkthdr.len;
3648 
3649 	/* Prepare TX firmware command. */
3650 	cmd = &ring->cmd[ring->cur];
3651 	cmd->code = IWN_CMD_TX_DATA;
3652 	cmd->flags = 0;
3653 	cmd->qid = ring->qid;
3654 	cmd->idx = ring->cur;
3655 
3656 	tx = (struct iwn_cmd_data *)cmd->data;
3657 	/* NB: No need to clear tx, all fields are reinitialized here. */
3658 	tx->scratch = 0;	/* clear "scratch" area */
3659 
3660 	flags = 0;
3661 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3662 		flags |= IWN_TX_NEED_ACK;
3663 	if (params->ibp_flags & IEEE80211_BPF_RTS) {
3664 		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3665 			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3666 			flags &= ~IWN_TX_NEED_RTS;
3667 			flags |= IWN_TX_NEED_PROTECTION;
3668 		} else
3669 			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3670 	}
3671 	if (params->ibp_flags & IEEE80211_BPF_CTS) {
3672 		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3673 			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3674 			flags &= ~IWN_TX_NEED_CTS;
3675 			flags |= IWN_TX_NEED_PROTECTION;
3676 		} else
3677 			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3678 	}
3679 	if (type == IEEE80211_FC0_TYPE_MGT) {
3680 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3681 
3682 		/* Tell HW to set timestamp in probe responses. */
3683 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3684 			flags |= IWN_TX_INSERT_TSTAMP;
3685 
3686 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3687 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3688 			tx->timeout = htole16(3);
3689 		else
3690 			tx->timeout = htole16(2);
3691 	} else
3692 		tx->timeout = htole16(0);
3693 
3694 	if (hdrlen & 3) {
3695 		/* First segment length must be a multiple of 4. */
3696 		flags |= IWN_TX_NEED_PADDING;
3697 		pad = 4 - (hdrlen & 3);
3698 	} else
3699 		pad = 0;
3700 
3701 	if (ieee80211_radiotap_active_vap(vap)) {
3702 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3703 
3704 		tap->wt_flags = 0;
3705 		tap->wt_rate = rate;
3706 
3707 		ieee80211_radiotap_tx(vap, m);
3708 	}
3709 
3710 	tx->len = htole16(totlen);
3711 	tx->tid = 0;
3712 	tx->id = sc->broadcast_id;
3713 	tx->rts_ntries = params->ibp_try1;
3714 	tx->data_ntries = params->ibp_try0;
3715 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3716 	tx->rate = htole32(rate2plcp(rate));
3717 	if (ridx < IWN_RIDX_OFDM6 &&
3718 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
3719 		tx->rate |= htole32(IWN_RFLAG_CCK);
3720 	/* Group or management frame. */
3721 	tx->linkq = 0;
3722 	txant = IWN_LSB(sc->txchainmask);
3723 	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3724 	/* Set physical address of "scratch area". */
3725 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3726 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3727 
3728 	/* Copy 802.11 header in TX command. */
3729 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3730 
3731 	/* Trim 802.11 header. */
3732 	m_adj(m, hdrlen);
3733 	tx->security = 0;
3734 	tx->flags = htole32(flags);
3735 
3736 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3737 	    &nsegs, BUS_DMA_NOWAIT);
3738 	if (error != 0) {
3739 		if (error != EFBIG) {
3740 			device_printf(sc->sc_dev,
3741 			    "%s: can't map mbuf (error %d)\n", __func__, error);
3742 			m_freem(m);
3743 			return error;
3744 		}
3745 		/* Too many DMA segments, linearize mbuf. */
3746 		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3747 		if (m1 == NULL) {
3748 			device_printf(sc->sc_dev,
3749 			    "%s: could not defrag mbuf\n", __func__);
3750 			m_freem(m);
3751 			return ENOBUFS;
3752 		}
3753 		m = m1;
3754 
3755 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3756 		    segs, &nsegs, BUS_DMA_NOWAIT);
3757 		if (error != 0) {
3758 			device_printf(sc->sc_dev,
3759 			    "%s: can't map mbuf (error %d)\n", __func__, error);
3760 			m_freem(m);
3761 			return error;
3762 		}
3763 	}
3764 
3765 	data->m = m;
3766 	data->ni = ni;
3767 
3768 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3769 	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3770 
3771 	/* Fill TX descriptor. */
3772 	desc->nsegs = 1;
3773 	if (m->m_len != 0)
3774 		desc->nsegs += nsegs;
3775 	/* First DMA segment is used by the TX command. */
3776 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3777 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3778 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3779 	/* Other DMA segments are for data payload. */
3780 	seg = &segs[0];
3781 	for (i = 1; i <= nsegs; i++) {
3782 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3783 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3784 		    seg->ds_len << 4);
3785 		seg++;
3786 	}
3787 
3788 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3789 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3790 	    BUS_DMASYNC_PREWRITE);
3791 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3792 	    BUS_DMASYNC_PREWRITE);
3793 
3794 	/* Update TX scheduler. */
3795 	if (ring->qid >= sc->firstaggqueue)
3796 		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3797 
3798 	/* Kick TX ring. */
3799 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3800 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3801 
3802 	/* Mark TX ring as full if we reach a certain threshold. */
3803 	if (++ring->queued > IWN_TX_RING_HIMARK)
3804 		sc->qfullmsk |= 1 << ring->qid;
3805 
3806 	return 0;
3807 }
3808 
3809 static int
3810 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3811     const struct ieee80211_bpf_params *params)
3812 {
3813 	struct ieee80211com *ic = ni->ni_ic;
3814 	struct ifnet *ifp = ic->ic_ifp;
3815 	struct iwn_softc *sc = ifp->if_softc;
3816 	int error = 0;
3817 
3818 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3819 		ieee80211_free_node(ni);
3820 		m_freem(m);
3821 		return ENETDOWN;
3822 	}
3823 
3824 	IWN_LOCK(sc);
3825 	if (params == NULL) {
3826 		/*
3827 		 * Legacy path; interpret frame contents to decide
3828 		 * precisely how to send the frame.
3829 		 */
3830 		error = iwn_tx_data(sc, m, ni);
3831 	} else {
3832 		/*
3833 		 * Caller supplied explicit parameters to use in
3834 		 * sending the frame.
3835 		 */
3836 		error = iwn_tx_data_raw(sc, m, ni, params);
3837 	}
3838 	if (error != 0) {
3839 		/* NB: m is reclaimed on tx failure */
3840 		ieee80211_free_node(ni);
3841 		ifp->if_oerrors++;
3842 	}
3843 	sc->sc_tx_timer = 5;
3844 
3845 	IWN_UNLOCK(sc);
3846 	return error;
3847 }
3848 
3849 static void
3850 iwn_start(struct ifnet *ifp)
3851 {
3852 	struct iwn_softc *sc = ifp->if_softc;
3853 
3854 	IWN_LOCK(sc);
3855 	iwn_start_locked(ifp);
3856 	IWN_UNLOCK(sc);
3857 }
3858 
3859 static void
3860 iwn_start_locked(struct ifnet *ifp)
3861 {
3862 	struct iwn_softc *sc = ifp->if_softc;
3863 	struct ieee80211_node *ni;
3864 	struct mbuf *m;
3865 
3866 	IWN_LOCK_ASSERT(sc);
3867 
3868 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
3869 	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
3870 		return;
3871 
3872 	for (;;) {
3873 		if (sc->qfullmsk != 0) {
3874 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3875 			break;
3876 		}
3877 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3878 		if (m == NULL)
3879 			break;
3880 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3881 		if (iwn_tx_data(sc, m, ni) != 0) {
3882 			ieee80211_free_node(ni);
3883 			ifp->if_oerrors++;
3884 			continue;
3885 		}
3886 		sc->sc_tx_timer = 5;
3887 	}
3888 }
3889 
3890 static void
3891 iwn_watchdog(void *arg)
3892 {
3893 	struct iwn_softc *sc = arg;
3894 	struct ifnet *ifp = sc->sc_ifp;
3895 	struct ieee80211com *ic = ifp->if_l2com;
3896 
3897 	IWN_LOCK_ASSERT(sc);
3898 
3899 	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
3900 
3901 	if (sc->sc_tx_timer > 0) {
3902 		if (--sc->sc_tx_timer == 0) {
3903 			if_printf(ifp, "device timeout\n");
3904 			ieee80211_runtask(ic, &sc->sc_reinit_task);
3905 			return;
3906 		}
3907 	}
3908 	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
3909 }
3910 
3911 static int
3912 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3913 {
3914 	struct iwn_softc *sc = ifp->if_softc;
3915 	struct ieee80211com *ic = ifp->if_l2com;
3916 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3917 	struct ifreq *ifr = (struct ifreq *) data;
3918 	int error = 0, startall = 0, stop = 0;
3919 
3920 	switch (cmd) {
3921 	case SIOCGIFADDR:
3922 		error = ether_ioctl(ifp, cmd, data);
3923 		break;
3924 	case SIOCSIFFLAGS:
3925 		IWN_LOCK(sc);
3926 		if (ifp->if_flags & IFF_UP) {
3927 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3928 				iwn_init_locked(sc);
3929 				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3930 					startall = 1;
3931 				else
3932 					stop = 1;
3933 			}
3934 		} else {
3935 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3936 				iwn_stop_locked(sc);
3937 		}
3938 		IWN_UNLOCK(sc);
3939 		if (startall)
3940 			ieee80211_start_all(ic);
3941 		else if (vap != NULL && stop)
3942 			ieee80211_stop(vap);
3943 		break;
3944 	case SIOCGIFMEDIA:
3945 		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3946 		break;
3947 	default:
3948 		error = EINVAL;
3949 		break;
3950 	}
3951 	return error;
3952 }
3953 
3954 /*
3955  * Send a command to the firmware.
3956  */
3957 static int
3958 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3959 {
3960 	struct iwn_tx_ring *ring = &sc->txq[4];
3961 	struct iwn_tx_desc *desc;
3962 	struct iwn_tx_data *data;
3963 	struct iwn_tx_cmd *cmd;
3964 	struct mbuf *m;
3965 	bus_addr_t paddr;
3966 	int totlen, error;
3967 
3968 	if (async == 0)
3969 		IWN_LOCK_ASSERT(sc);
3970 
3971 	desc = &ring->desc[ring->cur];
3972 	data = &ring->data[ring->cur];
3973 	totlen = 4 + size;
3974 
3975 	if (size > sizeof cmd->data) {
3976 		/* Command is too large to fit in a descriptor. */
3977 		if (totlen > MCLBYTES)
3978 			return EINVAL;
3979 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3980 		if (m == NULL)
3981 			return ENOMEM;
3982 		cmd = mtod(m, struct iwn_tx_cmd *);
3983 		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3984 		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3985 		if (error != 0) {
3986 			m_freem(m);
3987 			return error;
3988 		}
3989 		data->m = m;
3990 	} else {
3991 		cmd = &ring->cmd[ring->cur];
3992 		paddr = data->cmd_paddr;
3993 	}
3994 
3995 	cmd->code = code;
3996 	cmd->flags = 0;
3997 	cmd->qid = ring->qid;
3998 	cmd->idx = ring->cur;
3999 	memcpy(cmd->data, buf, size);
4000 
4001 	desc->nsegs = 1;
4002 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
4003 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
4004 
4005 	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
4006 	    __func__, iwn_intr_str(cmd->code), cmd->code,
4007 	    cmd->flags, cmd->qid, cmd->idx);
4008 
4009 	if (size > sizeof cmd->data) {
4010 		bus_dmamap_sync(ring->data_dmat, data->map,
4011 		    BUS_DMASYNC_PREWRITE);
4012 	} else {
4013 		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4014 		    BUS_DMASYNC_PREWRITE);
4015 	}
4016 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4017 	    BUS_DMASYNC_PREWRITE);
4018 
4019 	/* Kick command ring. */
4020 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4021 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4022 
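	/*
	 * Synchronous callers sleep on the descriptor address for up to
	 * one second, waiting for the command completion path to wake
	 * them; asynchronous callers return immediately.
	 */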
4023 	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
4024 }
4025 
4026 static int
4027 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4028 {
4029 	struct iwn4965_node_info hnode;
4030 	caddr_t src, dst;
4031 
4032 	/*
4033 	 * We use the node structure for 5000 Series internally (it is
4034 	 * a superset of the one for 4965AGN). We thus copy the common
4035 	 * fields before sending the command.
4036 	 */
4037 	src = (caddr_t)node;
4038 	dst = (caddr_t)&hnode;
4039 	memcpy(dst, src, 48);
4040 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4041 	memcpy(dst + 48, src + 72, 20);
4042 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4043 }
4044 
4045 static int
4046 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4047 {
4048 	/* Direct mapping. */
4049 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4050 }
4051 
4052 static int
4053 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4054 {
4055 #define	RV(v)	((v) & IEEE80211_RATE_VAL)
4056 	struct iwn_node *wn = (void *)ni;
4057 	struct ieee80211_rateset *rs = &ni->ni_rates;
4058 	struct iwn_cmd_link_quality linkq;
4059 	uint8_t txant;
4060 	int i, rate, txrate;
4061 
4062 	/* Use the first valid TX antenna. */
4063 	txant = IWN_LSB(sc->txchainmask);
4064 
4065 	memset(&linkq, 0, sizeof linkq);
4066 	linkq.id = wn->id;
4067 	linkq.antmsk_1stream = txant;
4068 	linkq.antmsk_2stream = IWN_ANT_AB;
4069 	linkq.ampdu_max = 64;
4070 	linkq.ampdu_threshold = 3;
4071 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4072 
4073 	/* Start at highest available bit-rate. */
4074 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4075 		txrate = ni->ni_htrates.rs_nrates - 1;
4076 	else
4077 		txrate = rs->rs_nrates - 1;
4078 	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4079 		if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4080 			rate = IEEE80211_RATE_MCS | txrate;
4081 		else
4082 			rate = RV(rs->rs_rates[txrate]);
4083 		linkq.retry[i] = wn->ridx[rate];
4084 
4085 		if ((le32toh(wn->ridx[rate]) & IWN_RFLAG_MCS) &&
4086 		    RV(le32toh(wn->ridx[rate])) > 7)
4087 			linkq.mimo = i + 1;
4088 
4089 		/* Next retry at immediate lower bit-rate. */
4090 		if (txrate > 0)
4091 			txrate--;
4092 	}
4093 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4094 #undef	RV
4095 }
4096 
4097 /*
4098  * Broadcast node is used to send group-addressed and management frames.
4099  */
4100 static int
4101 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4102 {
4103 	struct iwn_ops *ops = &sc->ops;
4104 	struct ifnet *ifp = sc->sc_ifp;
4105 	struct ieee80211com *ic = ifp->if_l2com;
4106 	struct iwn_node_info node;
4107 	struct iwn_cmd_link_quality linkq;
4108 	uint8_t txant;
4109 	int i, error;
4110 
4111 	memset(&node, 0, sizeof node);
4112 	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
4113 	node.id = sc->broadcast_id;
4114 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
4115 	if ((error = ops->add_node(sc, &node, async)) != 0)
4116 		return error;
4117 
4118 	/* Use the first valid TX antenna. */
4119 	txant = IWN_LSB(sc->txchainmask);
4120 
4121 	memset(&linkq, 0, sizeof linkq);
4122 	linkq.id = sc->broadcast_id;
4123 	linkq.antmsk_1stream = txant;
4124 	linkq.antmsk_2stream = IWN_ANT_AB;
4125 	linkq.ampdu_max = 64;
4126 	linkq.ampdu_threshold = 3;
4127 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4128 
4129 	/* Use lowest mandatory bit-rate. */
4130 	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
4131 		linkq.retry[0] = htole32(0xd);
4132 	else
4133 		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
4134 	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
4135 	/* Use same bit-rate for all TX retries. */
4136 	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4137 		linkq.retry[i] = linkq.retry[0];
4138 	}
4139 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4140 }
4141 
4142 static int
4143 iwn_updateedca(struct ieee80211com *ic)
4144 {
4145 #define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
4146 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
4147 	struct iwn_edca_params cmd;
4148 	int aci;
4149 
4150 	memset(&cmd, 0, sizeof cmd);
4151 	cmd.flags = htole32(IWN_EDCA_UPDATE);
4152 	for (aci = 0; aci < WME_NUM_AC; aci++) {
4153 		const struct wmeParams *ac =
4154 		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
4155 		cmd.ac[aci].aifsn = ac->wmep_aifsn;
4156 		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
4157 		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
4158 		cmd.ac[aci].txoplimit =
4159 		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
4160 	}
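	/*
	 * Temporarily trade the net80211 lock for the driver lock around
	 * the firmware command, presumably to avoid a lock order reversal
	 * between the two.
	 */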
4161 	IEEE80211_UNLOCK(ic);
4162 	IWN_LOCK(sc);
4163 	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4164 	IWN_UNLOCK(sc);
4165 	IEEE80211_LOCK(ic);
4166 	return 0;
4167 #undef IWN_EXP2
4168 }
4169 
4170 static void
4171 iwn_update_mcast(struct ifnet *ifp)
4172 {
4173 	/* Ignore */
4174 }
4175 
4176 static void
4177 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4178 {
4179 	struct iwn_cmd_led led;
4180 
4181 	/* Clear microcode LED ownership. */
4182 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4183 
4184 	led.which = which;
4185 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4186 	led.off = off;
4187 	led.on = on;
4188 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4189 }
4190 
4191 /*
4192  * Set the critical temperature at which the firmware will stop the radio
4193  * and notify us.
4194  */
4195 static int
4196 iwn_set_critical_temp(struct iwn_softc *sc)
4197 {
4198 	struct iwn_critical_temp crit;
4199 	int32_t temp;
4200 
4201 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4202 
4203 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4204 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4205 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4206 		temp = IWN_CTOK(110);
4207 	else
4208 		temp = 110;
4209 	memset(&crit, 0, sizeof crit);
4210 	crit.tempR = htole32(temp);
4211 	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
4212 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4213 }
4214 
4215 static int
4216 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4217 {
4218 	struct iwn_cmd_timing cmd;
4219 	uint64_t val, mod;
4220 
4221 	memset(&cmd, 0, sizeof cmd);
4222 	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
4223 	cmd.bintval = htole16(ni->ni_intval);
4224 	cmd.lintval = htole16(10);
4225 
4226 	/* Compute remaining time until next beacon. */
4227 	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4228 	mod = le64toh(cmd.tstamp) % val;
4229 	cmd.binitval = htole32((uint32_t)(val - mod));
4230 
4231 	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
4232 	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
4233 
4234 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4235 }
4236 
4237 static void
4238 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4239 {
4240 	struct ifnet *ifp = sc->sc_ifp;
4241 	struct ieee80211com *ic = ifp->if_l2com;
4242 
4243 	/* Adjust TX power if need be (delta >= 3 degC). */
4244 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
4245 	    __func__, sc->temp, temp);
4246 	if (abs(temp - sc->temp) >= 3) {
4247 		/* Record temperature of last calibration. */
4248 		sc->temp = temp;
4249 		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
4250 	}
4251 }
4252 
4253 /*
4254  * Set TX power for current channel (each rate has its own power settings).
4255  * This function takes into account the regulatory information from EEPROM,
4256  * the current temperature and the current voltage.
4257  */
4258 static int
4259 iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4260     int async)
4261 {
4262 /* Fixed-point arithmetic division using a n-bit fractional part. */
4263 #define fdivround(a, b, n)	\
4264 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4265 /* Linear interpolation. */
4266 #define interpolate(x, x1, y1, x2, y2, n)	\
4267 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
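/*
 * fdivround(a, b, n) computes a/b rounded to nearest using n fractional
 * bits: a is scaled by 2^n, divided by b, half a unit (2^n / 2) is added
 * and the result is scaled back down.  interpolate() uses it to linearly
 * interpolate y between (x1,y1) and (x2,y2) at point x.
 */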
4268 
4269 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4270 	struct iwn_ucode_info *uc = &sc->ucode_info;
4271 	struct iwn4965_cmd_txpower cmd;
4272 	struct iwn4965_eeprom_chan_samples *chans;
4273 	const uint8_t *rf_gain, *dsp_gain;
4274 	int32_t vdiff, tdiff;
4275 	int i, c, grp, maxpwr;
4276 	uint8_t chan;
4277 
4278 	/* Retrieve current channel from last RXON. */
4279 	chan = sc->rxon.chan;
4280 	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
4281 	    chan);
4282 
4283 	memset(&cmd, 0, sizeof cmd);
4284 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4285 	cmd.chan = chan;
4286 
4287 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4288 		maxpwr   = sc->maxpwr5GHz;
4289 		rf_gain  = iwn4965_rf_gain_5ghz;
4290 		dsp_gain = iwn4965_dsp_gain_5ghz;
4291 	} else {
4292 		maxpwr   = sc->maxpwr2GHz;
4293 		rf_gain  = iwn4965_rf_gain_2ghz;
4294 		dsp_gain = iwn4965_dsp_gain_2ghz;
4295 	}
4296 
4297 	/* Compute voltage compensation. */
4298 	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4299 	if (vdiff > 0)
4300 		vdiff *= 2;
4301 	if (abs(vdiff) > 2)
4302 		vdiff = 0;
4303 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4304 	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4305 	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
4306 
4307 	/* Get channel attenuation group. */
4308 	if (chan <= 20)		/* 1-20 */
4309 		grp = 4;
4310 	else if (chan <= 43)	/* 34-43 */
4311 		grp = 0;
4312 	else if (chan <= 70)	/* 44-70 */
4313 		grp = 1;
4314 	else if (chan <= 124)	/* 71-124 */
4315 		grp = 2;
4316 	else			/* 125-200 */
4317 		grp = 3;
4318 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4319 	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
4320 
4321 	/* Get channel sub-band. */
4322 	for (i = 0; i < IWN_NBANDS; i++)
4323 		if (sc->bands[i].lo != 0 &&
4324 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4325 			break;
4326 	if (i == IWN_NBANDS)	/* Can't happen in real life. */
4327 		return EINVAL;
4328 	chans = sc->bands[i].chans;
4329 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4330 	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
4331 
4332 	for (c = 0; c < 2; c++) {
4333 		uint8_t power, gain, temp;
4334 		int maxchpwr, pwr, ridx, idx;
4335 
4336 		power = interpolate(chan,
4337 		    chans[0].num, chans[0].samples[c][1].power,
4338 		    chans[1].num, chans[1].samples[c][1].power, 1);
4339 		gain  = interpolate(chan,
4340 		    chans[0].num, chans[0].samples[c][1].gain,
4341 		    chans[1].num, chans[1].samples[c][1].gain, 1);
4342 		temp  = interpolate(chan,
4343 		    chans[0].num, chans[0].samples[c][1].temp,
4344 		    chans[1].num, chans[1].samples[c][1].temp, 1);
4345 		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4346 		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
4347 		    __func__, c, power, gain, temp);
4348 
4349 		/* Compute temperature compensation. */
4350 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4351 		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4352 		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
4353 		    __func__, tdiff, sc->temp, temp);
4354 
4355 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4356 			/* Convert dBm to half-dBm. */
4357 			maxchpwr = sc->maxpwr[chan] * 2;
4358 			if ((ridx / 8) & 1)
4359 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4360 
4361 			pwr = maxpwr;
4362 
4363 			/* Adjust TX power based on rate. */
4364 			if ((ridx % 8) == 5)
4365 				pwr -= 15;	/* OFDM48: -7.5dB */
4366 			else if ((ridx % 8) == 6)
4367 				pwr -= 17;	/* OFDM54: -8.5dB */
4368 			else if ((ridx % 8) == 7)
4369 				pwr -= 20;	/* OFDM60: -10dB */
4370 			else
4371 				pwr -= 10;	/* Others: -5dB */
4372 
4373 			/* Do not exceed channel max TX power. */
4374 			if (pwr > maxchpwr)
4375 				pwr = maxchpwr;
4376 
4377 			idx = gain - (pwr - power) - tdiff - vdiff;
4378 			if ((ridx / 8) & 1)	/* MIMO */
4379 				idx += (int32_t)le32toh(uc->atten[grp][c]);
4380 
4381 			if (cmd.band == 0)
4382 				idx += 9;	/* 5GHz */
4383 			if (ridx == IWN_RIDX_MAX)
4384 				idx += 5;	/* CCK */
4385 
4386 			/* Make sure idx stays in a valid range. */
4387 			if (idx < 0)
4388 				idx = 0;
4389 			else if (idx > IWN4965_MAX_PWR_INDEX)
4390 				idx = IWN4965_MAX_PWR_INDEX;
4391 
4392 			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4393 			    "%s: Tx chain %d, rate idx %d: power=%d\n",
4394 			    __func__, c, ridx, idx);
4395 			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4396 			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4397 		}
4398 	}
4399 
4400 	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4401 	    "%s: set tx power for chan %d\n", __func__, chan);
4402 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4403 
4404 #undef interpolate
4405 #undef fdivround
4406 }
4407 
4408 static int
4409 iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4410     int async)
4411 {
4412 	struct iwn5000_cmd_txpower cmd;
4413 
4414 	/*
4415 	 * TX power calibration is handled automatically by the firmware
4416 	 * for 5000 Series.
4417 	 */
4418 	memset(&cmd, 0, sizeof cmd);
4419 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4420 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4421 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4422 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
4423 	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4424 }
4425 
4426 /*
4427  * Retrieve the maximum RSSI (in dBm) among receivers.
4428  */
4429 static int
4430 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4431 {
4432 	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4433 	uint8_t mask, agc;
4434 	int rssi;
4435 
4436 	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4437 	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
4438 
4439 	rssi = 0;
4440 	if (mask & IWN_ANT_A)
4441 		rssi = MAX(rssi, phy->rssi[0]);
4442 	if (mask & IWN_ANT_B)
4443 		rssi = MAX(rssi, phy->rssi[2]);
4444 	if (mask & IWN_ANT_C)
4445 		rssi = MAX(rssi, phy->rssi[4]);
4446 
4447 	DPRINTF(sc, IWN_DEBUG_RECV,
4448 	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
4449 	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
4450 	    rssi - agc - IWN_RSSI_TO_DBM);
4451 	return rssi - agc - IWN_RSSI_TO_DBM;
4452 }
4453 
4454 static int
4455 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4456 {
4457 	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4458 	uint8_t agc;
4459 	int rssi;
4460 
4461 	agc = (le32toh(phy->agc) >> 9) & 0x7f;
4462 
4463 	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4464 		   le16toh(phy->rssi[1]) & 0xff);
4465 	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4466 
4467 	DPRINTF(sc, IWN_DEBUG_RECV,
4468 	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
4469 	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
4470 	    rssi - agc - IWN_RSSI_TO_DBM);
4471 	return rssi - agc - IWN_RSSI_TO_DBM;
4472 }
4473 
4474 /*
4475  * Retrieve the average noise (in dBm) among receivers.
4476  */
4477 static int
4478 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4479 {
4480 	int i, total, nbant, noise;
4481 
4482 	total = nbant = 0;
4483 	for (i = 0; i < 3; i++) {
4484 		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4485 			continue;
4486 		total += noise;
4487 		nbant++;
4488 	}
4489 	/* There should be at least one antenna but check anyway. */
4490 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4491 }
4492 
4493 /*
4494  * Compute temperature (in degC) from last received statistics.
4495  */
4496 static int
4497 iwn4965_get_temperature(struct iwn_softc *sc)
4498 {
4499 	struct iwn_ucode_info *uc = &sc->ucode_info;
4500 	int32_t r1, r2, r3, r4, temp;
4501 
4502 	r1 = le32toh(uc->temp[0].chan20MHz);
4503 	r2 = le32toh(uc->temp[1].chan20MHz);
4504 	r3 = le32toh(uc->temp[2].chan20MHz);
4505 	r4 = le32toh(sc->rawtemp);
4506 
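	/*
	 * R1-R3 are temperature calibration references reported by the
	 * microcode; R4 is the current raw sensor reading.  The formula
	 * below maps R4 linearly onto a Kelvin scale using them.
	 */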
4507 	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4508 		return 0;
4509 
4510 	/* Sign-extend 23-bit R4 value to 32-bit. */
4511 	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4512 	/* Compute temperature in Kelvin. */
4513 	temp = (259 * (r4 - r2)) / (r3 - r1);
4514 	temp = (temp * 97) / 100 + 8;
4515 
4516 	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4517 	    IWN_KTOC(temp));
4518 	return IWN_KTOC(temp);
4519 }
4520 
4521 static int
4522 iwn5000_get_temperature(struct iwn_softc *sc)
4523 {
4524 	int32_t temp;
4525 
4526 	/*
4527 	 * Temperature is not used by the driver for 5000 Series because
4528 	 * TX power calibration is handled by firmware.
4529 	 */
4530 	temp = le32toh(sc->rawtemp);
4531 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4532 		temp = (temp / -5) + sc->temp_off;
4533 		temp = IWN_KTOC(temp);
4534 	}
4535 	return temp;
4536 }
4537 
4538 /*
4539  * Initialize sensitivity calibration state machine.
4540  */
4541 static int
4542 iwn_init_sensitivity(struct iwn_softc *sc)
4543 {
4544 	struct iwn_ops *ops = &sc->ops;
4545 	struct iwn_calib_state *calib = &sc->calib;
4546 	uint32_t flags;
4547 	int error;
4548 
4549 	/* Reset calibration state machine. */
4550 	memset(calib, 0, sizeof (*calib));
4551 	calib->state = IWN_CALIB_STATE_INIT;
4552 	calib->cck_state = IWN_CCK_STATE_HIFA;
4553 	/* Set initial correlation values. */
4554 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4555 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4556 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4557 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4558 	calib->cck_x4      = 125;
4559 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4560 	calib->energy_cck  = sc->limits->energy_cck;
4561 
4562 	/* Write initial sensitivity. */
4563 	if ((error = iwn_send_sensitivity(sc)) != 0)
4564 		return error;
4565 
4566 	/* Write initial gains. */
4567 	if ((error = ops->init_gains(sc)) != 0)
4568 		return error;
4569 
4570 	/* Request statistics at each beacon interval. */
4571 	flags = 0;
4572 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
4573 	    __func__);
4574 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4575 }
4576 
4577 /*
4578  * Collect noise and RSSI statistics for the first 20 beacons received
4579  * after association and use them to determine connected antennas and
4580  * to set differential gains.
4581  */
4582 static void
4583 iwn_collect_noise(struct iwn_softc *sc,
4584     const struct iwn_rx_general_stats *stats)
4585 {
4586 	struct iwn_ops *ops = &sc->ops;
4587 	struct iwn_calib_state *calib = &sc->calib;
4588 	uint32_t val;
4589 	int i;
4590 
4591 	/* Accumulate RSSI and noise for all 3 antennas. */
4592 	for (i = 0; i < 3; i++) {
4593 		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4594 		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4595 	}
4596 	/* NB: We update differential gains only once after 20 beacons. */
4597 	if (++calib->nbeacons < 20)
4598 		return;
4599 
4600 	/* Determine highest average RSSI. */
4601 	val = MAX(calib->rssi[0], calib->rssi[1]);
4602 	val = MAX(calib->rssi[2], val);
4603 
4604 	/* Determine which antennas are connected. */
4605 	sc->chainmask = sc->rxchainmask;
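	/*
	 * RSSI was summed over 20 beacons above, so the 15 * 20 threshold
	 * below disconnects any chain whose average RSSI is more than 15
	 * units below the strongest one.
	 */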
4606 	for (i = 0; i < 3; i++)
4607 		if (val - calib->rssi[i] > 15 * 20)
4608 			sc->chainmask &= ~(1 << i);
4609 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4610 	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4611 	    __func__, sc->rxchainmask, sc->chainmask);
4612 
4613 	/* If none of the TX antennas are connected, keep at least one. */
4614 	if ((sc->chainmask & sc->txchainmask) == 0)
4615 		sc->chainmask |= IWN_LSB(sc->txchainmask);
4616 
4617 	(void)ops->set_gains(sc);
4618 	calib->state = IWN_CALIB_STATE_RUN;
4619 
4620 #ifdef notyet
4621 	/* XXX Disable RX chains with no antennas connected. */
4622 	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4623 	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4624 #endif
4625 
4626 #if 0
4627 	/* XXX: not yet */
4628 	/* Enable power-saving mode if requested by user. */
4629 	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4630 		(void)iwn_set_pslevel(sc, 0, 3, 1);
4631 #endif
4632 }
4633 
4634 static int
4635 iwn4965_init_gains(struct iwn_softc *sc)
4636 {
4637 	struct iwn_phy_calib_gain cmd;
4638 
4639 	memset(&cmd, 0, sizeof cmd);
4640 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4641 	/* Differential gains initially set to 0 for all 3 antennas. */
4642 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4643 	    "%s: setting initial differential gains\n", __func__);
4644 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4645 }
4646 
4647 static int
4648 iwn5000_init_gains(struct iwn_softc *sc)
4649 {
4650 	struct iwn_phy_calib cmd;
4651 
4652 	memset(&cmd, 0, sizeof cmd);
4653 	cmd.code = sc->reset_noise_gain;
4654 	cmd.ngroups = 1;
4655 	cmd.isvalid = 1;
4656 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4657 	    "%s: setting initial differential gains\n", __func__);
4658 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4659 }
4660 
4661 static int
4662 iwn4965_set_gains(struct iwn_softc *sc)
4663 {
4664 	struct iwn_calib_state *calib = &sc->calib;
4665 	struct iwn_phy_calib_gain cmd;
4666 	int i, delta, noise;
4667 
4668 	/* Get minimal noise among connected antennas. */
4669 	noise = INT_MAX;	/* NB: There's at least one antenna. */
4670 	for (i = 0; i < 3; i++)
4671 		if (sc->chainmask & (1 << i))
4672 			noise = MIN(calib->noise[i], noise);
4673 
4674 	memset(&cmd, 0, sizeof cmd);
4675 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4676 	/* Set differential gains for connected antennas. */
4677 	for (i = 0; i < 3; i++) {
4678 		if (sc->chainmask & (1 << i)) {
4679 			/* Compute attenuation (in unit of 1.5dB). */
4680 			delta = (noise - (int32_t)calib->noise[i]) / 30;
4681 			/* NB: delta <= 0 */
4682 			/* Limit to [-4.5dB,0]. */
4683 			cmd.gain[i] = MIN(abs(delta), 3);
4684 			if (delta < 0)
4685 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4686 		}
4687 	}
4688 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4689 	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4690 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4691 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4692 }
4693 
4694 static int
4695 iwn5000_set_gains(struct iwn_softc *sc)
4696 {
4697 	struct iwn_calib_state *calib = &sc->calib;
4698 	struct iwn_phy_calib_gain cmd;
4699 	int i, ant, div, delta;
4700 
4701 	/* We collected 20 beacons; non-6050 adapters need an extra 1.5 factor. */
4702 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4703 
4704 	memset(&cmd, 0, sizeof cmd);
4705 	cmd.code = sc->noise_gain;
4706 	cmd.ngroups = 1;
4707 	cmd.isvalid = 1;
4708 	/* Use the first available RX antenna as the reference. */
4709 	ant = IWN_LSB(sc->rxchainmask);
4710 	/* Set differential gains for other antennas. */
4711 	for (i = ant + 1; i < 3; i++) {
4712 		if (sc->chainmask & (1 << i)) {
4713 			/* The delta is relative to antenna "ant". */
4714 			delta = ((int32_t)calib->noise[ant] -
4715 			    (int32_t)calib->noise[i]) / div;
4716 			/* Limit to [-4.5dB,+4.5dB]. */
4717 			cmd.gain[i - 1] = MIN(abs(delta), 3);
4718 			if (delta < 0)
4719 				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
4720 		}
4721 	}
4722 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4723 	    "setting differential gains Ant B/C: %x/%x (%x)\n",
4724 	    cmd.gain[0], cmd.gain[1], sc->chainmask);
4725 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4726 }
4727 
4728 /*
4729  * Tune RF RX sensitivity based on the number of false alarms detected
4730  * during the last beacon period.
4731  */
4732 static void
4733 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4734 {
4735 #define inc(val, inc, max)			\
4736 	if ((val) < (max)) {			\
4737 		if ((val) < (max) - (inc))	\
4738 			(val) += (inc);		\
4739 		else				\
4740 			(val) = (max);		\
4741 		needs_update = 1;		\
4742 	}
4743 #define dec(val, dec, min)			\
4744 	if ((val) > (min)) {			\
4745 		if ((val) > (min) + (dec))	\
4746 			(val) -= (dec);		\
4747 		else				\
4748 			(val) = (min);		\
4749 		needs_update = 1;		\
4750 	}
4751 
4752 	const struct iwn_sensitivity_limits *limits = sc->limits;
4753 	struct iwn_calib_state *calib = &sc->calib;
4754 	uint32_t val, rxena, fa;
4755 	uint32_t energy[3], energy_min;
4756 	uint8_t noise[3], noise_ref;
4757 	int i, needs_update = 0;
4758 
4759 	/* Check that we've been enabled long enough. */
4760 	if ((rxena = le32toh(stats->general.load)) == 0)
4761 		return;
4762 
4763 	/* Compute number of false alarms since last call for OFDM. */
4764 	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4765 	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4766 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4767 
4768 	/* Save counters values for next call. */
4769 	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4770 	calib->fa_ofdm = le32toh(stats->ofdm.fa);
4771 
4772 	if (fa > 50 * rxena) {
4773 		/* High false alarm count, decrease sensitivity. */
4774 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4775 		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
4776 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4777 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4778 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4779 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4780 
4781 	} else if (fa < 5 * rxena) {
4782 		/* Low false alarm count, increase sensitivity. */
4783 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4784 		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
4785 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4786 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4787 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4788 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4789 	}
4790 
4791 	/* Compute maximum noise among 3 receivers. */
4792 	for (i = 0; i < 3; i++)
4793 		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4794 	val = MAX(noise[0], noise[1]);
4795 	val = MAX(noise[2], val);
4796 	/* Insert it into our samples table. */
4797 	calib->noise_samples[calib->cur_noise_sample] = val;
4798 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4799 
4800 	/* Compute maximum noise among last 20 samples. */
4801 	noise_ref = calib->noise_samples[0];
4802 	for (i = 1; i < 20; i++)
4803 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4804 
4805 	/* Compute maximum energy among 3 receivers. */
4806 	for (i = 0; i < 3; i++)
4807 		energy[i] = le32toh(stats->general.energy[i]);
4808 	val = MIN(energy[0], energy[1]);
4809 	val = MIN(energy[2], val);
4810 	/* Insert it into our samples table. */
4811 	calib->energy_samples[calib->cur_energy_sample] = val;
4812 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4813 
4814 	/* Compute minimum energy among last 10 samples. */
4815 	energy_min = calib->energy_samples[0];
4816 	for (i = 1; i < 10; i++)
4817 		energy_min = MAX(energy_min, calib->energy_samples[i]);
4818 	energy_min += 6;
4819 
4820 	/* Compute number of false alarms since last call for CCK. */
4821 	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4822 	fa += le32toh(stats->cck.fa) - calib->fa_cck;
4823 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4824 
4825 	/* Save counters values for next call. */
4826 	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4827 	calib->fa_cck = le32toh(stats->cck.fa);
4828 
4829 	if (fa > 50 * rxena) {
4830 		/* High false alarm count, decrease sensitivity. */
4831 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4832 		    "%s: CCK high false alarm count: %u\n", __func__, fa);
4833 		calib->cck_state = IWN_CCK_STATE_HIFA;
4834 		calib->low_fa = 0;
4835 
4836 		if (calib->cck_x4 > 160) {
4837 			calib->noise_ref = noise_ref;
4838 			if (calib->energy_cck > 2)
4839 				dec(calib->energy_cck, 2, energy_min);
4840 		}
4841 		if (calib->cck_x4 < 160) {
4842 			calib->cck_x4 = 161;
4843 			needs_update = 1;
4844 		} else
4845 			inc(calib->cck_x4, 3, limits->max_cck_x4);
4846 
4847 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4848 
4849 	} else if (fa < 5 * rxena) {
4850 		/* Low false alarm count, increase sensitivity. */
4851 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4852 		    "%s: CCK low false alarm count: %u\n", __func__, fa);
4853 		calib->cck_state = IWN_CCK_STATE_LOFA;
4854 		calib->low_fa++;
4855 
4856 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4857 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4858 		     calib->low_fa > 100)) {
4859 			inc(calib->energy_cck, 2, limits->min_energy_cck);
4860 			dec(calib->cck_x4,     3, limits->min_cck_x4);
4861 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4862 		}
4863 	} else {
4864 		/* Not worth increasing or decreasing sensitivity. */
4865 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4866 		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
4867 		calib->low_fa = 0;
4868 		calib->noise_ref = noise_ref;
4869 
4870 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4871 			/* Previous interval had many false alarms. */
4872 			dec(calib->energy_cck, 8, energy_min);
4873 		}
4874 		calib->cck_state = IWN_CCK_STATE_INIT;
4875 	}
4876 
4877 	if (needs_update)
4878 		(void)iwn_send_sensitivity(sc);
4879 #undef dec
4880 #undef inc
4881 }
4882 
4883 static int
4884 iwn_send_sensitivity(struct iwn_softc *sc)
4885 {
4886 	struct iwn_calib_state *calib = &sc->calib;
4887 	struct iwn_enhanced_sensitivity_cmd cmd;
4888 	int len;
4889 
4890 	memset(&cmd, 0, sizeof cmd);
4891 	len = sizeof (struct iwn_sensitivity_cmd);
4892 	cmd.which = IWN_SENSITIVITY_WORKTBL;
4893 	/* OFDM modulation. */
4894 	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
4895 	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
4896 	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
4897 	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
4898 	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
4899 	cmd.energy_ofdm_th     = htole16(62);
4900 	/* CCK modulation. */
4901 	cmd.corr_cck_x4        = htole16(calib->cck_x4);
4902 	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
4903 	cmd.energy_cck         = htole16(calib->energy_cck);
4904 	/* Barker modulation: use default values. */
4905 	cmd.corr_barker        = htole16(190);
4906 	cmd.corr_barker_mrc    = htole16(390);
4907 
4908 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4909 	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4910 	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4911 	    calib->ofdm_mrc_x4, calib->cck_x4,
4912 	    calib->cck_mrc_x4, calib->energy_cck);
4913 
4914 	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4915 		goto send;
4916 	/* Enhanced sensitivity settings. */
4917 	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4918 	cmd.ofdm_det_slope_mrc = htole16(668);
4919 	cmd.ofdm_det_icept_mrc = htole16(4);
4920 	cmd.ofdm_det_slope     = htole16(486);
4921 	cmd.ofdm_det_icept     = htole16(37);
4922 	cmd.cck_det_slope_mrc  = htole16(853);
4923 	cmd.cck_det_icept_mrc  = htole16(4);
4924 	cmd.cck_det_slope      = htole16(476);
4925 	cmd.cck_det_icept      = htole16(99);
4926 send:
4927 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4928 }
4929 
4930 /*
4931  * Set STA mode power saving level (between 0 and 5).
4932  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4933  */
4934 static int
4935 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4936 {
4937 	struct iwn_pmgt_cmd cmd;
4938 	const struct iwn_pmgt *pmgt;
4939 	uint32_t max, skip_dtim;
4940 	uint32_t reg;
4941 	int i;
4942 
4943 	/* Select which PS parameters to use. */
4944 	if (dtim <= 2)
4945 		pmgt = &iwn_pmgt[0][level];
4946 	else if (dtim <= 10)
4947 		pmgt = &iwn_pmgt[1][level];
4948 	else
4949 		pmgt = &iwn_pmgt[2][level];
4950 
4951 	memset(&cmd, 0, sizeof cmd);
4952 	if (level != 0)	/* not CAM */
4953 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4954 	if (level == 5)
4955 		cmd.flags |= htole16(IWN_PS_FAST_PD);
4956 	/* Retrieve PCIe Active State Power Management (ASPM). */
4957 	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4958 	if (!(reg & 0x1))	/* L0s Entry disabled. */
4959 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4960 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4961 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4962 
4963 	if (dtim == 0) {
4964 		dtim = 1;
4965 		skip_dtim = 0;
4966 	} else
4967 		skip_dtim = pmgt->skip_dtim;
4968 	if (skip_dtim != 0) {
4969 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4970 		max = pmgt->intval[4];
4971 		if (max == (uint32_t)-1)
4972 			max = dtim * (skip_dtim + 1);
4973 		else if (max > dtim)
4974 			max = (max / dtim) * dtim;
4975 	} else
4976 		max = dtim;
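	/* Clamp each sleep interval so it never exceeds the limit above. */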
4977 	for (i = 0; i < 5; i++)
4978 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4979 
4980 	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4981 	    level);
4982 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4983 }
4984 
4985 static int
4986 iwn_send_btcoex(struct iwn_softc *sc)
4987 {
4988 	struct iwn_bluetooth cmd;
4989 
4990 	memset(&cmd, 0, sizeof cmd);
4991 	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4992 	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4993 	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4994 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
4995 	    __func__);
4996 	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4997 }
4998 
4999 static int
5000 iwn_send_advanced_btcoex(struct iwn_softc *sc)
5001 {
5002 	static const uint32_t btcoex_3wire[12] = {
5003 		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
5004 		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
5005 		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
5006 	};
5007 	struct iwn6000_btcoex_config btconfig;
5008 	struct iwn_btcoex_priotable btprio;
5009 	struct iwn_btcoex_prot btprot;
5010 	int error, i;
5011 
5012 	memset(&btconfig, 0, sizeof btconfig);
5013 	btconfig.flags = 145;
5014 	btconfig.max_kill = 5;
5015 	btconfig.bt3_t7_timer = 1;
5016 	btconfig.kill_ack = htole32(0xffff0000);
5017 	btconfig.kill_cts = htole32(0xffff0000);
5018 	btconfig.sample_time = 2;
5019 	btconfig.bt3_t2_timer = 0xc;
5020 	for (i = 0; i < 12; i++)
5021 		btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
5022 	btconfig.valid = htole16(0xff);
5023 	btconfig.prio_boost = 0xf0;
5024 	DPRINTF(sc, IWN_DEBUG_RESET,
5025 	    "%s: configuring advanced bluetooth coexistence\n", __func__);
5026 	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
5027 	if (error != 0)
5028 		return error;
5029 
5030 	memset(&btprio, 0, sizeof btprio);
5031 	btprio.calib_init1 = 0x6;
5032 	btprio.calib_init2 = 0x7;
5033 	btprio.calib_periodic_low1 = 0x2;
5034 	btprio.calib_periodic_low2 = 0x3;
5035 	btprio.calib_periodic_high1 = 0x4;
5036 	btprio.calib_periodic_high2 = 0x5;
5037 	btprio.dtim = 0x6;
5038 	btprio.scan52 = 0x8;
5039 	btprio.scan24 = 0xa;
5040 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
5041 	    1);
5042 	if (error != 0)
5043 		return error;
5044 
5045 	/* Force BT state machine change. */
5046 	memset(&btprot, 0, sizeof btprot);
5047 	btprot.open = 1;
5048 	btprot.type = 1;
5049 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5050 	if (error != 0)
5051 		return error;
5052 	btprot.open = 0;
5053 	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5054 }
5055 
5056 static int
5057 iwn5000_runtime_calib(struct iwn_softc *sc)
5058 {
5059 	struct iwn5000_calib_config cmd;
5060 
5061 	memset(&cmd, 0, sizeof cmd);
5062 	cmd.ucode.once.enable = 0xffffffff;
5063 	cmd.ucode.once.start = IWN5000_CALIB_DC;
5064 	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5065 	    "%s: configuring runtime calibration\n", __func__);
5066 	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5067 }
5068 
5069 static int
5070 iwn_config(struct iwn_softc *sc)
5071 {
5072 	struct iwn_ops *ops = &sc->ops;
5073 	struct ifnet *ifp = sc->sc_ifp;
5074 	struct ieee80211com *ic = ifp->if_l2com;
5075 	uint32_t txmask;
5076 	uint16_t rxchain;
5077 	int error;
5078 
5079 	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5080 		/* Set radio temperature sensor offset. */
5081 		error = iwn5000_temp_offset_calib(sc);
5082 		if (error != 0) {
5083 			device_printf(sc->sc_dev,
5084 			    "%s: could not set temperature offset\n", __func__);
5085 			return error;
5086 		}
5087 	}
5088 
5089 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5090 		/* Configure runtime DC calibration. */
5091 		error = iwn5000_runtime_calib(sc);
5092 		if (error != 0) {
5093 			device_printf(sc->sc_dev,
5094 			    "%s: could not configure runtime calibration\n",
5095 			    __func__);
5096 			return error;
5097 		}
5098 	}
5099 
5100 	/* Configure valid TX chains for >=5000 Series. */
5101 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5102 		txmask = htole32(sc->txchainmask);
5103 		DPRINTF(sc, IWN_DEBUG_RESET,
5104 		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
5105 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5106 		    sizeof txmask, 0);
5107 		if (error != 0) {
5108 			device_printf(sc->sc_dev,
5109 			    "%s: could not configure valid TX chains, "
5110 			    "error %d\n", __func__, error);
5111 			return error;
5112 		}
5113 	}
5114 
5115 	/* Configure bluetooth coexistence. */
5116 	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
5117 		error = iwn_send_advanced_btcoex(sc);
5118 	else
5119 		error = iwn_send_btcoex(sc);
5120 	if (error != 0) {
5121 		device_printf(sc->sc_dev,
5122 		    "%s: could not configure bluetooth coexistence, error %d\n",
5123 		    __func__, error);
5124 		return error;
5125 	}
5126 
5127 	/* Set mode, channel, RX filter and enable RX. */
5128 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5129 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
5130 	IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
5131 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
5132 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5133 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
5134 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5135 	switch (ic->ic_opmode) {
5136 	case IEEE80211_M_STA:
5137 		sc->rxon.mode = IWN_MODE_STA;
5138 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5139 		break;
5140 	case IEEE80211_M_MONITOR:
5141 		sc->rxon.mode = IWN_MODE_MONITOR;
5142 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5143 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5144 		break;
5145 	default:
5146 		/* Should not get here. */
5147 		break;
5148 	}
5149 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
5150 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
5151 	sc->rxon.ht_single_mask = 0xff;
5152 	sc->rxon.ht_dual_mask = 0xff;
5153 	sc->rxon.ht_triple_mask = 0xff;
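	/*
	 * Advertise which RX chains are physically present and how many
	 * chains the firmware should keep enabled for MIMO reception and
	 * while idle (two of each here).
	 */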
5154 	rxchain =
5155 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5156 	    IWN_RXCHAIN_MIMO_COUNT(2) |
5157 	    IWN_RXCHAIN_IDLE_COUNT(2);
5158 	sc->rxon.rxchain = htole16(rxchain);
5159 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
5160 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5161 	if (error != 0) {
5162 		device_printf(sc->sc_dev, "%s: RXON command failed\n",
5163 		    __func__);
5164 		return error;
5165 	}
5166 
5167 	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5168 		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
5169 		    __func__);
5170 		return error;
5171 	}
5172 
5173 	/* Configuration has changed, set TX power accordingly. */
5174 	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
5175 		device_printf(sc->sc_dev, "%s: could not set TX power\n",
5176 		    __func__);
5177 		return error;
5178 	}
5179 
5180 	if ((error = iwn_set_critical_temp(sc)) != 0) {
5181 		device_printf(sc->sc_dev,
5182 		    "%s: could not set critical temperature\n", __func__);
5183 		return error;
5184 	}
5185 
5186 	/* Set power saving level to CAM during initialization. */
5187 	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5188 		device_printf(sc->sc_dev,
5189 		    "%s: could not set power saving level\n", __func__);
5190 		return error;
5191 	}
5192 	return 0;
5193 }
5194 
5195 /*
5196  * Add an ssid element to a frame.
5197  */
5198 static uint8_t *
5199 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
5200 {
5201 	*frm++ = IEEE80211_ELEMID_SSID;
5202 	*frm++ = len;
5203 	memcpy(frm, ssid, len);
5204 	return frm + len;
5205 }
5206 
5207 static int
5208 iwn_scan(struct iwn_softc *sc)
5209 {
5210 	struct ifnet *ifp = sc->sc_ifp;
5211 	struct ieee80211com *ic = ifp->if_l2com;
5212 	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
5213 	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
5214 	struct iwn_scan_hdr *hdr;
5215 	struct iwn_cmd_data *tx;
5216 	struct iwn_scan_essid *essid;
5217 	struct iwn_scan_chan *chan;
5218 	struct ieee80211_frame *wh;
5219 	struct ieee80211_rateset *rs;
5220 	struct ieee80211_channel *c;
5221 	uint8_t *buf, *frm;
5222 	uint16_t rxchain;
5223 	uint8_t txant;
5224 	int buflen, error;
5225 
5226 	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5227 	if (buf == NULL) {
5228 		device_printf(sc->sc_dev,
5229 		    "%s: could not allocate buffer for scan command\n",
5230 		    __func__);
5231 		return ENOMEM;
5232 	}
5233 	hdr = (struct iwn_scan_hdr *)buf;
5234 	/*
5235 	 * Move to the next channel if no frames are received within 10ms
5236 	 * after sending the probe request.
5237 	 */
5238 	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
5239 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5240 
5241 	/* Select antennas for scanning. */
5242 	rxchain =
5243 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5244 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5245 	    IWN_RXCHAIN_DRIVER_FORCE;
5246 	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
5247 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5248 		/* Ant A must be avoided in 5GHz because of an HW bug. */
5249 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5250 	} else	/* Use all available RX antennas. */
5251 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5252 	hdr->rxchain = htole16(rxchain);
5253 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5254 
5255 	tx = (struct iwn_cmd_data *)(hdr + 1);
5256 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5257 	tx->id = sc->broadcast_id;
5258 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5259 
5260 	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
5261 		/* Send probe requests at 6Mbps. */
5262 		tx->rate = htole32(0xd);
5263 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5264 	} else {
5265 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5266 		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5267 		    sc->rxon.associd && sc->rxon.chan > 14)
5268 			tx->rate = htole32(0xd);
5269 		else {
5270 			/* Send probe requests at 1Mbps. */
5271 			tx->rate = htole32(10 | IWN_RFLAG_CCK);
5272 		}
5273 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5274 	}
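	/*
	 * Note: tx->rate above holds hardware PLCP rate codes, not 802.11
	 * rate values; 0xd selects 6 Mb/s OFDM and 0xa (10) 1 Mb/s CCK.
	 */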
5275 	/* Use the first valid TX antenna. */
5276 	txant = IWN_LSB(sc->txchainmask);
5277 	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
5278 
5279 	essid = (struct iwn_scan_essid *)(tx + 1);
5280 	if (ss->ss_ssid[0].len != 0) {
5281 		essid[0].id = IEEE80211_ELEMID_SSID;
5282 		essid[0].len = ss->ss_ssid[0].len;
5283 		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
5284 	}
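	/*
	 * The scan command carries a fixed array of 20 ESSID entries; the
	 * probe request template is appended right after it, hence the
	 * "essid + 20" below.
	 */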
5285 	/*
5286 	 * Build a probe request frame.  Most of the following code is a
5287 	 * copy & paste of what is done in net80211.
5288 	 */
5289 	wh = (struct ieee80211_frame *)(essid + 20);
5290 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5291 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5292 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5293 	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
5294 	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
5295 	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
5296 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5297 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5298 
5299 	frm = (uint8_t *)(wh + 1);
5300 	frm = ieee80211_add_ssid(frm, NULL, 0);
5301 	frm = ieee80211_add_rates(frm, rs);
5302 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5303 		frm = ieee80211_add_xrates(frm, rs);
5304 	if (ic->ic_htcaps & IEEE80211_HTC_HT)
5305 		frm = ieee80211_add_htcap(frm, ni);
5306 
5307 	/* Set length of probe request. */
5308 	tx->len = htole16(frm - (uint8_t *)wh);
5309 
5310 	c = ic->ic_curchan;
5311 	chan = (struct iwn_scan_chan *)frm;
5312 	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5313 	chan->flags = 0;
5314 	if (ss->ss_nssid > 0)
5315 		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5316 	chan->dsp_gain = 0x6e;
5317 	if (IEEE80211_IS_CHAN_5GHZ(c) &&
5318 	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5319 		chan->rf_gain = 0x3b;
5320 		chan->active  = htole16(24);
5321 		chan->passive = htole16(110);
5322 		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5323 	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
5324 		chan->rf_gain = 0x3b;
5325 		chan->active  = htole16(24);
5326 		if (sc->rxon.associd)
5327 			chan->passive = htole16(78);
5328 		else
5329 			chan->passive = htole16(110);
5330 		hdr->crc_threshold = 0xffff;
5331 	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5332 		chan->rf_gain = 0x28;
5333 		chan->active  = htole16(36);
5334 		chan->passive = htole16(120);
5335 		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5336 	} else {
5337 		chan->rf_gain = 0x28;
5338 		chan->active  = htole16(36);
5339 		if (sc->rxon.associd)
5340 			chan->passive = htole16(88);
5341 		else
5342 			chan->passive = htole16(120);
5343 		hdr->crc_threshold = 0xffff;
5344 	}
5345 
5346 	DPRINTF(sc, IWN_DEBUG_STATE,
5347 	    "%s: chan %u flags 0x%x rf_gain 0x%x "
5348 	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
5349 	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
5350 	    chan->active, chan->passive);
5351 
5352 	hdr->nchan++;
5353 	chan++;
5354 	buflen = (uint8_t *)chan - buf;
5355 	hdr->len = htole16(buflen);
5356 
5357 	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
5358 	    hdr->nchan);
5359 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5360 	free(buf, M_DEVBUF);
5361 	return error;
5362 }
5363 
5364 static int
5365 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
5366 {
5367 	struct iwn_ops *ops = &sc->ops;
5368 	struct ifnet *ifp = sc->sc_ifp;
5369 	struct ieee80211com *ic = ifp->if_l2com;
5370 	struct ieee80211_node *ni = vap->iv_bss;
5371 	int error;
5372 
5373 	/* Update adapter configuration. */
5374 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5375 	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5376 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5377 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5378 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5379 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5380 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5381 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5382 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5383 	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5384 		sc->rxon.cck_mask  = 0;
5385 		sc->rxon.ofdm_mask = 0x15;
5386 	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5387 		sc->rxon.cck_mask  = 0x03;
5388 		sc->rxon.ofdm_mask = 0;
5389 	} else {
5390 		/* Assume 802.11b/g. */
5391 		sc->rxon.cck_mask  = 0x0f;
5392 		sc->rxon.ofdm_mask = 0x15;
5393 	}
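	/*
	 * cck_mask/ofdm_mask are bitmaps over the hardware rate tables
	 * (CCK: 1/2/5.5/11 Mb/s, OFDM: 6/9/12/18/24/36/48/54 Mb/s), so
	 * 0x15 enables the mandatory 6/12/24 Mb/s OFDM rates.
	 */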
5394 	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
5395 	    sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
5396 	    sc->rxon.ofdm_mask);
5397 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5398 	if (error != 0) {
5399 		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
5400 		    __func__, error);
5401 		return error;
5402 	}
5403 
5404 	/* Configuration has changed, set TX power accordingly. */
5405 	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5406 		device_printf(sc->sc_dev,
5407 		    "%s: could not set TX power, error %d\n", __func__, error);
5408 		return error;
5409 	}
5410 	/*
5411 	 * Reconfiguring RXON clears the firmware's node table, so we must
5412 	 * add the broadcast node again.
5413 	 */
5414 	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5415 		device_printf(sc->sc_dev,
5416 		    "%s: could not add broadcast node, error %d\n", __func__,
5417 		    error);
5418 		return error;
5419 	}
5420 	return 0;
5421 }
5422 
5423 static int
5424 iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
5425 {
5426 	struct iwn_ops *ops = &sc->ops;
5427 	struct ifnet *ifp = sc->sc_ifp;
5428 	struct ieee80211com *ic = ifp->if_l2com;
5429 	struct ieee80211_node *ni = vap->iv_bss;
5430 	struct iwn_node_info node;
5431 	uint32_t htflags = 0;
5432 	int error;
5433 
5434 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5435 		/* Link LED blinks while monitoring. */
5436 		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5437 		return 0;
5438 	}
5439 	if ((error = iwn_set_timing(sc, ni)) != 0) {
5440 		device_printf(sc->sc_dev,
5441 		    "%s: could not set timing, error %d\n", __func__, error);
5442 		return error;
5443 	}
5444 
5445 	/* Update adapter configuration. */
5446 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5447 	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5448 	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5449 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5450 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5451 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5452 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5453 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5454 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5455 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5456 	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5457 		sc->rxon.cck_mask  = 0;
5458 		sc->rxon.ofdm_mask = 0x15;
5459 	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5460 		sc->rxon.cck_mask  = 0x03;
5461 		sc->rxon.ofdm_mask = 0;
5462 	} else {
5463 		/* Assume 802.11b/g. */
5464 		sc->rxon.cck_mask  = 0x0f;
5465 		sc->rxon.ofdm_mask = 0x15;
5466 	}
5467 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5468 		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
5469 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
5470 			switch (ic->ic_curhtprotmode) {
5471 			case IEEE80211_HTINFO_OPMODE_HT20PR:
5472 				htflags |= IWN_RXON_HT_MODEPURE40;
5473 				break;
5474 			default:
5475 				htflags |= IWN_RXON_HT_MODEMIXED;
5476 				break;
5477 			}
5478 		}
5479 		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
5480 			htflags |= IWN_RXON_HT_HT40MINUS;
5481 	}
5482 	sc->rxon.flags |= htole32(htflags);
5483 	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5484 	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
5485 	    sc->rxon.chan, sc->rxon.flags);
5486 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5487 	if (error != 0) {
5488 		device_printf(sc->sc_dev,
5489 		    "%s: could not update configuration, error %d\n", __func__,
5490 		    error);
5491 		return error;
5492 	}
5493 
5494 	/* Configuration has changed, set TX power accordingly. */
5495 	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5496 		device_printf(sc->sc_dev,
5497 		    "%s: could not set TX power, error %d\n", __func__, error);
5498 		return error;
5499 	}
5500 
5501 	/* Fake a join to initialize the TX rate. */
5502 	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5503 	iwn_newassoc(ni, 1);
5504 
5505 	/* Add BSS node. */
5506 	memset(&node, 0, sizeof node);
5507 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5508 	node.id = IWN_ID_BSS;
5509 	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5510 		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
5511 		case IEEE80211_HTCAP_SMPS_ENA:
5512 			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
5513 			break;
5514 		case IEEE80211_HTCAP_SMPS_DYNAMIC:
5515 			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
5516 			break;
5517 		}
5518 		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
5519 		    IWN_AMDPU_DENSITY(5));	/* 4us */
5520 		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
5521 			node.htflags |= htole32(IWN_NODE_HT40);
5522 	}
5523 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
5524 	error = ops->add_node(sc, &node, 1);
5525 	if (error != 0) {
5526 		device_printf(sc->sc_dev,
5527 		    "%s: could not add BSS node, error %d\n", __func__, error);
5528 		return error;
5529 	}
5530 	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
5531 	    __func__, node.id);
5532 	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5533 		device_printf(sc->sc_dev,
5534 		    "%s: could not setup link quality for node %d, error %d\n",
5535 		    __func__, node.id, error);
5536 		return error;
5537 	}
5538 
5539 	if ((error = iwn_init_sensitivity(sc)) != 0) {
5540 		device_printf(sc->sc_dev,
5541 		    "%s: could not set sensitivity, error %d\n", __func__,
5542 		    error);
5543 		return error;
5544 	}
5545 	/* Start periodic calibration timer. */
5546 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5547 	sc->calib_cnt = 0;
5548 	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
5549 	    sc);
5550 
5551 	/* Link LED always on while associated. */
5552 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5553 	return 0;
5554 }
5555 
5556 /*
5557  * This function is called by the upper layer when an ADDBA request is
5558  * received from another STA and before the ADDBA response is sent.
5559  */
5560 static int
5561 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
5562     int baparamset, int batimeout, int baseqctl)
5563 {
5564 #define MS(_v, _f)	(((_v) & _f) >> _f##_S)
5565 	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5566 	struct iwn_ops *ops = &sc->ops;
5567 	struct iwn_node *wn = (void *)ni;
5568 	struct iwn_node_info node;
5569 	uint16_t ssn;
5570 	uint8_t tid;
5571 	int error;
5572 
5573 	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
5574 	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
5575 
5576 	memset(&node, 0, sizeof node);
5577 	node.id = wn->id;
5578 	node.control = IWN_NODE_UPDATE;
5579 	node.flags = IWN_FLAG_SET_ADDBA;
5580 	node.addba_tid = tid;
5581 	node.addba_ssn = htole16(ssn);
5582 	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5583 	    wn->id, tid, ssn);
5584 	error = ops->add_node(sc, &node, 1);
5585 	if (error != 0)
5586 		return error;
5587 	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
5588 #undef MS
5589 }
5590 
5591 /*
5592  * This function is called by the upper layer on teardown of an HT-immediate
5593  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5594  */
5595 static void
5596 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
5597 {
5598 	struct ieee80211com *ic = ni->ni_ic;
5599 	struct iwn_softc *sc = ic->ic_ifp->if_softc;
5600 	struct iwn_ops *ops = &sc->ops;
5601 	struct iwn_node *wn = (void *)ni;
5602 	struct iwn_node_info node;
5603 	uint8_t tid;
5604 
5605 	/* XXX: tid as an argument */
5606 	for (tid = 0; tid < WME_NUM_TID; tid++) {
5607 		if (&ni->ni_rx_ampdu[tid] == rap)
5608 			break;
5609 	}
5610 
5611 	memset(&node, 0, sizeof node);
5612 	node.id = wn->id;
5613 	node.control = IWN_NODE_UPDATE;
5614 	node.flags = IWN_FLAG_SET_DELBA;
5615 	node.delba_tid = tid;
5616 	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5617 	(void)ops->add_node(sc, &node, 1);
5618 	sc->sc_ampdu_rx_stop(ni, rap);
5619 }
5620 
5621 static int
5622 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5623     int dialogtoken, int baparamset, int batimeout)
5624 {
5625 	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5626 	int qid;
5627 
5628 	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
5629 		if (sc->qid2tap[qid] == NULL)
5630 			break;
5631 	}
5632 	if (qid == sc->ntxqs) {
5633 		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: no free aggregation queue\n",
5634 		    __func__);
5635 		return 0;
5636 	}
5637 	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
5638 	if (tap->txa_private == NULL) {
5639 		device_printf(sc->sc_dev,
5640 		    "%s: failed to alloc TX aggregation structure\n", __func__);
5641 		return 0;
5642 	}
5643 	sc->qid2tap[qid] = tap;
5644 	*(int *)tap->txa_private = qid;
5645 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5646 	    batimeout);
5647 }
5648 
5649 static int
5650 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5651     int code, int baparamset, int batimeout)
5652 {
5653 	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5654 	int qid = *(int *)tap->txa_private;
5655 	uint8_t tid = tap->txa_tid;
5656 	int ret;
5657 
5658 	if (code == IEEE80211_STATUS_SUCCESS) {
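		/* 802.11 sequence numbers are 12-bit, hence the 0xfff mask. */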
5659 		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
5660 		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
5661 		if (ret != 1)
5662 			return ret;
5663 	} else {
5664 		sc->qid2tap[qid] = NULL;
5665 		free(tap->txa_private, M_DEVBUF);
5666 		tap->txa_private = NULL;
5667 	}
5668 	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
5669 }
5670 
5671 /*
5672  * This function is called by the upper layer when an ADDBA response is
5673  * received from another STA.
5674  */
5675 static int
5676 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5677     uint8_t tid)
5678 {
5679 	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
5680 	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5681 	struct iwn_ops *ops = &sc->ops;
5682 	struct iwn_node *wn = (void *)ni;
5683 	struct iwn_node_info node;
5684 	int error, qid;
5685 
5686 	/* Enable TX for the specified RA/TID. */
5687 	wn->disable_tid &= ~(1 << tid);
5688 	memset(&node, 0, sizeof node);
5689 	node.id = wn->id;
5690 	node.control = IWN_NODE_UPDATE;
5691 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5692 	node.disable_tid = htole16(wn->disable_tid);
5693 	error = ops->add_node(sc, &node, 1);
5694 	if (error != 0)
5695 		return 0;
5696 
5697 	if ((error = iwn_nic_lock(sc)) != 0)
5698 		return 0;
5699 	qid = *(int *)tap->txa_private;
5700 	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
5701 	    __func__, wn->id, tid, tap->txa_start, qid);
5702 	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
5703 	iwn_nic_unlock(sc);
5704 
5705 	iwn_set_link_quality(sc, ni);
5706 	return 1;
5707 }
5708 
5709 static void
5710 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5711 {
5712 	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5713 	struct iwn_ops *ops = &sc->ops;
5714 	uint8_t tid = tap->txa_tid;
5715 	int qid;
5716 
5717 	sc->sc_addba_stop(ni, tap);
5718 
5719 	if (tap->txa_private == NULL)
5720 		return;
5721 
5722 	qid = *(int *)tap->txa_private;
5723 	if (sc->txq[qid].queued != 0)
5724 		return;
5725 	if (iwn_nic_lock(sc) != 0)
5726 		return;
5727 	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
5728 	iwn_nic_unlock(sc);
5729 	sc->qid2tap[qid] = NULL;
5730 	free(tap->txa_private, M_DEVBUF);
5731 	tap->txa_private = NULL;
5732 }
5733 
5734 static void
5735 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5736     int qid, uint8_t tid, uint16_t ssn)
5737 {
5738 	struct iwn_node *wn = (void *)ni;
5739 
5740 	/* Stop TX scheduler while we're changing its configuration. */
5741 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5742 	    IWN4965_TXQ_STATUS_CHGACT);
5743 
5744 	/* Assign RA/TID translation to the queue. */
5745 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5746 	    wn->id << 4 | tid);
5747 
5748 	/* Enable chain-building mode for the queue. */
5749 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5750 
5751 	/* Set starting sequence number from the ADDBA request. */
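	/*
	 * The TX ring holds 256 descriptors, so the low 8 bits of the SSN
	 * map directly onto a ring index.
	 */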
5752 	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
5753 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5754 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5755 
5756 	/* Set scheduler window size. */
5757 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5758 	    IWN_SCHED_WINSZ);
5759 	/* Set scheduler frame limit. */
5760 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5761 	    IWN_SCHED_LIMIT << 16);
5762 
5763 	/* Enable interrupts for the queue. */
5764 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5765 
5766 	/* Mark the queue as active. */
5767 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5768 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5769 	    iwn_tid2fifo[tid] << 1);
5770 }
5771 
5772 static void
5773 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
5774 {
5775 	/* Stop TX scheduler while we're changing its configuration. */
5776 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5777 	    IWN4965_TXQ_STATUS_CHGACT);
5778 
5779 	/* Set starting sequence number from the ADDBA request. */
5780 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5781 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5782 
5783 	/* Disable interrupts for the queue. */
5784 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5785 
5786 	/* Mark the queue as inactive. */
5787 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5788 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5789 }
5790 
5791 static void
5792 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5793     int qid, uint8_t tid, uint16_t ssn)
5794 {
5795 	struct iwn_node *wn = (void *)ni;
5796 
5797 	/* Stop TX scheduler while we're changing its configuration. */
5798 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5799 	    IWN5000_TXQ_STATUS_CHGACT);
5800 
5801 	/* Assign RA/TID translation to the queue. */
5802 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5803 	    wn->id << 4 | tid);
5804 
5805 	/* Enable chain-building mode for the queue. */
5806 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5807 
5808 	/* Enable aggregation for the queue. */
5809 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5810 
5811 	/* Set starting sequence number from the ADDBA request. */
5812 	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
5813 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5814 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5815 
5816 	/* Set scheduler window size and frame limit. */
5817 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5818 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5819 
5820 	/* Enable interrupts for the queue. */
5821 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5822 
5823 	/* Mark the queue as active. */
5824 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5825 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5826 }
5827 
5828 static void
5829 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
5830 {
5831 	/* Stop TX scheduler while we're changing its configuration. */
5832 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5833 	    IWN5000_TXQ_STATUS_CHGACT);
5834 
5835 	/* Disable aggregation for the queue. */
5836 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5837 
5838 	/* Set starting sequence number from the ADDBA request. */
5839 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5840 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5841 
5842 	/* Disable interrupts for the queue. */
5843 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5844 
5845 	/* Mark the queue as inactive. */
5846 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5847 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5848 }
5849 
5850 /*
5851  * Query calibration tables from the initialization firmware.  We do this
5852  * only once at first boot.  Called from a process context.
5853  */
5854 static int
5855 iwn5000_query_calibration(struct iwn_softc *sc)
5856 {
5857 	struct iwn5000_calib_config cmd;
5858 	int error;
5859 
5860 	memset(&cmd, 0, sizeof cmd);
5861 	cmd.ucode.once.enable = 0xffffffff;
5862 	cmd.ucode.once.start  = 0xffffffff;
5863 	cmd.ucode.once.send   = 0xffffffff;
5864 	cmd.ucode.flags       = 0xffffffff;
5865 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
5866 	    __func__);
5867 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5868 	if (error != 0)
5869 		return error;
5870 
5871 	/* Wait at most two seconds for calibration to complete. */
5872 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5873 		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
5874 	return error;
5875 }
5876 
5877 /*
5878  * Send calibration results to the runtime firmware.  These results were
5879  * obtained on first boot from the initialization firmware.
5880  */
5881 static int
5882 iwn5000_send_calibration(struct iwn_softc *sc)
5883 {
5884 	int idx, error;
5885 
5886 	for (idx = 0; idx < 5; idx++) {
5887 		if (sc->calibcmd[idx].buf == NULL)
5888 			continue;	/* No results available. */
5889 		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5890 		    "send calibration result idx=%d len=%d\n", idx,
5891 		    sc->calibcmd[idx].len);
5892 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
5893 		    sc->calibcmd[idx].len, 0);
5894 		if (error != 0) {
5895 			device_printf(sc->sc_dev,
5896 			    "%s: could not send calibration result, error %d\n",
5897 			    __func__, error);
5898 			return error;
5899 		}
5900 	}
5901 	return 0;
5902 }
5903 
5904 static int
5905 iwn5000_send_wimax_coex(struct iwn_softc *sc)
5906 {
5907 	struct iwn5000_wimax_coex wimax;
5908 
5909 #ifdef notyet
5910 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5911 		/* Enable WiMAX coexistence for combo adapters. */
5912 		wimax.flags =
5913 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5914 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5915 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
5916 		    IWN_WIMAX_COEX_ENABLE;
5917 		memcpy(wimax.events, iwn6050_wimax_events,
5918 		    sizeof iwn6050_wimax_events);
5919 	} else
5920 #endif
5921 	{
5922 		/* Disable WiMAX coexistence. */
5923 		wimax.flags = 0;
5924 		memset(wimax.events, 0, sizeof wimax.events);
5925 	}
5926 	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5927 	    __func__);
5928 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5929 }
5930 
5931 static int
5932 iwn5000_crystal_calib(struct iwn_softc *sc)
5933 {
5934 	struct iwn5000_phy_calib_crystal cmd;
5935 
5936 	memset(&cmd, 0, sizeof cmd);
5937 	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5938 	cmd.ngroups = 1;
5939 	cmd.isvalid = 1;
5940 	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
5941 	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
5942 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
5943 	    cmd.cap_pin[0], cmd.cap_pin[1]);
5944 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5945 }
5946 
5947 static int
5948 iwn5000_temp_offset_calib(struct iwn_softc *sc)
5949 {
5950 	struct iwn5000_phy_calib_temp_offset cmd;
5951 
5952 	memset(&cmd, 0, sizeof cmd);
5953 	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
5954 	cmd.ngroups = 1;
5955 	cmd.isvalid = 1;
5956 	if (sc->eeprom_temp != 0)
5957 		cmd.offset = htole16(sc->eeprom_temp);
5958 	else
5959 		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
5960 	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
5961 	    le16toh(cmd.offset));
5962 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5963 }
5964 
5965 /*
5966  * This function is called after the runtime firmware notifies us of its
5967  * readiness (called in a process context).
5968  */
5969 static int
5970 iwn4965_post_alive(struct iwn_softc *sc)
5971 {
5972 	int error, qid;
5973 
5974 	if ((error = iwn_nic_lock(sc)) != 0)
5975 		return error;
5976 
5977 	/* Clear TX scheduler state in SRAM. */
5978 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5979 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5980 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5981 
5982 	/* Set physical address of TX scheduler rings (1KB aligned). */
5983 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5984 
5985 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5986 
5987 	/* Disable chain mode for all our 16 queues. */
5988 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5989 
5990 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5991 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5992 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5993 
5994 		/* Set scheduler window size. */
5995 		iwn_mem_write(sc, sc->sched_base +
5996 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5997 		/* Set scheduler frame limit. */
5998 		iwn_mem_write(sc, sc->sched_base +
5999 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6000 		    IWN_SCHED_LIMIT << 16);
6001 	}
6002 
6003 	/* Enable interrupts for all our 16 queues. */
6004 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6005 	/* Identify TX FIFO rings (0-7). */
6006 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6007 
6008 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6009 	for (qid = 0; qid < 7; qid++) {
6010 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6011 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6012 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6013 	}
6014 	iwn_nic_unlock(sc);
6015 	return 0;
6016 }
6017 
6018 /*
6019  * This function is called after the initialization or runtime firmware
6020  * notifies us of its readiness (called in a process context).
6021  */
6022 static int
6023 iwn5000_post_alive(struct iwn_softc *sc)
6024 {
6025 	int error, qid;
6026 
6027 	/* Switch to using ICT interrupt mode. */
6028 	iwn5000_ict_reset(sc);
6029 
6030 	if ((error = iwn_nic_lock(sc)) != 0)
6031 		return error;
6032 
6033 	/* Clear TX scheduler state in SRAM. */
6034 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6035 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6036 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6037 
6038 	/* Set physical address of TX scheduler rings (1KB aligned). */
6039 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6040 
6041 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6042 
6043 	/* Enable chain mode for all queues, except command queue. */
6044 	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6045 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6046 
6047 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6048 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6049 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6050 
6051 		iwn_mem_write(sc, sc->sched_base +
6052 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6053 		/* Set scheduler window size and frame limit. */
6054 		iwn_mem_write(sc, sc->sched_base +
6055 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6056 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6057 	}
6058 
6059 	/* Enable interrupts for all our 20 queues. */
6060 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6061 	/* Identify TX FIFO rings (0-7). */
6062 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6063 
6064 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6065 	for (qid = 0; qid < 7; qid++) {
6066 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6067 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6068 		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6069 	}
6070 	iwn_nic_unlock(sc);
6071 
6072 	/* Configure WiMAX coexistence for combo adapters. */
6073 	error = iwn5000_send_wimax_coex(sc);
6074 	if (error != 0) {
6075 		device_printf(sc->sc_dev,
6076 		    "%s: could not configure WiMAX coexistence, error %d\n",
6077 		    __func__, error);
6078 		return error;
6079 	}
6080 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6081 		/* Perform crystal calibration. */
6082 		error = iwn5000_crystal_calib(sc);
6083 		if (error != 0) {
6084 			device_printf(sc->sc_dev,
6085 			    "%s: crystal calibration failed, error %d\n",
6086 			    __func__, error);
6087 			return error;
6088 		}
6089 	}
6090 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6091 		/* Query calibration from the initialization firmware. */
6092 		if ((error = iwn5000_query_calibration(sc)) != 0) {
6093 			device_printf(sc->sc_dev,
6094 			    "%s: could not query calibration, error %d\n",
6095 			    __func__, error);
6096 			return error;
6097 		}
6098 		/*
6099 		 * We have the calibration results now; reboot with the
6100 		 * runtime firmware (we call ourselves recursively!).
6101 		 */
6102 		iwn_hw_stop(sc);
6103 		error = iwn_hw_init(sc);
6104 	} else {
6105 		/* Send calibration results to runtime firmware. */
6106 		error = iwn5000_send_calibration(sc);
6107 	}
6108 	return error;
6109 }
6110 
6111 /*
6112  * The firmware boot code is small and is intended to be copied directly into
6113  * the NIC internal memory (no DMA transfer).
6114  */
6115 static int
6116 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6117 {
6118 	int error, ntries;
6119 
6120 	size /= sizeof (uint32_t);
6121 
6122 	if ((error = iwn_nic_lock(sc)) != 0)
6123 		return error;
6124 
6125 	/* Copy microcode image into NIC memory. */
6126 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6127 	    (const uint32_t *)ucode, size);
6128 
6129 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6130 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6131 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6132 
6133 	/* Start boot load now. */
6134 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6135 
6136 	/* Wait for transfer to complete. */
6137 	for (ntries = 0; ntries < 1000; ntries++) {
6138 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6139 		    IWN_BSM_WR_CTRL_START))
6140 			break;
6141 		DELAY(10);
6142 	}
6143 	if (ntries == 1000) {
6144 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6145 		    __func__);
6146 		iwn_nic_unlock(sc);
6147 		return ETIMEDOUT;
6148 	}
6149 
6150 	/* Enable boot after power up. */
6151 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6152 
6153 	iwn_nic_unlock(sc);
6154 	return 0;
6155 }
6156 
6157 static int
6158 iwn4965_load_firmware(struct iwn_softc *sc)
6159 {
6160 	struct iwn_fw_info *fw = &sc->fw;
6161 	struct iwn_dma_info *dma = &sc->fw_dma;
6162 	int error;
6163 
6164 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6165 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6166 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6167 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6168 	    fw->init.text, fw->init.textsz);
6169 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6170 
6171 	/* Tell adapter where to find initialization sections. */
6172 	if ((error = iwn_nic_lock(sc)) != 0)
6173 		return error;
6174 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6175 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6176 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6177 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6178 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6179 	iwn_nic_unlock(sc);
6180 
6181 	/* Load firmware boot code. */
6182 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6183 	if (error != 0) {
6184 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6185 		    __func__);
6186 		return error;
6187 	}
6188 	/* Now press "execute". */
6189 	IWN_WRITE(sc, IWN_RESET, 0);
6190 
6191 	/* Wait at most one second for first alive notification. */
6192 	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6193 		device_printf(sc->sc_dev,
6194 		    "%s: timeout waiting for adapter to initialize, error %d\n",
6195 		    __func__, error);
6196 		return error;
6197 	}
6198 
6199 	/* Retrieve current temperature for initial TX power calibration. */
6200 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6201 	sc->temp = iwn4965_get_temperature(sc);
6202 
6203 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6204 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6205 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6206 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6207 	    fw->main.text, fw->main.textsz);
6208 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6209 
6210 	/* Tell adapter where to find runtime sections. */
6211 	if ((error = iwn_nic_lock(sc)) != 0)
6212 		return error;
6213 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6214 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6215 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6216 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6217 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6218 	    IWN_FW_UPDATED | fw->main.textsz);
6219 	iwn_nic_unlock(sc);
6220 
6221 	return 0;
6222 }
6223 
6224 static int
6225 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6226     const uint8_t *section, int size)
6227 {
6228 	struct iwn_dma_info *dma = &sc->fw_dma;
6229 	int error;
6230 
6231 	/* Copy firmware section into pre-allocated DMA-safe memory. */
6232 	memcpy(dma->vaddr, section, size);
6233 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6234 
6235 	if ((error = iwn_nic_lock(sc)) != 0)
6236 		return error;
6237 
6238 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6239 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6240 
6241 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6242 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6243 	    IWN_LOADDR(dma->paddr));
6244 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6245 	    IWN_HIADDR(dma->paddr) << 28 | size);
6246 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6247 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6248 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6249 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6250 
6251 	/* Kick Flow Handler to start DMA transfer. */
6252 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6253 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6254 
6255 	iwn_nic_unlock(sc);
6256 
6257 	/* Wait at most five seconds for FH DMA transfer to complete. */
6258 	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
6259 }
6260 
6261 static int
6262 iwn5000_load_firmware(struct iwn_softc *sc)
6263 {
6264 	struct iwn_fw_part *fw;
6265 	int error;
6266 
6267 	/* Load the initialization firmware on first boot only. */
6268 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6269 	    &sc->fw.main : &sc->fw.init;
6270 
6271 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6272 	    fw->text, fw->textsz);
6273 	if (error != 0) {
6274 		device_printf(sc->sc_dev,
6275 		    "%s: could not load firmware %s section, error %d\n",
6276 		    __func__, ".text", error);
6277 		return error;
6278 	}
6279 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6280 	    fw->data, fw->datasz);
6281 	if (error != 0) {
6282 		device_printf(sc->sc_dev,
6283 		    "%s: could not load firmware %s section, error %d\n",
6284 		    __func__, ".data", error);
6285 		return error;
6286 	}
6287 
6288 	/* Now press "execute". */
6289 	IWN_WRITE(sc, IWN_RESET, 0);
6290 	return 0;
6291 }
6292 
6293 /*
6294  * Extract text and data sections from a legacy firmware image.
6295  */
6296 static int
6297 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6298 {
6299 	const uint32_t *ptr;
6300 	size_t hdrlen = 24;
6301 	uint32_t rev;
6302 
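	/*
	 * A legacy (pre-TLV) image starts with a 24-byte header: an API
	 * revision word followed by the five section sizes read below.
	 * API version >=3 images carry an extra build-number word.
	 */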
6303 	ptr = (const uint32_t *)fw->data;
6304 	rev = le32toh(*ptr++);
6305 
6306 	/* Check firmware API version. */
6307 	if (IWN_FW_API(rev) <= 1) {
6308 		device_printf(sc->sc_dev,
6309 		    "%s: bad firmware, need API version >=2\n", __func__);
6310 		return EINVAL;
6311 	}
6312 	if (IWN_FW_API(rev) >= 3) {
6313 		/* Skip build number (version 2 header). */
6314 		hdrlen += 4;
6315 		ptr++;
6316 	}
6317 	if (fw->size < hdrlen) {
6318 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6319 		    __func__, fw->size);
6320 		return EINVAL;
6321 	}
6322 	fw->main.textsz = le32toh(*ptr++);
6323 	fw->main.datasz = le32toh(*ptr++);
6324 	fw->init.textsz = le32toh(*ptr++);
6325 	fw->init.datasz = le32toh(*ptr++);
6326 	fw->boot.textsz = le32toh(*ptr++);
6327 
6328 	/* Check that all firmware sections fit. */
6329 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6330 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6331 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6332 		    __func__, fw->size);
6333 		return EINVAL;
6334 	}
6335 
6336 	/* Get pointers to firmware sections. */
6337 	fw->main.text = (const uint8_t *)ptr;
6338 	fw->main.data = fw->main.text + fw->main.textsz;
6339 	fw->init.text = fw->main.data + fw->main.datasz;
6340 	fw->init.data = fw->init.text + fw->init.textsz;
6341 	fw->boot.text = fw->init.data + fw->init.datasz;
6342 	return 0;
6343 }
6344 
6345 /*
6346  * Extract text and data sections from a TLV firmware image.
6347  */
6348 static int
6349 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6350     uint16_t alt)
6351 {
6352 	const struct iwn_fw_tlv_hdr *hdr;
6353 	const struct iwn_fw_tlv *tlv;
6354 	const uint8_t *ptr, *end;
6355 	uint64_t altmask;
6356 	uint32_t len, tmp;
6357 
6358 	if (fw->size < sizeof (*hdr)) {
6359 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6360 		    __func__, fw->size);
6361 		return EINVAL;
6362 	}
6363 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6364 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6365 		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
6366 		    __func__, le32toh(hdr->signature));
6367 		return EINVAL;
6368 	}
6369 	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
6370 	    le32toh(hdr->build));
6371 
6372 	/*
6373 	 * Select the closest supported alternative that is less than
6374 	 * or equal to the specified one.
6375 	 */
6376 	altmask = le64toh(hdr->altmask);
6377 	while (alt > 0 && !(altmask & (1ULL << alt)))
6378 		alt--;	/* Downgrade. */
6379 	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
6380 
6381 	ptr = (const uint8_t *)(hdr + 1);
6382 	end = (const uint8_t *)(fw->data + fw->size);
6383 
6384 	/* Parse type-length-value fields. */
6385 	while (ptr + sizeof (*tlv) <= end) {
6386 		tlv = (const struct iwn_fw_tlv *)ptr;
6387 		len = le32toh(tlv->len);
6388 
6389 		ptr += sizeof (*tlv);
6390 		if (ptr + len > end) {
6391 			device_printf(sc->sc_dev,
6392 			    "%s: firmware too short: %zu bytes\n", __func__,
6393 			    fw->size);
6394 			return EINVAL;
6395 		}
6396 		/* Skip other alternatives. */
6397 		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6398 			goto next;
6399 
6400 		switch (le16toh(tlv->type)) {
6401 		case IWN_FW_TLV_MAIN_TEXT:
6402 			fw->main.text = ptr;
6403 			fw->main.textsz = len;
6404 			break;
6405 		case IWN_FW_TLV_MAIN_DATA:
6406 			fw->main.data = ptr;
6407 			fw->main.datasz = len;
6408 			break;
6409 		case IWN_FW_TLV_INIT_TEXT:
6410 			fw->init.text = ptr;
6411 			fw->init.textsz = len;
6412 			break;
6413 		case IWN_FW_TLV_INIT_DATA:
6414 			fw->init.data = ptr;
6415 			fw->init.datasz = len;
6416 			break;
6417 		case IWN_FW_TLV_BOOT_TEXT:
6418 			fw->boot.text = ptr;
6419 			fw->boot.textsz = len;
6420 			break;
6421 		case IWN_FW_TLV_ENH_SENS:
6422 			if (!len)
6423 				sc->sc_flags |= IWN_FLAG_ENH_SENS;
6424 			break;
6425 		case IWN_FW_TLV_PHY_CALIB:
6426 			tmp = htole32(*ptr);
6427 			if (tmp < 253) {
6428 				sc->reset_noise_gain = tmp;
6429 				sc->noise_gain = tmp + 1;
6430 			}
6431 			break;
6432 		default:
6433 			DPRINTF(sc, IWN_DEBUG_RESET,
6434 			    "TLV type %d not handled\n", le16toh(tlv->type));
6435 			break;
6436 		}
6437  next:		/* TLV fields are 32-bit aligned. */
6438 		ptr += (len + 3) & ~3;
6439 	}
6440 	return 0;
6441 }
6442 
6443 static int
6444 iwn_read_firmware(struct iwn_softc *sc)
6445 {
6446 	struct iwn_fw_info *fw = &sc->fw;
6447 	int error;
6448 
6449 	IWN_UNLOCK(sc);
6450 
6451 	memset(fw, 0, sizeof (*fw));
6452 
6453 	/* Read firmware image from filesystem. */
6454 	sc->fw_fp = firmware_get(sc->fwname);
6455 	if (sc->fw_fp == NULL) {
6456 		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
6457 		    __func__, sc->fwname);
6458 		IWN_LOCK(sc);
6459 		return EINVAL;
6460 	}
6461 	IWN_LOCK(sc);
6462 
6463 	fw->size = sc->fw_fp->datasize;
6464 	fw->data = (const uint8_t *)sc->fw_fp->data;
6465 	if (fw->size < sizeof (uint32_t)) {
6466 		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6467 		    __func__, fw->size);
6468 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6469 		sc->fw_fp = NULL;
6470 		return EINVAL;
6471 	}
6472 
6473 	/* Retrieve text and data sections. */
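	/*
	 * TLV images begin with a 32-bit zero word followed by the
	 * IWN_FW_SIGNATURE magic, so a non-zero first word identifies the
	 * legacy format.
	 */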
6474 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6475 		error = iwn_read_firmware_leg(sc, fw);
6476 	else
6477 		error = iwn_read_firmware_tlv(sc, fw, 1);
6478 	if (error != 0) {
6479 		device_printf(sc->sc_dev,
6480 		    "%s: could not read firmware sections, error %d\n",
6481 		    __func__, error);
6482 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6483 		sc->fw_fp = NULL;
6484 		return error;
6485 	}
6486 
6487 	/* Make sure text and data sections fit in hardware memory. */
6488 	if (fw->main.textsz > sc->fw_text_maxsz ||
6489 	    fw->main.datasz > sc->fw_data_maxsz ||
6490 	    fw->init.textsz > sc->fw_text_maxsz ||
6491 	    fw->init.datasz > sc->fw_data_maxsz ||
6492 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6493 	    (fw->boot.textsz & 3) != 0) {
6494 		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
6495 		    __func__);
6496 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6497 		sc->fw_fp = NULL;
6498 		return EINVAL;
6499 	}
6500 
6501 	/* We can proceed with loading the firmware. */
6502 	return 0;
6503 }
6504 
6505 static int
6506 iwn_clock_wait(struct iwn_softc *sc)
6507 {
6508 	int ntries;
6509 
6510 	/* Set "initialization complete" bit. */
6511 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6512 
6513 	/* Wait for clock stabilization. */
6514 	for (ntries = 0; ntries < 2500; ntries++) {
6515 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6516 			return 0;
6517 		DELAY(10);
6518 	}
6519 	device_printf(sc->sc_dev,
6520 	    "%s: timeout waiting for clock stabilization\n", __func__);
6521 	return ETIMEDOUT;
6522 }
6523 
6524 static int
6525 iwn_apm_init(struct iwn_softc *sc)
6526 {
6527 	uint32_t reg;
6528 	int error;
6529 
6530 	/* Disable L0s exit timer (NMI bug workaround). */
6531 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6532 	/* Don't wait for ICH L0s (ICH bug workaround). */
6533 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6534 
6535 	/* Set FH wait threshold to max (HW bug under stress workaround). */
6536 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6537 
6538 	/* Enable HAP INTA to move adapter from L1a to L0s. */
6539 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6540 
6541 	/* Retrieve PCIe Active State Power Management (ASPM). */
6542 	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
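	/*
	 * Offset 0x10 into the PCIe capability is the Link Control
	 * register; bit 1 is the ASPM L1 Entry Enable bit tested below.
	 */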
6543 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6544 	if (reg & 0x02)	/* L1 Entry enabled. */
6545 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6546 	else
6547 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6548 
6549 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6550 	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6551 		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6552 
6553 	/* Wait for clock stabilization before accessing prph. */
6554 	if ((error = iwn_clock_wait(sc)) != 0)
6555 		return error;
6556 
6557 	if ((error = iwn_nic_lock(sc)) != 0)
6558 		return error;
6559 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6560 		/* Enable DMA and BSM (Bootstrap State Machine). */
6561 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6562 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6563 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6564 	} else {
6565 		/* Enable DMA. */
6566 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6567 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6568 	}
6569 	DELAY(20);
6570 	/* Disable L1-Active. */
6571 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6572 	iwn_nic_unlock(sc);
6573 
6574 	return 0;
6575 }
6576 
6577 static void
6578 iwn_apm_stop_master(struct iwn_softc *sc)
6579 {
6580 	int ntries;
6581 
6582 	/* Stop busmaster DMA activity. */
6583 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6584 	for (ntries = 0; ntries < 100; ntries++) {
6585 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6586 			return;
6587 		DELAY(10);
6588 	}
6589 	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
6590 }
6591 
6592 static void
6593 iwn_apm_stop(struct iwn_softc *sc)
6594 {
6595 	iwn_apm_stop_master(sc);
6596 
6597 	/* Reset the entire device. */
6598 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6599 	DELAY(10);
6600 	/* Clear "initialization complete" bit. */
6601 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6602 }
6603 
6604 static int
6605 iwn4965_nic_config(struct iwn_softc *sc)
6606 {
6607 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6608 		/*
6609 		 * I don't believe this to be correct but this is what the
6610 		 * vendor driver is doing. Probably the bits should not be
6611 		 * shifted in IWN_RFCFG_*.
6612 		 */
6613 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6614 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6615 		    IWN_RFCFG_STEP(sc->rfcfg) |
6616 		    IWN_RFCFG_DASH(sc->rfcfg));
6617 	}
6618 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6619 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6620 	return 0;
6621 }
6622 
6623 static int
6624 iwn5000_nic_config(struct iwn_softc *sc)
6625 {
6626 	uint32_t tmp;
6627 	int error;
6628 
6629 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6630 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6631 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6632 		    IWN_RFCFG_STEP(sc->rfcfg) |
6633 		    IWN_RFCFG_DASH(sc->rfcfg));
6634 	}
6635 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6636 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6637 
6638 	if ((error = iwn_nic_lock(sc)) != 0)
6639 		return error;
6640 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6641 
6642 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6643 		/*
6644 		 * Select first Switching Voltage Regulator (1.32V) to
6645 		 * solve a stability issue related to noisy DC2DC line
6646 		 * in the silicon of 1000 Series.
6647 		 */
6648 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6649 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6650 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6651 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6652 	}
6653 	iwn_nic_unlock(sc);
6654 
6655 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6656 		/* Use internal power amplifier only. */
6657 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6658 	}
6659 	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6660 	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6661 		/* Indicate that ROM calibration version is >=6. */
6662 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6663 	}
6664 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6665 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6666 	return 0;
6667 }
6668 
6669 /*
6670  * Take NIC ownership over Intel Active Management Technology (AMT).
6671  */
6672 static int
6673 iwn_hw_prepare(struct iwn_softc *sc)
6674 {
6675 	int ntries;
6676 
6677 	/* Check if hardware is ready. */
6678 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6679 	for (ntries = 0; ntries < 5; ntries++) {
6680 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6681 		    IWN_HW_IF_CONFIG_NIC_READY)
6682 			return 0;
6683 		DELAY(10);
6684 	}
6685 
6686 	/* Hardware not ready, force into ready state. */
6687 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6688 	for (ntries = 0; ntries < 15000; ntries++) {
6689 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6690 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6691 			break;
6692 		DELAY(10);
6693 	}
6694 	if (ntries == 15000)
6695 		return ETIMEDOUT;
6696 
6697 	/* Hardware should be ready now. */
6698 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6699 	for (ntries = 0; ntries < 5; ntries++) {
6700 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6701 		    IWN_HW_IF_CONFIG_NIC_READY)
6702 			return 0;
6703 		DELAY(10);
6704 	}
6705 	return ETIMEDOUT;
6706 }
6707 
6708 static int
6709 iwn_hw_init(struct iwn_softc *sc)
6710 {
6711 	struct iwn_ops *ops = &sc->ops;
6712 	int error, chnl, qid;
6713 
6714 	/* Clear pending interrupts. */
6715 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6716 
6717 	if ((error = iwn_apm_init(sc)) != 0) {
6718 		device_printf(sc->sc_dev,
6719 		    "%s: could not power ON adapter, error %d\n", __func__,
6720 		    error);
6721 		return error;
6722 	}
6723 
6724 	/* Select VMAIN power source. */
6725 	if ((error = iwn_nic_lock(sc)) != 0)
6726 		return error;
6727 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6728 	iwn_nic_unlock(sc);
6729 
6730 	/* Perform adapter-specific initialization. */
6731 	if ((error = ops->nic_config(sc)) != 0)
6732 		return error;
6733 
6734 	/* Initialize RX ring. */
6735 	if ((error = iwn_nic_lock(sc)) != 0)
6736 		return error;
6737 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6738 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6739 	/* Set physical address of RX ring (256-byte aligned). */
6740 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6741 	/* Set physical address of RX status (16-byte aligned). */
6742 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6743 	/* Enable RX. */
6744 	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6745 	    IWN_FH_RX_CONFIG_ENA           |
6746 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6747 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6748 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6749 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6750 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6751 	iwn_nic_unlock(sc);
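	/* Seed the RX write pointer with the last index, rounded down to a multiple of 8. */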
6752 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6753 
6754 	if ((error = iwn_nic_lock(sc)) != 0)
6755 		return error;
6756 
6757 	/* Initialize TX scheduler. */
6758 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6759 
6760 	/* Set physical address of "keep warm" page (16-byte aligned). */
6761 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6762 
6763 	/* Initialize TX rings. */
6764 	for (qid = 0; qid < sc->ntxqs; qid++) {
6765 		struct iwn_tx_ring *txq = &sc->txq[qid];
6766 
6767 		/* Set physical address of TX ring (256-byte aligned). */
6768 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6769 		    txq->desc_dma.paddr >> 8);
6770 	}
6771 	iwn_nic_unlock(sc);
6772 
6773 	/* Enable DMA channels. */
6774 	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6775 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6776 		    IWN_FH_TX_CONFIG_DMA_ENA |
6777 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6778 	}
6779 
6780 	/* Clear "radio off" and "commands blocked" bits. */
6781 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6782 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6783 
6784 	/* Clear pending interrupts. */
6785 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6786 	/* Enable interrupt coalescing. */
6787 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6788 	/* Enable interrupts. */
6789 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6790 
6791 	/* _Really_ make sure "radio off" bit is cleared! */
6792 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6793 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6794 
6795 	/* Enable shadow registers. */
6796 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
6797 		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
6798 
6799 	if ((error = ops->load_firmware(sc)) != 0) {
6800 		device_printf(sc->sc_dev,
6801 		    "%s: could not load firmware, error %d\n", __func__,
6802 		    error);
6803 		return error;
6804 	}
6805 	/* Wait at most one second for firmware alive notification. */
6806 	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6807 		device_printf(sc->sc_dev,
6808 		    "%s: timeout waiting for adapter to initialize, error %d\n",
6809 		    __func__, error);
6810 		return error;
6811 	}
6812 	/* Do post-firmware initialization. */
6813 	return ops->post_alive(sc);
6814 }
6815 
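/*
 * Shut the hardware down: mask interrupts, stop the TX scheduler and
 * DMA channels, reset all rings and power the adapter OFF.
 */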
6816 static void
6817 iwn_hw_stop(struct iwn_softc *sc)
6818 {
6819 	int chnl, qid, ntries;
6820 
6821 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6822 
6823 	/* Disable interrupts. */
6824 	IWN_WRITE(sc, IWN_INT_MASK, 0);
6825 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6826 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6827 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6828 
6829 	/* Make sure we no longer hold the NIC lock. */
6830 	iwn_nic_unlock(sc);
6831 
6832 	/* Stop TX scheduler. */
6833 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6834 
6835 	/* Stop all DMA channels. */
6836 	if (iwn_nic_lock(sc) == 0) {
6837 		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6838 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6839 			for (ntries = 0; ntries < 200; ntries++) {
6840 				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6841 				    IWN_FH_TX_STATUS_IDLE(chnl))
6842 					break;
6843 				DELAY(10);
6844 			}
6845 		}
6846 		iwn_nic_unlock(sc);
6847 	}
6848 
6849 	/* Stop RX ring. */
6850 	iwn_reset_rx_ring(sc, &sc->rxq);
6851 
6852 	/* Reset all TX rings. */
6853 	for (qid = 0; qid < sc->ntxqs; qid++)
6854 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
6855 
6856 	if (iwn_nic_lock(sc) == 0) {
6857 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6858 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6859 		iwn_nic_unlock(sc);
6860 	}
6861 	DELAY(5);
6862 	/* Power OFF adapter. */
6863 	iwn_apm_stop(sc);
6864 }
6865 
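/*
 * Task callback run when the radio is switched back on: restart the
 * hardware and reinitialize the first vap.
 */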
6866 static void
6867 iwn_radio_on(void *arg0, int pending)
6868 {
6869 	struct iwn_softc *sc = arg0;
6870 	struct ifnet *ifp = sc->sc_ifp;
6871 	struct ieee80211com *ic = ifp->if_l2com;
6872 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6873 
6874 	if (vap != NULL) {
6875 		iwn_init(sc);
6876 		ieee80211_init(vap);
6877 	}
6878 }
6879 
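/*
 * Task callback run when the radio is switched off: stop the hardware
 * and re-enable interrupts so we are notified when the RF kill switch
 * is toggled again.
 */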
6880 static void
6881 iwn_radio_off(void *arg0, int pending)
6882 {
6883 	struct iwn_softc *sc = arg0;
6884 	struct ifnet *ifp = sc->sc_ifp;
6885 	struct ieee80211com *ic = ifp->if_l2com;
6886 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6887 
6888 	iwn_stop(sc);
6889 	if (vap != NULL)
6890 		ieee80211_stop(vap);
6891 
6892 	/* Enable interrupts to get RF toggle notification. */
6893 	IWN_LOCK(sc);
6894 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6895 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6896 	IWN_UNLOCK(sc);
6897 }
6898 
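/*
 * Bring the interface up: prepare the hardware, read and upload the
 * firmware, then configure the adapter.  The driver lock must be held.
 */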
6899 static void
6900 iwn_init_locked(struct iwn_softc *sc)
6901 {
6902 	struct ifnet *ifp = sc->sc_ifp;
6903 	int error;
6904 
6905 	IWN_LOCK_ASSERT(sc);
6906 
6907 	if ((error = iwn_hw_prepare(sc)) != 0) {
6908 		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6909 		    __func__, error);
6910 		goto fail;
6911 	}
6912 
6913 	/* Initialize interrupt mask to default value. */
6914 	sc->int_mask = IWN_INT_MASK_DEF;
6915 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6916 
6917 	/* Check that the radio is not disabled by hardware switch. */
6918 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6919 		device_printf(sc->sc_dev,
6920 		    "radio is disabled by hardware switch\n");
6921 		/* Enable interrupts to get RF toggle notifications. */
6922 		IWN_WRITE(sc, IWN_INT, 0xffffffff);
6923 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6924 		return;
6925 	}
6926 
6927 	/* Read firmware images from the filesystem. */
6928 	if ((error = iwn_read_firmware(sc)) != 0) {
6929 		device_printf(sc->sc_dev,
6930 		    "%s: could not read firmware, error %d\n", __func__,
6931 		    error);
6932 		goto fail;
6933 	}
6934 
6935 	/* Initialize hardware and upload firmware. */
6936 	error = iwn_hw_init(sc);
6937 	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6938 	sc->fw_fp = NULL;
6939 	if (error != 0) {
6940 		device_printf(sc->sc_dev,
6941 		    "%s: could not initialize hardware, error %d\n", __func__,
6942 		    error);
6943 		goto fail;
6944 	}
6945 
6946 	/* Configure adapter now that it is ready. */
6947 	if ((error = iwn_config(sc)) != 0) {
6948 		device_printf(sc->sc_dev,
6949 		    "%s: could not configure device, error %d\n", __func__,
6950 		    error);
6951 		goto fail;
6952 	}
6953 
6954 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6955 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
6956 
6957 	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
6958 	return;
6959 
6960 fail:	iwn_stop_locked(sc);
6961 }
6962 
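/*
 * Locked wrapper around iwn_init_locked(); also starts net80211 once
 * the interface is running.
 */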
6963 static void
6964 iwn_init(void *arg)
6965 {
6966 	struct iwn_softc *sc = arg;
6967 	struct ifnet *ifp = sc->sc_ifp;
6968 	struct ieee80211com *ic = ifp->if_l2com;
6969 
6970 	IWN_LOCK(sc);
6971 	iwn_init_locked(sc);
6972 	IWN_UNLOCK(sc);
6973 
6974 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6975 		ieee80211_start_all(ic);
6976 }
6977 
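/*
 * Stop the interface: cancel timers, clear the running state and power
 * the hardware off.  The driver lock must be held.
 */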
6978 static void
6979 iwn_stop_locked(struct iwn_softc *sc)
6980 {
6981 	struct ifnet *ifp = sc->sc_ifp;
6982 
6983 	IWN_LOCK_ASSERT(sc);
6984 
6985 	sc->sc_tx_timer = 0;
6986 	callout_stop(&sc->watchdog_to);
6987 	callout_stop(&sc->calib_to);
6988 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6989 
6990 	/* Power OFF hardware. */
6991 	iwn_hw_stop(sc);
6992 }
6993 
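/*
 * Locked wrapper around iwn_stop_locked().
 */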
6994 static void
6995 iwn_stop(struct iwn_softc *sc)
6996 {
6997 	IWN_LOCK(sc);
6998 	iwn_stop_locked(sc);
6999 	IWN_UNLOCK(sc);
7000 }
7001 
7002 /*
7003  * Callback from net80211 to start a scan.
7004  */
7005 static void
7006 iwn_scan_start(struct ieee80211com *ic)
7007 {
7008 	struct ifnet *ifp = ic->ic_ifp;
7009 	struct iwn_softc *sc = ifp->if_softc;
7010 
7011 	IWN_LOCK(sc);
7012 	/* make the link LED blink while we're scanning */
7013 	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
7014 	IWN_UNLOCK(sc);
7015 }
7016 
7017 /*
7018  * Callback from net80211 to terminate a scan.
7019  */
7020 static void
7021 iwn_scan_end(struct ieee80211com *ic)
7022 {
7023 	struct ifnet *ifp = ic->ic_ifp;
7024 	struct iwn_softc *sc = ifp->if_softc;
7025 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7026 
7027 	IWN_LOCK(sc);
7028 	if (vap->iv_state == IEEE80211_S_RUN) {
7029 		/* Set link LED to ON status if we are associated */
7030 		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7031 	}
7032 	IWN_UNLOCK(sc);
7033 }
7034 
7035 /*
7036  * Callback from net80211 to force a channel change.
7037  */
7038 static void
7039 iwn_set_channel(struct ieee80211com *ic)
7040 {
7041 	const struct ieee80211_channel *c = ic->ic_curchan;
7042 	struct ifnet *ifp = ic->ic_ifp;
7043 	struct iwn_softc *sc = ifp->if_softc;
7044 	int error;
7045 
7046 	IWN_LOCK(sc);
7047 	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
7048 	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
7049 	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
7050 	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
7051 
7052 	/*
7053 	 * Only need to set the channel in Monitor mode. AP scanning and auth
7054 	 * are already taken care of by their respective firmware commands.
7055 	 */
7056 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7057 		error = iwn_config(sc);
7058 		if (error != 0)
7059 			device_printf(sc->sc_dev,
7060 			    "%s: error %d setting channel\n", __func__, error);
7061 	}
7062 	IWN_UNLOCK(sc);
7063 }
7064 
7065 /*
7066  * Callback from net80211 to start scanning of the current channel.
7067  */
7068 static void
7069 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
7070 {
7071 	struct ieee80211vap *vap = ss->ss_vap;
7072 	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
7073 	int error;
7074 
7075 	IWN_LOCK(sc);
7076 	error = iwn_scan(sc);
7077 	IWN_UNLOCK(sc);
7078 	if (error != 0)
7079 		ieee80211_cancel_scan(vap);
7080 }
7081 
7082 /*
7083  * Callback from net80211 to handle the minimum dwell time being met.
7084  * The intent is to terminate the scan but we just let the firmware
7085  * notify us when it's finished as we have no safe way to abort it.
7086  */
7087 static void
7088 iwn_scan_mindwell(struct ieee80211_scan_state *ss)
7089 {
7090 	/* NB: don't try to abort scan; wait for firmware to finish */
7091 }
7092 
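/*
 * Task callback that fully restarts the adapter: stop the hardware,
 * bring it back up and notify net80211 that the radio is on.
 */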
7093 static void
7094 iwn_hw_reset(void *arg0, int pending)
7095 {
7096 	struct iwn_softc *sc = arg0;
7097 	struct ifnet *ifp = sc->sc_ifp;
7098 	struct ieee80211com *ic = ifp->if_l2com;
7099 
7100 	iwn_stop(sc);
7101 	iwn_init(sc);
7102 	ieee80211_notify_radio(ic, 1);
7103 }
7104