1 /*	$OpenBSD: if_iwn.c,v 1.241 2020/07/27 07:24:03 stsp Exp $	*/
2 
3 /*-
4  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for Intel WiFi Link 4965 and 1000/2000/5000/6000 Series 802.11
21  * network adapters.
22  */
23 
24 #include "bpfilter.h"
25 
26 #include <sys/param.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/kernel.h>
30 #include <sys/rwlock.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/task.h>
37 #include <sys/endian.h>
38 
39 #include <machine/bus.h>
40 #include <machine/intr.h>
41 
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44 #include <dev/pci/pcidevs.h>
45 
46 #if NBPFILTER > 0
47 #include <net/bpf.h>
48 #endif
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #include <netinet/in.h>
54 #include <netinet/if_ether.h>
55 
56 #include <net80211/ieee80211_var.h>
57 #include <net80211/ieee80211_amrr.h>
58 #include <net80211/ieee80211_mira.h>
59 #include <net80211/ieee80211_radiotap.h>
60 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
61 #undef DPRINTF /* defined in ieee80211_priv.h */
62 
63 #include <dev/pci/if_iwnreg.h>
64 #include <dev/pci/if_iwnvar.h>
65 
66 static const struct pci_matchid iwn_devices[] = {
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_1 },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_2 },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_1 },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_2 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_1 },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_2 },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_1 },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_2 },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_1 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_2 },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_1 },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_2 },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_1 },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_2 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_1 },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_2 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_1 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_2 },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_1 },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_2 },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_1 },
88 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_2 },
89 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_1 },
90 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_2 },
91 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_1 },
92 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_2 },
93 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_1 },
94 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_2 },
95 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_1 },
96 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_2 },
97 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_1 },
98 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_2 },
99 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_1 },
100 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_2 },
101 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_1 },
102 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_2 },
103 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_1 },
104 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_2 },
105 };
106 
107 int		iwn_match(struct device *, void *, void *);
108 void		iwn_attach(struct device *, struct device *, void *);
109 int		iwn4965_attach(struct iwn_softc *, pci_product_id_t);
110 int		iwn5000_attach(struct iwn_softc *, pci_product_id_t);
111 #if NBPFILTER > 0
112 void		iwn_radiotap_attach(struct iwn_softc *);
113 #endif
114 int		iwn_detach(struct device *, int);
115 int		iwn_activate(struct device *, int);
116 void		iwn_wakeup(struct iwn_softc *);
117 void		iwn_init_task(void *);
118 int		iwn_nic_lock(struct iwn_softc *);
119 int		iwn_eeprom_lock(struct iwn_softc *);
120 int		iwn_init_otprom(struct iwn_softc *);
121 int		iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
122 int		iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
123 		    void **, bus_size_t, bus_size_t);
124 void		iwn_dma_contig_free(struct iwn_dma_info *);
125 int		iwn_alloc_sched(struct iwn_softc *);
126 void		iwn_free_sched(struct iwn_softc *);
127 int		iwn_alloc_kw(struct iwn_softc *);
128 void		iwn_free_kw(struct iwn_softc *);
129 int		iwn_alloc_ict(struct iwn_softc *);
130 void		iwn_free_ict(struct iwn_softc *);
131 int		iwn_alloc_fwmem(struct iwn_softc *);
132 void		iwn_free_fwmem(struct iwn_softc *);
133 int		iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
134 void		iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
135 void		iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
136 int		iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
137 		    int);
138 void		iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
139 void		iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
140 void		iwn5000_ict_reset(struct iwn_softc *);
141 int		iwn_read_eeprom(struct iwn_softc *);
142 void		iwn4965_read_eeprom(struct iwn_softc *);
143 void		iwn4965_print_power_group(struct iwn_softc *, int);
144 void		iwn5000_read_eeprom(struct iwn_softc *);
145 void		iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
146 void		iwn_read_eeprom_enhinfo(struct iwn_softc *);
147 struct		ieee80211_node *iwn_node_alloc(struct ieee80211com *);
148 void		iwn_newassoc(struct ieee80211com *, struct ieee80211_node *,
149 		    int);
150 int		iwn_media_change(struct ifnet *);
151 int		iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
152 void		iwn_iter_func(void *, struct ieee80211_node *);
153 void		iwn_calib_timeout(void *);
154 int		iwn_ccmp_decap(struct iwn_softc *, struct mbuf *,
155 		    struct ieee80211_node *);
156 void		iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
157 		    struct iwn_rx_data *);
158 void		iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
159 		    struct iwn_rx_data *, struct mbuf_list *);
160 void		iwn_mira_choose(struct iwn_softc *, struct ieee80211_node *);
161 void		iwn_ampdu_rate_control(struct iwn_softc *, struct ieee80211_node *,
162 		    struct iwn_tx_ring *, int, uint16_t, uint16_t);
163 void		iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
164 		    struct iwn_rx_data *);
165 void		iwn5000_rx_calib_results(struct iwn_softc *,
166 		    struct iwn_rx_desc *, struct iwn_rx_data *);
167 void		iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
168 		    struct iwn_rx_data *);
169 void		iwn_ampdu_txq_advance(struct iwn_softc *, struct iwn_tx_ring *,
170 		    int, int);
171 void		iwn_ampdu_tx_done(struct iwn_softc *, struct iwn_tx_ring *,
172 		    struct iwn_rx_desc *, uint16_t, uint8_t, uint8_t, uint8_t,
173 		    int, uint32_t, struct iwn_txagg_status *);
174 void		iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
175 		    struct iwn_rx_data *);
176 void		iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
177 		    struct iwn_rx_data *);
178 void		iwn_tx_done_free_txdata(struct iwn_softc *,
179 		    struct iwn_tx_data *);
180 void		iwn_clear_oactive(struct iwn_softc *, struct iwn_tx_ring *);
181 void		iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
182 		    uint8_t, uint8_t, int, int, uint16_t);
183 void		iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
184 void		iwn_notif_intr(struct iwn_softc *);
185 void		iwn_wakeup_intr(struct iwn_softc *);
186 void		iwn_fatal_intr(struct iwn_softc *);
187 int		iwn_intr(void *);
188 void		iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
189 		    uint16_t);
190 void		iwn4965_reset_sched(struct iwn_softc *, int, int);
191 void		iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
192 		    uint16_t);
193 void		iwn5000_reset_sched(struct iwn_softc *, int, int);
194 int		iwn_tx(struct iwn_softc *, struct mbuf *,
195 		    struct ieee80211_node *);
196 int		iwn_rval2ridx(int);
197 void		iwn_start(struct ifnet *);
198 void		iwn_watchdog(struct ifnet *);
199 int		iwn_ioctl(struct ifnet *, u_long, caddr_t);
200 int		iwn_cmd(struct iwn_softc *, int, const void *, int, int);
201 int		iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
202 		    int);
203 int		iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
204 		    int);
205 int		iwn_set_link_quality(struct iwn_softc *,
206 		    struct ieee80211_node *);
207 int		iwn_add_broadcast_node(struct iwn_softc *, int, int);
208 void		iwn_updateedca(struct ieee80211com *);
209 void		iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
210 int		iwn_set_critical_temp(struct iwn_softc *);
211 int		iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
212 void		iwn4965_power_calibration(struct iwn_softc *, int);
213 int		iwn4965_set_txpower(struct iwn_softc *, int);
214 int		iwn5000_set_txpower(struct iwn_softc *, int);
215 int		iwn4965_get_rssi(const struct iwn_rx_stat *);
216 int		iwn5000_get_rssi(const struct iwn_rx_stat *);
217 int		iwn_get_noise(const struct iwn_rx_general_stats *);
218 int		iwn4965_get_temperature(struct iwn_softc *);
219 int		iwn5000_get_temperature(struct iwn_softc *);
220 int		iwn_init_sensitivity(struct iwn_softc *);
221 void		iwn_collect_noise(struct iwn_softc *,
222 		    const struct iwn_rx_general_stats *);
223 int		iwn4965_init_gains(struct iwn_softc *);
224 int		iwn5000_init_gains(struct iwn_softc *);
225 int		iwn4965_set_gains(struct iwn_softc *);
226 int		iwn5000_set_gains(struct iwn_softc *);
227 void		iwn_tune_sensitivity(struct iwn_softc *,
228 		    const struct iwn_rx_stats *);
229 int		iwn_send_sensitivity(struct iwn_softc *);
230 int		iwn_set_pslevel(struct iwn_softc *, int, int, int);
231 int		iwn_send_temperature_offset(struct iwn_softc *);
232 int		iwn_send_btcoex(struct iwn_softc *);
233 int		iwn_send_advanced_btcoex(struct iwn_softc *);
234 int		iwn5000_runtime_calib(struct iwn_softc *);
235 int		iwn_config(struct iwn_softc *);
236 uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t, uint8_t);
237 uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
238 uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
239 int		iwn_scan(struct iwn_softc *, uint16_t, int);
240 void		iwn_scan_abort(struct iwn_softc *);
241 int		iwn_bgscan(struct ieee80211com *);
242 int		iwn_auth(struct iwn_softc *, int);
243 int		iwn_run(struct iwn_softc *);
244 int		iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
245 		    struct ieee80211_key *);
246 void		iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
247 		    struct ieee80211_key *);
248 void		iwn_update_htprot(struct ieee80211com *,
249 		    struct ieee80211_node *);
250 int		iwn_ampdu_rx_start(struct ieee80211com *,
251 		    struct ieee80211_node *, uint8_t);
252 void		iwn_ampdu_rx_stop(struct ieee80211com *,
253 		    struct ieee80211_node *, uint8_t);
254 int		iwn_ampdu_tx_start(struct ieee80211com *,
255 		    struct ieee80211_node *, uint8_t);
256 void		iwn_ampdu_tx_stop(struct ieee80211com *,
257 		    struct ieee80211_node *, uint8_t);
258 void		iwn4965_ampdu_tx_start(struct iwn_softc *,
259 		    struct ieee80211_node *, uint8_t, uint16_t);
260 void		iwn4965_ampdu_tx_stop(struct iwn_softc *,
261 		    uint8_t, uint16_t);
262 void		iwn5000_ampdu_tx_start(struct iwn_softc *,
263 		    struct ieee80211_node *, uint8_t, uint16_t);
264 void		iwn5000_ampdu_tx_stop(struct iwn_softc *,
265 		    uint8_t, uint16_t);
266 int		iwn5000_query_calibration(struct iwn_softc *);
267 int		iwn5000_send_calibration(struct iwn_softc *);
268 int		iwn5000_send_wimax_coex(struct iwn_softc *);
269 int		iwn5000_crystal_calib(struct iwn_softc *);
270 int		iwn6000_temp_offset_calib(struct iwn_softc *);
271 int		iwn2000_temp_offset_calib(struct iwn_softc *);
272 int		iwn4965_post_alive(struct iwn_softc *);
273 int		iwn5000_post_alive(struct iwn_softc *);
274 int		iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
275 		    int);
276 int		iwn4965_load_firmware(struct iwn_softc *);
277 int		iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
278 		    const uint8_t *, int);
279 int		iwn5000_load_firmware(struct iwn_softc *);
280 int		iwn_read_firmware_leg(struct iwn_softc *,
281 		    struct iwn_fw_info *);
282 int		iwn_read_firmware_tlv(struct iwn_softc *,
283 		    struct iwn_fw_info *, uint16_t);
284 int		iwn_read_firmware(struct iwn_softc *);
285 int		iwn_clock_wait(struct iwn_softc *);
286 int		iwn_apm_init(struct iwn_softc *);
287 void		iwn_apm_stop_master(struct iwn_softc *);
288 void		iwn_apm_stop(struct iwn_softc *);
289 int		iwn4965_nic_config(struct iwn_softc *);
290 int		iwn5000_nic_config(struct iwn_softc *);
291 int		iwn_hw_prepare(struct iwn_softc *);
292 int		iwn_hw_init(struct iwn_softc *);
293 void		iwn_hw_stop(struct iwn_softc *);
294 int		iwn_init(struct ifnet *);
295 void		iwn_stop(struct ifnet *);
296 
297 #ifdef IWN_DEBUG
298 #define DPRINTF(x)	do { if (iwn_debug > 0) printf x; } while (0)
299 #define DPRINTFN(n, x)	do { if (iwn_debug >= (n)) printf x; } while (0)
300 int iwn_debug = 1;
301 #else
302 #define DPRINTF(x)
303 #define DPRINTFN(n, x)
304 #endif
305 
306 struct cfdriver iwn_cd = {
307 	NULL, "iwn", DV_IFNET
308 };
309 
310 struct cfattach iwn_ca = {
311 	sizeof (struct iwn_softc), iwn_match, iwn_attach, iwn_detach,
312 	iwn_activate
313 };
314 
315 int
316 iwn_match(struct device *parent, void *match, void *aux)
317 {
318 	return pci_matchbyid((struct pci_attach_args *)aux, iwn_devices,
319 	    nitems(iwn_devices));
320 }
321 
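/*
 * Attach the adapter: map registers, establish the interrupt handler,
 * read the EEPROM/OTPROM, allocate DMA resources and hook the device
 * into the net80211 stack.
 */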
322 void
323 iwn_attach(struct device *parent, struct device *self, void *aux)
324 {
325 	struct iwn_softc *sc = (struct iwn_softc *)self;
326 	struct ieee80211com *ic = &sc->sc_ic;
327 	struct ifnet *ifp = &ic->ic_if;
328 	struct pci_attach_args *pa = aux;
329 	const char *intrstr;
330 	pci_intr_handle_t ih;
331 	pcireg_t memtype, reg;
332 	int i, error;
333 
334 	sc->sc_pct = pa->pa_pc;
335 	sc->sc_pcitag = pa->pa_tag;
336 	sc->sc_dmat = pa->pa_dmat;
337 
338 	/*
339 	 * Get the offset of the PCI Express Capability Structure in PCI
340 	 * Configuration Space.
341 	 */
342 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
343 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
344 	if (error == 0) {
345 		printf(": PCIe capability structure not found!\n");
346 		return;
347 	}
348 
349 	/* Clear device-specific "PCI retry timeout" register (41h). */
350 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
351 	if (reg & 0xff00)
352 		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
353 
354 	/* Hardware bug workaround: clear PCIe INTx Disable if it was left set. */
355 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
356 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
357 		DPRINTF(("PCIe INTx Disable set\n"));
358 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
359 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
360 		    PCI_COMMAND_STATUS_REG, reg);
361 	}
362 
363 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
364 	error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
365 	    &sc->sc_sh, NULL, &sc->sc_sz, 0);
366 	if (error != 0) {
367 		printf(": can't map mem space\n");
368 		return;
369 	}
370 
371 	/* Install interrupt handler. */
372 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
373 		printf(": can't map interrupt\n");
374 		return;
375 	}
376 	intrstr = pci_intr_string(sc->sc_pct, ih);
377 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc,
378 	    sc->sc_dev.dv_xname);
379 	if (sc->sc_ih == NULL) {
380 		printf(": can't establish interrupt");
381 		if (intrstr != NULL)
382 			printf(" at %s", intrstr);
383 		printf("\n");
384 		return;
385 	}
386 	printf(": %s", intrstr);
387 
388 	/* Read hardware revision and attach. */
389 	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0x1f;
390 	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
391 		error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
392 	else
393 		error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
394 	if (error != 0) {
395 		printf(": could not attach device\n");
396 		return;
397 	}
398 
399 	if ((error = iwn_hw_prepare(sc)) != 0) {
400 		printf(": hardware not ready\n");
401 		return;
402 	}
403 
404 	/* Read MAC address, channels, etc from EEPROM. */
405 	if ((error = iwn_read_eeprom(sc)) != 0) {
406 		printf(": could not read EEPROM\n");
407 		return;
408 	}
409 
410 	/* Allocate DMA memory for firmware transfers. */
411 	if ((error = iwn_alloc_fwmem(sc)) != 0) {
412 		printf(": could not allocate memory for firmware\n");
413 		return;
414 	}
415 
416 	/* Allocate "Keep Warm" page. */
417 	if ((error = iwn_alloc_kw(sc)) != 0) {
418 		printf(": could not allocate keep warm page\n");
419 		goto fail1;
420 	}
421 
422 	/* Allocate ICT table for 5000 Series. */
423 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
424 	    (error = iwn_alloc_ict(sc)) != 0) {
425 		printf(": could not allocate ICT table\n");
426 		goto fail2;
427 	}
428 
429 	/* Allocate TX scheduler "rings". */
430 	if ((error = iwn_alloc_sched(sc)) != 0) {
431 		printf(": could not allocate TX scheduler rings\n");
432 		goto fail3;
433 	}
434 
435 	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
436 	for (i = 0; i < sc->ntxqs; i++) {
437 		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
438 			printf(": could not allocate TX ring %d\n", i);
439 			goto fail4;
440 		}
441 	}
442 
443 	/* Allocate RX ring. */
444 	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
445 		printf(": could not allocate RX ring\n");
446 		goto fail4;
447 	}
448 
449 	/* Clear pending interrupts. */
450 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
451 
452 	/* Count the number of available chains. */
453 	sc->ntxchains =
454 	    ((sc->txchainmask >> 2) & 1) +
455 	    ((sc->txchainmask >> 1) & 1) +
456 	    ((sc->txchainmask >> 0) & 1);
457 	sc->nrxchains =
458 	    ((sc->rxchainmask >> 2) & 1) +
459 	    ((sc->rxchainmask >> 1) & 1) +
460 	    ((sc->rxchainmask >> 0) & 1);
461 	printf(", MIMO %dT%dR, %.4s, address %s\n", sc->ntxchains,
462 	    sc->nrxchains, sc->eeprom_domain, ether_sprintf(ic->ic_myaddr));
463 
464 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
465 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
466 	ic->ic_state = IEEE80211_S_INIT;
467 
468 	/* Set device capabilities. */
469 	ic->ic_caps =
470 	    IEEE80211_C_WEP |		/* WEP */
471 	    IEEE80211_C_RSN |		/* WPA/RSN */
472 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
473 	    IEEE80211_C_SCANALLBAND |	/* driver scans all bands at once */
474 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
475 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
476 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
477 	    IEEE80211_C_PMGT;		/* power saving supported */
478 
479 	/* No optional HT features supported for now. */
480 	ic->ic_htcaps = 0;
481 	ic->ic_htxcaps = 0;
482 	ic->ic_txbfcaps = 0;
483 	ic->ic_aselcaps = 0;
484 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
485 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
486 		ic->ic_caps |= (IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU);
487 		/* Set HT capabilities. */
488 		ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
489 #ifdef notyet
490 		ic->ic_htcaps |=
491 #if IWN_RBUF_SIZE == 8192
492 		    IEEE80211_HTCAP_AMSDU7935 |
493 #endif
494 		    IEEE80211_HTCAP_CBW20_40 |
495 		    IEEE80211_HTCAP_SGI40;
496 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
497 			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
498 		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
499 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
500 		else
501 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
502 #endif	/* notyet */
503 	}
504 
505 	/* Set supported legacy rates. */
506 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
507 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
508 	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
509 		ic->ic_sup_rates[IEEE80211_MODE_11A] =
510 		    ieee80211_std_rateset_11a;
511 	}
512 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
513 		/* Set supported HT rates. */
514 		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
515 #ifdef notyet
516 		if (sc->nrxchains > 1)
517 			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
518 		if (sc->nrxchains > 2)
519 			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
520 #endif
521 	}
522 
523 	/* IBSS channel undefined for now. */
524 	ic->ic_ibss_chan = &ic->ic_channels[0];
525 
526 	ifp->if_softc = sc;
527 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
528 	ifp->if_ioctl = iwn_ioctl;
529 	ifp->if_start = iwn_start;
530 	ifp->if_watchdog = iwn_watchdog;
531 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
532 
533 	if_attach(ifp);
534 	ieee80211_ifattach(ifp);
535 	ic->ic_node_alloc = iwn_node_alloc;
536 	ic->ic_bgscan_start = iwn_bgscan;
537 	ic->ic_newassoc = iwn_newassoc;
538 	ic->ic_updateedca = iwn_updateedca;
539 	ic->ic_set_key = iwn_set_key;
540 	ic->ic_delete_key = iwn_delete_key;
541 	ic->ic_update_htprot = iwn_update_htprot;
542 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
543 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
544 	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
545 	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
546 
547 	/* Override 802.11 state transition machine. */
548 	sc->sc_newstate = ic->ic_newstate;
549 	ic->ic_newstate = iwn_newstate;
550 	ieee80211_media_init(ifp, iwn_media_change, ieee80211_media_status);
551 
552 	sc->amrr.amrr_min_success_threshold =  1;
553 	sc->amrr.amrr_max_success_threshold = 15;
554 
555 #if NBPFILTER > 0
556 	iwn_radiotap_attach(sc);
557 #endif
558 	timeout_set(&sc->calib_to, iwn_calib_timeout, sc);
559 	rw_init(&sc->sc_rwlock, "iwnlock");
560 	task_set(&sc->init_task, iwn_init_task, sc);
561 	return;
562 
563 	/* Free allocated memory if something failed during attachment. */
564 fail4:	while (--i >= 0)
565 		iwn_free_tx_ring(sc, &sc->txq[i]);
566 	iwn_free_sched(sc);
567 fail3:	if (sc->ict != NULL)
568 		iwn_free_ict(sc);
569 fail2:	iwn_free_kw(sc);
570 fail1:	iwn_free_fwmem(sc);
571 }
572 
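/*
 * Install 4965-specific operations and constants in the softc.
 */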
573 int
574 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
575 {
576 	struct iwn_ops *ops = &sc->ops;
577 
578 	ops->load_firmware = iwn4965_load_firmware;
579 	ops->read_eeprom = iwn4965_read_eeprom;
580 	ops->post_alive = iwn4965_post_alive;
581 	ops->nic_config = iwn4965_nic_config;
582 	ops->reset_sched = iwn4965_reset_sched;
583 	ops->update_sched = iwn4965_update_sched;
584 	ops->get_temperature = iwn4965_get_temperature;
585 	ops->get_rssi = iwn4965_get_rssi;
586 	ops->set_txpower = iwn4965_set_txpower;
587 	ops->init_gains = iwn4965_init_gains;
588 	ops->set_gains = iwn4965_set_gains;
589 	ops->add_node = iwn4965_add_node;
590 	ops->tx_done = iwn4965_tx_done;
591 	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
592 	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
593 	sc->ntxqs = IWN4965_NTXQUEUES;
594 	sc->first_agg_txq = IWN4965_FIRST_AGG_TXQUEUE;
595 	sc->ndmachnls = IWN4965_NDMACHNLS;
596 	sc->broadcast_id = IWN4965_ID_BROADCAST;
597 	sc->rxonsz = IWN4965_RXONSZ;
598 	sc->schedsz = IWN4965_SCHEDSZ;
599 	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
600 	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
601 	sc->fwsz = IWN4965_FWSZ;
602 	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
603 	sc->limits = &iwn4965_sensitivity_limits;
604 	sc->fwname = "iwn-4965";
605 	/* Override chain masks; ROM is known to be broken. */
606 	sc->txchainmask = IWN_ANT_AB;
607 	sc->rxchainmask = IWN_ANT_ABC;
608 
609 	return 0;
610 }
611 
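/*
 * Install operations and constants common to 5000 Series and newer
 * (1000/2000/6000 Series) adapters, then apply per-type overrides.
 */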
612 int
613 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
614 {
615 	struct iwn_ops *ops = &sc->ops;
616 
617 	ops->load_firmware = iwn5000_load_firmware;
618 	ops->read_eeprom = iwn5000_read_eeprom;
619 	ops->post_alive = iwn5000_post_alive;
620 	ops->nic_config = iwn5000_nic_config;
621 	ops->reset_sched = iwn5000_reset_sched;
622 	ops->update_sched = iwn5000_update_sched;
623 	ops->get_temperature = iwn5000_get_temperature;
624 	ops->get_rssi = iwn5000_get_rssi;
625 	ops->set_txpower = iwn5000_set_txpower;
626 	ops->init_gains = iwn5000_init_gains;
627 	ops->set_gains = iwn5000_set_gains;
628 	ops->add_node = iwn5000_add_node;
629 	ops->tx_done = iwn5000_tx_done;
630 	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
631 	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
632 	sc->ntxqs = IWN5000_NTXQUEUES;
633 	sc->first_agg_txq = IWN5000_FIRST_AGG_TXQUEUE;
634 	sc->ndmachnls = IWN5000_NDMACHNLS;
635 	sc->broadcast_id = IWN5000_ID_BROADCAST;
636 	sc->rxonsz = IWN5000_RXONSZ;
637 	sc->schedsz = IWN5000_SCHEDSZ;
638 	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
639 	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
640 	sc->fwsz = IWN5000_FWSZ;
641 	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
642 
643 	switch (sc->hw_type) {
644 	case IWN_HW_REV_TYPE_5100:
645 		sc->limits = &iwn5000_sensitivity_limits;
646 		sc->fwname = "iwn-5000";
647 		/* Override chain masks; ROM is known to be broken. */
648 		sc->txchainmask = IWN_ANT_B;
649 		sc->rxchainmask = IWN_ANT_AB;
650 		break;
651 	case IWN_HW_REV_TYPE_5150:
652 		sc->limits = &iwn5150_sensitivity_limits;
653 		sc->fwname = "iwn-5150";
654 		break;
655 	case IWN_HW_REV_TYPE_5300:
656 	case IWN_HW_REV_TYPE_5350:
657 		sc->limits = &iwn5000_sensitivity_limits;
658 		sc->fwname = "iwn-5000";
659 		break;
660 	case IWN_HW_REV_TYPE_1000:
661 		sc->limits = &iwn1000_sensitivity_limits;
662 		sc->fwname = "iwn-1000";
663 		break;
664 	case IWN_HW_REV_TYPE_6000:
665 		sc->limits = &iwn6000_sensitivity_limits;
666 		sc->fwname = "iwn-6000";
667 		if (pid == PCI_PRODUCT_INTEL_WL_6200_1 ||
668 		    pid == PCI_PRODUCT_INTEL_WL_6200_2) {
669 			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
670 			/* Override chain masks; ROM is known to be broken. */
671 			sc->txchainmask = IWN_ANT_BC;
672 			sc->rxchainmask = IWN_ANT_BC;
673 		}
674 		break;
675 	case IWN_HW_REV_TYPE_6050:
676 		sc->limits = &iwn6000_sensitivity_limits;
677 		sc->fwname = "iwn-6050";
678 		break;
679 	case IWN_HW_REV_TYPE_6005:
680 		sc->limits = &iwn6000_sensitivity_limits;
681 		if (pid != PCI_PRODUCT_INTEL_WL_6005_1 &&
682 		    pid != PCI_PRODUCT_INTEL_WL_6005_2) {
683 			sc->fwname = "iwn-6030";
684 			sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
685 		} else
686 			sc->fwname = "iwn-6005";
687 		break;
688 	case IWN_HW_REV_TYPE_2030:
689 		sc->limits = &iwn2000_sensitivity_limits;
690 		sc->fwname = "iwn-2030";
691 		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
692 		break;
693 	case IWN_HW_REV_TYPE_2000:
694 		sc->limits = &iwn2000_sensitivity_limits;
695 		sc->fwname = "iwn-2000";
696 		break;
697 	case IWN_HW_REV_TYPE_135:
698 		sc->limits = &iwn2000_sensitivity_limits;
699 		sc->fwname = "iwn-135";
700 		sc->sc_flags |= IWN_FLAG_ADV_BT_COEX;
701 		break;
702 	case IWN_HW_REV_TYPE_105:
703 		sc->limits = &iwn2000_sensitivity_limits;
704 		sc->fwname = "iwn-105";
705 		break;
706 	default:
707 		printf(": adapter type %d not supported\n", sc->hw_type);
708 		return ENOTSUP;
709 	}
710 	return 0;
711 }
712 
713 #if NBPFILTER > 0
714 /*
715  * Attach the interface to 802.11 radiotap.
716  */
717 void
718 iwn_radiotap_attach(struct iwn_softc *sc)
719 {
720 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
721 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
722 
723 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
724 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
725 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);
726 
727 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
728 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
729 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
730 }
731 #endif
732 
733 int
734 iwn_detach(struct device *self, int flags)
735 {
736 	struct iwn_softc *sc = (struct iwn_softc *)self;
737 	struct ifnet *ifp = &sc->sc_ic.ic_if;
738 	int qid;
739 
740 	timeout_del(&sc->calib_to);
741 	task_del(systq, &sc->init_task);
742 
743 	/* Uninstall interrupt handler. */
744 	if (sc->sc_ih != NULL)
745 		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
746 
747 	/* Free DMA resources. */
748 	iwn_free_rx_ring(sc, &sc->rxq);
749 	for (qid = 0; qid < sc->ntxqs; qid++)
750 		iwn_free_tx_ring(sc, &sc->txq[qid]);
751 	iwn_free_sched(sc);
752 	iwn_free_kw(sc);
753 	if (sc->ict != NULL)
754 		iwn_free_ict(sc);
755 	iwn_free_fwmem(sc);
756 
757 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
758 
759 	ieee80211_ifdetach(ifp);
760 	if_detach(ifp);
761 
762 	return 0;
763 }
764 
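/*
 * Handle suspend/resume: stop the interface on suspend and reinitialize
 * the hardware on wakeup.
 */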
765 int
766 iwn_activate(struct device *self, int act)
767 {
768 	struct iwn_softc *sc = (struct iwn_softc *)self;
769 	struct ifnet *ifp = &sc->sc_ic.ic_if;
770 
771 	switch (act) {
772 	case DVACT_SUSPEND:
773 		if (ifp->if_flags & IFF_RUNNING)
774 			iwn_stop(ifp);
775 		break;
776 	case DVACT_WAKEUP:
777 		iwn_wakeup(sc);
778 		break;
779 	}
780 
781 	return 0;
782 }
783 
784 void
785 iwn_wakeup(struct iwn_softc *sc)
786 {
787 	pcireg_t reg;
788 
789 	/* Clear device-specific "PCI retry timeout" register (41h). */
790 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
791 	if (reg & 0xff00)
792 		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
793 	iwn_init_task(sc);
794 }
795 
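/*
 * Bring the interface up from process context if it is marked up but not
 * yet running; serialized against other entry points by sc_rwlock.
 */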
796 void
797 iwn_init_task(void *arg1)
798 {
799 	struct iwn_softc *sc = arg1;
800 	struct ifnet *ifp = &sc->sc_ic.ic_if;
801 	int s;
802 
803 	rw_enter_write(&sc->sc_rwlock);
804 	s = splnet();
805 
806 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
807 		iwn_init(ifp);
808 
809 	splx(s);
810 	rw_exit_write(&sc->sc_rwlock);
811 }
812 
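/*
 * Request exclusive ("MAC access") access to the NIC registers and spin
 * until the device grants it, which requires the device to be awake.
 */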
813 int
814 iwn_nic_lock(struct iwn_softc *sc)
815 {
816 	int ntries;
817 
818 	/* Request exclusive access to NIC. */
819 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
820 
821 	/* Spin until we actually get the lock. */
822 	for (ntries = 0; ntries < 1000; ntries++) {
823 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
824 		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
825 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
826 			return 0;
827 		DELAY(10);
828 	}
829 	return ETIMEDOUT;
830 }
831 
832 static __inline void
833 iwn_nic_unlock(struct iwn_softc *sc)
834 {
835 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
836 }
837 
838 static __inline uint32_t
839 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
840 {
841 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
842 	IWN_BARRIER_READ_WRITE(sc);
843 	return IWN_READ(sc, IWN_PRPH_RDATA);
844 }
845 
846 static __inline void
847 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
848 {
849 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
850 	IWN_BARRIER_WRITE(sc);
851 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
852 }
853 
854 static __inline void
855 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
856 {
857 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
858 }
859 
860 static __inline void
861 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
862 {
863 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
864 }
865 
866 static __inline void
867 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
868     const uint32_t *data, int count)
869 {
870 	for (; count > 0; count--, data++, addr += 4)
871 		iwn_prph_write(sc, addr, *data);
872 }
873 
874 static __inline uint32_t
875 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
876 {
877 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
878 	IWN_BARRIER_READ_WRITE(sc);
879 	return IWN_READ(sc, IWN_MEM_RDATA);
880 }
881 
882 static __inline void
883 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
884 {
885 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
886 	IWN_BARRIER_WRITE(sc);
887 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
888 }
889 
890 static __inline void
891 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
892 {
893 	uint32_t tmp;
894 
895 	tmp = iwn_mem_read(sc, addr & ~3);
896 	if (addr & 3)
897 		tmp = (tmp & 0x0000ffff) | data << 16;
898 	else
899 		tmp = (tmp & 0xffff0000) | data;
900 	iwn_mem_write(sc, addr & ~3, tmp);
901 }
902 
903 #ifdef IWN_DEBUG
904 
905 static __inline void
906 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
907     int count)
908 {
909 	for (; count > 0; count--, addr += 4)
910 		*data++ = iwn_mem_read(sc, addr);
911 }
912 
913 #endif
914 
915 static __inline void
916 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
917     int count)
918 {
919 	for (; count > 0; count--, addr += 4)
920 		iwn_mem_write(sc, addr, val);
921 }
922 
923 int
924 iwn_eeprom_lock(struct iwn_softc *sc)
925 {
926 	int i, ntries;
927 
928 	for (i = 0; i < 100; i++) {
929 		/* Request exclusive access to EEPROM. */
930 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
931 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
932 
933 		/* Spin until we actually get the lock. */
934 		for (ntries = 0; ntries < 100; ntries++) {
935 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
936 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
937 				return 0;
938 			DELAY(10);
939 		}
940 	}
941 	return ETIMEDOUT;
942 }
943 
944 static __inline void
945 iwn_eeprom_unlock(struct iwn_softc *sc)
946 {
947 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
948 }
949 
950 /*
951  * Initialize access by host to One Time Programmable ROM.
952  * NB: This kind of ROM is found on 1000 Series and newer adapters only.
953  */
954 int
955 iwn_init_otprom(struct iwn_softc *sc)
956 {
957 	uint16_t prev, base, next;
958 	int count, error;
959 
960 	/* Wait for clock stabilization before accessing prph. */
961 	if ((error = iwn_clock_wait(sc)) != 0)
962 		return error;
963 
964 	if ((error = iwn_nic_lock(sc)) != 0)
965 		return error;
966 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
967 	DELAY(5);
968 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
969 	iwn_nic_unlock(sc);
970 
971 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
972 	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
973 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
974 		    IWN_RESET_LINK_PWR_MGMT_DIS);
975 	}
976 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
977 	/* Clear ECC status. */
978 	IWN_SETBITS(sc, IWN_OTP_GP,
979 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
980 
981 	/*
982 	 * Find the block before last block (contains the EEPROM image)
983 	 * for HW without OTP shadow RAM.
984 	 */
985 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
986 		/* Switch to absolute addressing mode. */
987 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
988 		base = 0;
989 		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
990 			error = iwn_read_prom_data(sc, base, &next, 2);
991 			if (error != 0)
992 				return error;
993 			if (next == 0)	/* End of linked-list. */
994 				break;
995 			prev = base;
996 			base = letoh16(next);
997 		}
998 		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
999 			return EIO;
1000 		/* Skip "next" word. */
1001 		sc->prom_base = prev + 1;
1002 	}
1003 	return 0;
1004 }
1005 
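/*
 * Read `count' bytes from the EEPROM/OTPROM into `data'.  Data is read
 * 16 bits per register access; on OTPROM parts the ECC status bits are
 * checked after every read.
 */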
1006 int
1007 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1008 {
1009 	uint8_t *out = data;
1010 	uint32_t val, tmp;
1011 	int ntries;
1012 
1013 	addr += sc->prom_base;
1014 	for (; count > 0; count -= 2, addr++) {
1015 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1016 		for (ntries = 0; ntries < 10; ntries++) {
1017 			val = IWN_READ(sc, IWN_EEPROM);
1018 			if (val & IWN_EEPROM_READ_VALID)
1019 				break;
1020 			DELAY(5);
1021 		}
1022 		if (ntries == 10) {
1023 			printf("%s: timeout reading ROM at 0x%x\n",
1024 			    sc->sc_dev.dv_xname, addr);
1025 			return ETIMEDOUT;
1026 		}
1027 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1028 			/* OTPROM, check for ECC errors. */
1029 			tmp = IWN_READ(sc, IWN_OTP_GP);
1030 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1031 				printf("%s: OTPROM ECC error at 0x%x\n",
1032 				    sc->sc_dev.dv_xname, addr);
1033 				return EIO;
1034 			}
1035 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1036 				/* Correctable ECC error, clear bit. */
1037 				IWN_SETBITS(sc, IWN_OTP_GP,
1038 				    IWN_OTP_GP_ECC_CORR_STTS);
1039 			}
1040 		}
1041 		*out++ = val >> 16;
1042 		if (count > 1)
1043 			*out++ = val >> 24;
1044 	}
1045 	return 0;
1046 }
1047 
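/*
 * Allocate a physically contiguous, coherent DMA buffer of `size' bytes
 * with the requested alignment and optionally return its kernel virtual
 * address through `kvap'.
 */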
1048 int
1049 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
1050     bus_size_t size, bus_size_t alignment)
1051 {
1052 	int nsegs, error;
1053 
1054 	dma->tag = tag;
1055 	dma->size = size;
1056 
1057 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1058 	    &dma->map);
1059 	if (error != 0)
1060 		goto fail;
1061 
1062 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1063 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1064 	if (error != 0)
1065 		goto fail;
1066 
1067 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
1068 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1069 	if (error != 0)
1070 		goto fail;
1071 
1072 	error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size,
1073 	    BUS_DMA_NOWAIT);
1074 	if (error != 0)
1075 		goto fail;
1076 
1077 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1078 
1079 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1080 	if (kvap != NULL)
1081 		*kvap = dma->vaddr;
1082 
1083 	return 0;
1084 
1085 fail:	iwn_dma_contig_free(dma);
1086 	return error;
1087 }
1088 
1089 void
1090 iwn_dma_contig_free(struct iwn_dma_info *dma)
1091 {
1092 	if (dma->map != NULL) {
1093 		if (dma->vaddr != NULL) {
1094 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1095 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1096 			bus_dmamap_unload(dma->tag, dma->map);
1097 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1098 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1099 			dma->vaddr = NULL;
1100 		}
1101 		bus_dmamap_destroy(dma->tag, dma->map);
1102 		dma->map = NULL;
1103 	}
1104 }
1105 
1106 int
1107 iwn_alloc_sched(struct iwn_softc *sc)
1108 {
1109 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1110 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1111 	    (void **)&sc->sched, sc->schedsz, 1024);
1112 }
1113 
1114 void
1115 iwn_free_sched(struct iwn_softc *sc)
1116 {
1117 	iwn_dma_contig_free(&sc->sched_dma);
1118 }
1119 
1120 int
1121 iwn_alloc_kw(struct iwn_softc *sc)
1122 {
1123 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1124 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
1125 	    4096);
1126 }
1127 
1128 void
1129 iwn_free_kw(struct iwn_softc *sc)
1130 {
1131 	iwn_dma_contig_free(&sc->kw_dma);
1132 }
1133 
1134 int
1135 iwn_alloc_ict(struct iwn_softc *sc)
1136 {
1137 	/* ICT table must be aligned on a 4KB boundary. */
1138 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1139 	    (void **)&sc->ict, IWN_ICT_SIZE, 4096);
1140 }
1141 
1142 void
1143 iwn_free_ict(struct iwn_softc *sc)
1144 {
1145 	iwn_dma_contig_free(&sc->ict_dma);
1146 }
1147 
1148 int
1149 iwn_alloc_fwmem(struct iwn_softc *sc)
1150 {
1151 	/* Must be aligned on a 16-byte boundary. */
1152 	return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
1153 	    sc->fwsz, 16);
1154 }
1155 
1156 void
1157 iwn_free_fwmem(struct iwn_softc *sc)
1158 {
1159 	iwn_dma_contig_free(&sc->fw_dma);
1160 }
1161 
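/*
 * Allocate the RX ring: 256-byte aligned descriptors, the RX status area
 * and one DMA-mapped mbuf cluster per ring slot.
 */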
1162 int
1163 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1164 {
1165 	bus_size_t size;
1166 	int i, error;
1167 
1168 	ring->cur = 0;
1169 
1170 	/* Allocate RX descriptors (256-byte aligned). */
1171 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1172 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1173 	    (void **)&ring->desc, size, 256);
1174 	if (error != 0) {
1175 		printf("%s: could not allocate RX ring DMA memory\n",
1176 		    sc->sc_dev.dv_xname);
1177 		goto fail;
1178 	}
1179 
1180 	/* Allocate RX status area (16-byte aligned). */
1181 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1182 	    (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
1183 	if (error != 0) {
1184 		printf("%s: could not allocate RX status DMA memory\n",
1185 		    sc->sc_dev.dv_xname);
1186 		goto fail;
1187 	}
1188 
1189 	/*
1190 	 * Allocate and map RX buffers.
1191 	 */
1192 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1193 		struct iwn_rx_data *data = &ring->data[i];
1194 
1195 		error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
1196 		    IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1197 		    &data->map);
1198 		if (error != 0) {
1199 			printf("%s: could not create RX buf DMA map\n",
1200 			    sc->sc_dev.dv_xname);
1201 			goto fail;
1202 		}
1203 
1204 		data->m = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
1205 		if (data->m == NULL) {
1206 			printf("%s: could not allocate RX mbuf\n",
1207 			    sc->sc_dev.dv_xname);
1208 			error = ENOBUFS;
1209 			goto fail;
1210 		}
1211 
1212 		error = bus_dmamap_load(sc->sc_dmat, data->map,
1213 		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1214 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
1215 		if (error != 0) {
1216 			printf("%s: can't map mbuf (error %d)\n",
1217 			    sc->sc_dev.dv_xname, error);
1218 			goto fail;
1219 		}
1220 
1221 		/* Set physical address of RX buffer (256-byte aligned). */
1222 		ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
1223 	}
1224 
1225 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
1226 	    BUS_DMASYNC_PREWRITE);
1227 
1228 	return 0;
1229 
1230 fail:	iwn_free_rx_ring(sc, ring);
1231 	return error;
1232 }
1233 
1234 void
1235 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1236 {
1237 	int ntries;
1238 
1239 	if (iwn_nic_lock(sc) == 0) {
1240 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1241 		for (ntries = 0; ntries < 1000; ntries++) {
1242 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1243 			    IWN_FH_RX_STATUS_IDLE)
1244 				break;
1245 			DELAY(10);
1246 		}
1247 		iwn_nic_unlock(sc);
1248 	}
1249 	ring->cur = 0;
1250 	sc->last_rx_valid = 0;
1251 }
1252 
1253 void
1254 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1255 {
1256 	int i;
1257 
1258 	iwn_dma_contig_free(&ring->desc_dma);
1259 	iwn_dma_contig_free(&ring->stat_dma);
1260 
1261 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1262 		struct iwn_rx_data *data = &ring->data[i];
1263 
1264 		if (data->m != NULL) {
1265 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1266 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1267 			bus_dmamap_unload(sc->sc_dmat, data->map);
1268 			m_freem(data->m);
1269 		}
1270 		if (data->map != NULL)
1271 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1272 	}
1273 }
1274 
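/*
 * Allocate TX ring `qid': 256-byte aligned descriptors, per-slot command
 * buffers and a DMA map for each data slot.
 */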
1275 int
1276 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1277 {
1278 	bus_addr_t paddr;
1279 	bus_size_t size;
1280 	int i, error;
1281 
1282 	ring->qid = qid;
1283 	ring->queued = 0;
1284 	ring->cur = 0;
1285 
1286 	/* Allocate TX descriptors (256-byte aligned). */
1287 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1288 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1289 	    (void **)&ring->desc, size, 256);
1290 	if (error != 0) {
1291 		printf("%s: could not allocate TX ring DMA memory\n",
1292 		    sc->sc_dev.dv_xname);
1293 		goto fail;
1294 	}
1295 
1296 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1297 	error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
1298 	    (void **)&ring->cmd, size, 4);
1299 	if (error != 0) {
1300 		printf("%s: could not allocate TX cmd DMA memory\n",
1301 		    sc->sc_dev.dv_xname);
1302 		goto fail;
1303 	}
1304 
1305 	paddr = ring->cmd_dma.paddr;
1306 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1307 		struct iwn_tx_data *data = &ring->data[i];
1308 
1309 		data->cmd_paddr = paddr;
1310 		data->scratch_paddr = paddr + 12;
1311 		paddr += sizeof (struct iwn_tx_cmd);
1312 
1313 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1314 		    IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
1315 		    &data->map);
1316 		if (error != 0) {
1317 			printf("%s: could not create TX buf DMA map\n",
1318 			    sc->sc_dev.dv_xname);
1319 			goto fail;
1320 		}
1321 	}
1322 	return 0;
1323 
1324 fail:	iwn_free_tx_ring(sc, ring);
1325 	return error;
1326 }
1327 
1328 void
1329 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1330 {
1331 	int i;
1332 
1333 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1334 		struct iwn_tx_data *data = &ring->data[i];
1335 
1336 		if (data->m != NULL) {
1337 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1338 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1339 			bus_dmamap_unload(sc->sc_dmat, data->map);
1340 			m_freem(data->m);
1341 			data->m = NULL;
1342 		}
1343 	}
1344 	/* Clear TX descriptors. */
1345 	memset(ring->desc, 0, ring->desc_dma.size);
1346 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1347 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1348 	sc->qfullmsk &= ~(1 << ring->qid);
1349 	ring->queued = 0;
1350 	ring->cur = 0;
1351 }
1352 
1353 void
1354 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1355 {
1356 	int i;
1357 
1358 	iwn_dma_contig_free(&ring->desc_dma);
1359 	iwn_dma_contig_free(&ring->cmd_dma);
1360 
1361 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1362 		struct iwn_tx_data *data = &ring->data[i];
1363 
1364 		if (data->m != NULL) {
1365 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1366 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1367 			bus_dmamap_unload(sc->sc_dmat, data->map);
1368 			m_freem(data->m);
1369 		}
1370 		if (data->map != NULL)
1371 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1372 	}
1373 }
1374 
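/*
 * Reset and enable the ICT (Interrupt Cause Table), a table in host
 * memory which 5000 Series and newer devices use to report interrupt
 * causes without requiring register reads in the interrupt handler.
 */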
1375 void
1376 iwn5000_ict_reset(struct iwn_softc *sc)
1377 {
1378 	/* Disable interrupts. */
1379 	IWN_WRITE(sc, IWN_INT_MASK, 0);
1380 
1381 	/* Reset ICT table. */
1382 	memset(sc->ict, 0, IWN_ICT_SIZE);
1383 	sc->ict_cur = 0;
1384 
1385 	/* Set physical address of ICT table (4KB aligned). */
1386 	DPRINTF(("enabling ICT\n"));
1387 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1388 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1389 
1390 	/* Enable periodic RX interrupt. */
1391 	sc->int_mask |= IWN_INT_RX_PERIODIC;
1392 	/* Switch to ICT interrupt mode in driver. */
1393 	sc->sc_flags |= IWN_FLAG_USE_ICT;
1394 
1395 	/* Re-enable interrupts. */
1396 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1397 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1398 }
1399 
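/*
 * Read adapter configuration from the EEPROM or OTPROM: SKU capabilities,
 * radio configuration, MAC address and device-specific calibration data.
 */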
1400 int
1401 iwn_read_eeprom(struct iwn_softc *sc)
1402 {
1403 	struct iwn_ops *ops = &sc->ops;
1404 	struct ieee80211com *ic = &sc->sc_ic;
1405 	uint16_t val;
1406 	int error;
1407 
1408 	/* Check whether adapter has an EEPROM or an OTPROM. */
1409 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1410 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1411 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1412 	DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
1413 	    "OTPROM" : "EEPROM"));
1414 
1415 	/* Adapter has to be powered on for EEPROM access to work. */
1416 	if ((error = iwn_apm_init(sc)) != 0) {
1417 		printf("%s: could not power ON adapter\n",
1418 		    sc->sc_dev.dv_xname);
1419 		return error;
1420 	}
1421 
1422 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1423 		printf("%s: bad ROM signature\n", sc->sc_dev.dv_xname);
1424 		return EIO;
1425 	}
1426 	if ((error = iwn_eeprom_lock(sc)) != 0) {
1427 		printf("%s: could not lock ROM (error=%d)\n",
1428 		    sc->sc_dev.dv_xname, error);
1429 		return error;
1430 	}
1431 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1432 		if ((error = iwn_init_otprom(sc)) != 0) {
1433 			printf("%s: could not initialize OTPROM\n",
1434 			    sc->sc_dev.dv_xname);
1435 			return error;
1436 		}
1437 	}
1438 
1439 	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1440 	DPRINTF(("SKU capabilities=0x%04x\n", letoh16(val)));
1441 	/* Check whether this SKU supports 802.11n (HT). */
1442 	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1443 		sc->sc_flags |= IWN_FLAG_HAS_11N;
1444 
1445 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1446 	sc->rfcfg = letoh16(val);
1447 	DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
1448 	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1449 	if (sc->txchainmask == 0)
1450 		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1451 	if (sc->rxchainmask == 0)
1452 		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1453 
1454 	/* Read MAC address. */
1455 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);
1456 
1457 	/* Read adapter-specific information from EEPROM. */
1458 	ops->read_eeprom(sc);
1459 
1460 	iwn_apm_stop(sc);	/* Power OFF adapter. */
1461 
1462 	iwn_eeprom_unlock(sc);
1463 	return 0;
1464 }
1465 
1466 void
1467 iwn4965_read_eeprom(struct iwn_softc *sc)
1468 {
1469 	uint32_t addr;
1470 	uint16_t val;
1471 	int i;
1472 
1473 	/* Read regulatory domain (4 ASCII characters). */
1474 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1475 
1476 	/* Read the list of authorized channels (20MHz ones only). */
1477 	for (i = 0; i < 5; i++) {
1478 		addr = iwn4965_regulatory_bands[i];
1479 		iwn_read_eeprom_channels(sc, i, addr);
1480 	}
1481 
1482 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1483 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1484 	sc->maxpwr2GHz = val & 0xff;
1485 	sc->maxpwr5GHz = val >> 8;
1486 	/* Check that EEPROM values are within valid range. */
1487 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1488 		sc->maxpwr5GHz = 38;
1489 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1490 		sc->maxpwr2GHz = 38;
1491 	DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));
1492 
1493 	/* Read samples for each TX power group. */
1494 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1495 	    sizeof sc->bands);
1496 
1497 	/* Read voltage at which samples were taken. */
1498 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1499 	sc->eeprom_voltage = (int16_t)letoh16(val);
1500 	DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));
1501 
1502 #ifdef IWN_DEBUG
1503 	/* Print samples. */
1504 	if (iwn_debug > 0) {
1505 		for (i = 0; i < IWN_NBANDS; i++)
1506 			iwn4965_print_power_group(sc, i);
1507 	}
1508 #endif
1509 }
1510 
1511 #ifdef IWN_DEBUG
1512 void
1513 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1514 {
1515 	struct iwn4965_eeprom_band *band = &sc->bands[i];
1516 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1517 	int j, c;
1518 
1519 	printf("===band %d===\n", i);
1520 	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1521 	printf("chan1 num=%d\n", chans[0].num);
1522 	for (c = 0; c < 2; c++) {
1523 		for (j = 0; j < IWN_NSAMPLES; j++) {
1524 			printf("chain %d, sample %d: temp=%d gain=%d "
1525 			    "power=%d pa_det=%d\n", c, j,
1526 			    chans[0].samples[c][j].temp,
1527 			    chans[0].samples[c][j].gain,
1528 			    chans[0].samples[c][j].power,
1529 			    chans[0].samples[c][j].pa_det);
1530 		}
1531 	}
1532 	printf("chan2 num=%d\n", chans[1].num);
1533 	for (c = 0; c < 2; c++) {
1534 		for (j = 0; j < IWN_NSAMPLES; j++) {
1535 			printf("chain %d, sample %d: temp=%d gain=%d "
1536 			    "power=%d pa_det=%d\n", c, j,
1537 			    chans[1].samples[c][j].temp,
1538 			    chans[1].samples[c][j].gain,
1539 			    chans[1].samples[c][j].power,
1540 			    chans[1].samples[c][j].pa_det);
1541 		}
1542 	}
1543 }
1544 #endif
1545 
1546 void
1547 iwn5000_read_eeprom(struct iwn_softc *sc)
1548 {
1549 	struct iwn5000_eeprom_calib_hdr hdr;
1550 	int32_t volt;
1551 	uint32_t base, addr;
1552 	uint16_t val;
1553 	int i;
1554 
1555 	/* Read regulatory domain (4 ASCII characters). */
1556 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1557 	base = letoh16(val);
1558 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1559 	    sc->eeprom_domain, 4);
1560 
1561 	/* Read the list of authorized channels (20MHz ones only). */
1562 	for (i = 0; i < 5; i++) {
1563 		addr = base + iwn5000_regulatory_bands[i];
1564 		iwn_read_eeprom_channels(sc, i, addr);
1565 	}
1566 
1567 	/* Read enhanced TX power information for 6000 Series. */
1568 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1569 		iwn_read_eeprom_enhinfo(sc);
1570 
1571 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1572 	base = letoh16(val);
1573 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1574 	DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
1575 	    hdr.version, hdr.pa_type, letoh16(hdr.volt)));
1576 	sc->calib_ver = hdr.version;
1577 
1578 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
1579 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
1580 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
1581 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
1582 		sc->eeprom_voltage = letoh16(hdr.volt);
1583 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1584 		sc->eeprom_temp = letoh16(val);
1585 		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
1586 		sc->eeprom_rawtemp = letoh16(val);
1587 	}
1588 
1589 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1590 		/* Compute temperature offset. */
1591 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1592 		sc->eeprom_temp = letoh16(val);
1593 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1594 		volt = letoh16(val);
1595 		sc->temp_off = sc->eeprom_temp - (volt / -5);
1596 		DPRINTF(("temp=%d volt=%d offset=%dK\n",
1597 		    sc->eeprom_temp, volt, sc->temp_off));
1598 	} else {
1599 		/* Read crystal calibration. */
1600 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1601 		    &sc->eeprom_crystal, sizeof (uint32_t));
1602 		DPRINTF(("crystal calibration 0x%08x\n",
1603 		    letoh32(sc->eeprom_crystal)));
1604 	}
1605 }
1606 
1607 void
1608 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1609 {
1610 	struct ieee80211com *ic = &sc->sc_ic;
1611 	const struct iwn_chan_band *band = &iwn_bands[n];
1612 	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
1613 	uint8_t chan;
1614 	int i;
1615 
1616 	iwn_read_prom_data(sc, addr, channels,
1617 	    band->nchan * sizeof (struct iwn_eeprom_chan));
1618 
1619 	for (i = 0; i < band->nchan; i++) {
1620 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
1621 			continue;
1622 
1623 		chan = band->chan[i];
1624 
1625 		if (n == 0) {	/* 2GHz band */
1626 			ic->ic_channels[chan].ic_freq =
1627 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
1628 			ic->ic_channels[chan].ic_flags =
1629 			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
1630 			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
1631 
1632 		} else {	/* 5GHz band */
1633 			/*
1634 			 * Some adapters support channels 7, 8, 11 and 12
1635 			 * both in the 2GHz and 4.9GHz bands.
1636 			 * Because of limitations in our net80211 layer,
1637 			 * we don't support them in the 4.9GHz band.
1638 			 */
1639 			if (chan <= 14)
1640 				continue;
1641 
1642 			ic->ic_channels[chan].ic_freq =
1643 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
1644 			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
1645 			/* We have at least one valid 5GHz channel. */
1646 			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1647 		}
1648 
1649 		/* Is active scan allowed on this channel? */
1650 		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
1651 			ic->ic_channels[chan].ic_flags |=
1652 			    IEEE80211_CHAN_PASSIVE;
1653 		}
1654 
1655 		/* Save maximum allowed TX power for this channel. */
1656 		sc->maxpwr[chan] = channels[i].maxpwr;
1657 
1658 		if (sc->sc_flags & IWN_FLAG_HAS_11N)
1659 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_HT;
1660 
1661 		DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
1662 		    chan, channels[i].flags, sc->maxpwr[chan]));
1663 	}
1664 }
1665 
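/*
 * Read "enhanced TX power" entries from 6000 Series EEPROMs and record
 * the maximum TX power for each entry, converted from half-dBm to dBm.
 */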
1666 void
1667 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1668 {
1669 	struct iwn_eeprom_enhinfo enhinfo[35];
1670 	uint16_t val, base;
1671 	int8_t maxpwr;
1672 	int i;
1673 
1674 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1675 	base = letoh16(val);
1676 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1677 	    enhinfo, sizeof enhinfo);
1678 
1679 	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1680 	for (i = 0; i < nitems(enhinfo); i++) {
1681 		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1682 			continue;	/* Skip invalid entries. */
1683 
1684 		maxpwr = 0;
1685 		if (sc->txchainmask & IWN_ANT_A)
1686 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1687 		if (sc->txchainmask & IWN_ANT_B)
1688 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1689 		if (sc->txchainmask & IWN_ANT_C)
1690 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1691 		if (sc->ntxchains == 2)
1692 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1693 		else if (sc->ntxchains == 3)
1694 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1695 		maxpwr /= 2;	/* Convert half-dBm to dBm. */
1696 
1697 		DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
1698 		sc->enh_maxpwr[i] = maxpwr;
1699 	}
1700 }
1701 
1702 struct ieee80211_node *
1703 iwn_node_alloc(struct ieee80211com *ic)
1704 {
1705 	return malloc(sizeof (struct iwn_node), M_DEVBUF, M_NOWAIT | M_ZERO);
1706 }
1707 
1708 void
1709 iwn_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
1710 {
1711 	struct iwn_softc *sc = ic->ic_if.if_softc;
1712 	struct iwn_node *wn = (void *)ni;
1713 	uint8_t rate;
1714 	int ridx, i;
1715 
1716 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0)
1717 		ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
1718 
1719 	/* Start at the lowest available bit-rate; AMRR/MiRA will raise it. */
1720 	ni->ni_txrate = 0;
1721 	ni->ni_txmcs = 0;
1722 
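	/*
	 * Cache the HW rate index matching each rate in the node's rate
	 * set, so the Tx path can use wn->ridx[ni->ni_txrate] instead of
	 * searching iwn_rates[] for every frame.
	 */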
1723 	for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
1724 		rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
1725 		/* Map 802.11 rate to HW rate index. */
1726 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
1727 			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
1728 			    iwn_rates[ridx].rate == rate)
1729 				break;
1730 		}
1731 		wn->ridx[i] = ridx;
1732 	}
1733 }
1734 
1735 int
1736 iwn_media_change(struct ifnet *ifp)
1737 {
1738 	struct iwn_softc *sc = ifp->if_softc;
1739 	struct ieee80211com *ic = &sc->sc_ic;
1740 	uint8_t rate, ridx;
1741 	int error;
1742 
1743 	error = ieee80211_media_change(ifp);
1744 	if (error != ENETRESET)
1745 		return error;
1746 
1747 	if (ic->ic_fixed_mcs != -1)
1748 		sc->fixed_ridx = iwn_mcs2ridx[ic->ic_fixed_mcs];
1749 	if (ic->ic_fixed_rate != -1) {
1750 		rate = ic->ic_sup_rates[ic->ic_curmode].
1751 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
1752 		/* Map 802.11 rate to HW rate index. */
1753 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1754 			if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID &&
1755 			    iwn_rates[ridx].rate == rate)
1756 				break;
1757 		sc->fixed_ridx = ridx;
1758 	}
1759 
1760 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1761 	    (IFF_UP | IFF_RUNNING)) {
1762 		iwn_stop(ifp);
1763 		error = iwn_init(ifp);
1764 	}
1765 	return error;
1766 }
1767 
1768 int
1769 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
1770 {
1771 	struct ifnet *ifp = &ic->ic_if;
1772 	struct iwn_softc *sc = ifp->if_softc;
1773 	struct ieee80211_node *ni = ic->ic_bss;
1774 	struct iwn_node *wn = (void *)ni;
1775 	int error;
1776 
1777 	if (ic->ic_state == IEEE80211_S_RUN) {
1778 		if (nstate == IEEE80211_S_SCAN) {
1779 			/*
1780 			 * During RUN->SCAN we don't call sc_newstate() so
1781 			 * we must stop A-MPDU Tx ourselves in this case.
1782 			 */
1783 			ieee80211_stop_ampdu_tx(ic, ni, -1);
1784 			ieee80211_ba_del(ni);
1785 		}
1786 		ieee80211_mira_cancel_timeouts(&wn->mn);
1787 		timeout_del(&sc->calib_to);
1788 		sc->calib.state = IWN_CALIB_STATE_INIT;
1789 		if (sc->sc_flags & IWN_FLAG_BGSCAN)
1790 			iwn_scan_abort(sc);
1791 	}
1792 
1793 	if (ic->ic_state == IEEE80211_S_SCAN) {
1794 		if (nstate == IEEE80211_S_SCAN) {
1795 			if (sc->sc_flags & IWN_FLAG_SCANNING)
1796 				return 0;
1797 		} else
1798 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
1799 		/* Turn LED off when leaving scan state. */
1800 		iwn_set_led(sc, IWN_LED_LINK, 1, 0);
1801 	}
1802 
1803 	if (ic->ic_state >= IEEE80211_S_ASSOC &&
1804 	    nstate <= IEEE80211_S_ASSOC) {
1805 		/* Reset state to handle re- and disassociations. */
1806 		sc->rxon.associd = 0;
1807 		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1808 		sc->calib.state = IWN_CALIB_STATE_INIT;
1809 		error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
1810 		if (error != 0)
1811 			printf("%s: RXON command failed\n",
1812 			    sc->sc_dev.dv_xname);
1813 	}
1814 
1815 	switch (nstate) {
1816 	case IEEE80211_S_SCAN:
1817 		/* Make the link LED blink while we're scanning. */
1818 		iwn_set_led(sc, IWN_LED_LINK, 10, 10);
1819 
1820 		if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 0)) != 0) {
1821 			printf("%s: could not initiate scan\n",
1822 			    sc->sc_dev.dv_xname);
1823 			return error;
1824 		}
1825 		if (ifp->if_flags & IFF_DEBUG)
1826 			printf("%s: %s -> %s\n", ifp->if_xname,
1827 			    ieee80211_state_name[ic->ic_state],
1828 			    ieee80211_state_name[nstate]);
1829 		if ((sc->sc_flags & IWN_FLAG_BGSCAN) == 0) {
1830 			ieee80211_set_link_state(ic, LINK_STATE_DOWN);
1831 			ieee80211_node_cleanup(ic, ic->ic_bss);
1832 		}
1833 		ic->ic_state = nstate;
1834 		return 0;
1835 
1836 	case IEEE80211_S_ASSOC:
1837 		if (ic->ic_state != IEEE80211_S_RUN)
1838 			break;
1839 		/* FALLTHROUGH */
1840 	case IEEE80211_S_AUTH:
1841 		if ((error = iwn_auth(sc, arg)) != 0) {
1842 			printf("%s: could not move to auth state\n",
1843 			    sc->sc_dev.dv_xname);
1844 			return error;
1845 		}
1846 		break;
1847 
1848 	case IEEE80211_S_RUN:
1849 		if ((error = iwn_run(sc)) != 0) {
1850 			printf("%s: could not move to run state\n",
1851 			    sc->sc_dev.dv_xname);
1852 			return error;
1853 		}
1854 		break;
1855 
1856 	case IEEE80211_S_INIT:
1857 		sc->calib.state = IWN_CALIB_STATE_INIT;
1858 		break;
1859 	}
1860 
1861 	return sc->sc_newstate(ic, nstate, arg);
1862 }
1863 
1864 void
1865 iwn_iter_func(void *arg, struct ieee80211_node *ni)
1866 {
1867 	struct iwn_softc *sc = arg;
1868 	struct iwn_node *wn = (void *)ni;
1869 
1870 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
1871 		int old_txrate = ni->ni_txrate;
1872 		ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
1873 		if (old_txrate != ni->ni_txrate)
1874 			iwn_set_link_quality(sc, ni);
1875 	}
1876 }
1877 
1878 void
1879 iwn_calib_timeout(void *arg)
1880 {
1881 	struct iwn_softc *sc = arg;
1882 	struct ieee80211com *ic = &sc->sc_ic;
1883 	int s;
1884 
1885 	s = splnet();
1886 	if (ic->ic_fixed_rate == -1) {
1887 		if (ic->ic_opmode == IEEE80211_M_STA)
1888 			iwn_iter_func(sc, ic->ic_bss);
1889 		else
1890 			ieee80211_iterate_nodes(ic, iwn_iter_func, sc);
1891 	}
1892 	/* Force automatic TX power calibration every 60 secs (120 * 500ms). */
1893 	if (++sc->calib_cnt >= 120) {
1894 		uint32_t flags = 0;
1895 
1896 		DPRINTFN(2, ("sending request for statistics\n"));
1897 		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
1898 		    sizeof flags, 1);
1899 		sc->calib_cnt = 0;
1900 	}
1901 	splx(s);
1902 
1903 	/* Automatic rate control triggered every 500ms. */
1904 	timeout_add_msec(&sc->calib_to, 500);
1905 }
1906 
1907 int
1908 iwn_ccmp_decap(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
1909 {
1910 	struct ieee80211com *ic = &sc->sc_ic;
1911 	struct ieee80211_key *k = &ni->ni_pairwise_key;
1912 	struct ieee80211_frame *wh;
1913 	struct ieee80211_rx_ba *ba;
1914 	uint64_t pn, *prsc;
1915 	uint8_t *ivp;
1916 	uint8_t tid;
1917 	int hdrlen, hasqos;
1918 
1919 	wh = mtod(m, struct ieee80211_frame *);
1920 	hdrlen = ieee80211_get_hdrlen(wh);
1921 	ivp = (uint8_t *)wh + hdrlen;
1922 
1923 	/* Check that ExtIV bit is set. */
1924 	if (!(ivp[3] & IEEE80211_WEP_EXTIV)) {
1925 		DPRINTF(("CCMP decap ExtIV not set\n"));
1926 		return 1;
1927 	}
1928 	hasqos = ieee80211_has_qos(wh);
1929 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
1930 	ba = hasqos ? &ni->ni_rx_ba[tid] : NULL;
1931 	prsc = &k->k_rsc[tid];
1932 
1933 	/* Extract the 48-bit PN from the CCMP header. */
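	/*
	 * CCMP header layout: PN0 PN1 <reserved> <KeyID|ExtIV> PN2 PN3
	 * PN4 PN5, which is why bytes 2 and 3 are skipped below.
	 */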
1934 	pn = (uint64_t)ivp[0]       |
1935 	     (uint64_t)ivp[1] <<  8 |
1936 	     (uint64_t)ivp[4] << 16 |
1937 	     (uint64_t)ivp[5] << 24 |
1938 	     (uint64_t)ivp[6] << 32 |
1939 	     (uint64_t)ivp[7] << 40;
1940 	if (pn <= *prsc) {
1941 		DPRINTF(("CCMP replayed\n"));
1942 		ic->ic_stats.is_ccmp_replays++;
1943 		return 1;
1944 	}
1945 	/* Last seen packet number is updated in ieee80211_inputm(). */
1946 
1947 	/* Strip MIC. IV will be stripped by ieee80211_inputm(). */
1948 	m_adj(m, -IEEE80211_CCMP_MICLEN);
1949 	return 0;
1950 }
1951 
1952 /*
1953  * Process an RX_PHY firmware notification.  This is usually immediately
1954  * followed by an MPDU_RX_DONE notification.
1955  */
1956 void
1957 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1958     struct iwn_rx_data *data)
1959 {
1960 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1961 
1962 	DPRINTFN(2, ("received PHY stats\n"));
1963 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
1964 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
1965 
1966 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
1967 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
1968 	sc->last_rx_valid = IWN_LAST_RX_VALID;
1969 	/*
1970 	 * The firmware does not send separate RX_PHY
1971 	 * notifications for A-MPDU subframes.
1972 	 */
1973 	if (stat->flags & htole16(IWN_STAT_FLAG_AGG))
1974 		sc->last_rx_valid |= IWN_LAST_RX_AMPDU;
1975 }
1976 
1977 /*
1978  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
1979  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
1980  */
1981 void
1982 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1983     struct iwn_rx_data *data, struct mbuf_list *ml)
1984 {
1985 	struct iwn_ops *ops = &sc->ops;
1986 	struct ieee80211com *ic = &sc->sc_ic;
1987 	struct ifnet *ifp = &ic->ic_if;
1988 	struct iwn_rx_ring *ring = &sc->rxq;
1989 	struct ieee80211_frame *wh;
1990 	struct ieee80211_rxinfo rxi;
1991 	struct ieee80211_node *ni;
1992 	struct ieee80211_channel *bss_chan = NULL;
1993 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
1994 	struct mbuf *m, *m1;
1995 	struct iwn_rx_stat *stat;
1996 	caddr_t head;
1997 	uint32_t flags;
1998 	int error, len, rssi;
1999 	uint16_t chan;
2000 
2001 	if (desc->type == IWN_MPDU_RX_DONE) {
2002 		/* Check for prior RX_PHY notification. */
2003 		if (!sc->last_rx_valid) {
2004 			DPRINTF(("missing RX_PHY\n"));
2005 			return;
2006 		}
2007 		sc->last_rx_valid &= ~IWN_LAST_RX_VALID;
2008 		stat = &sc->last_rx_stat;
2009 		if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) &&
2010 		    (stat->flags & htole16(IWN_STAT_FLAG_AGG)) == 0) {
2011 			DPRINTF(("missing RX_PHY (expecting A-MPDU)\n"));
2012 			return;
2013 		}
2014 		if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) == 0 &&
2015 		    (stat->flags & htole16(IWN_STAT_FLAG_AGG))) {
2016 			DPRINTF(("missing RX_PHY (unexpected A-MPDU)\n"));
2017 			return;
2018 		}
2019 	} else
2020 		stat = (struct iwn_rx_stat *)(desc + 1);
2021 
2022 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE,
2023 	    BUS_DMASYNC_POSTREAD);
2024 
2025 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2026 		printf("%s: invalid RX statistic header\n",
2027 		    sc->sc_dev.dv_xname);
2028 		return;
2029 	}
2030 	if (desc->type == IWN_MPDU_RX_DONE) {
2031 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2032 		head = (caddr_t)(mpdu + 1);
2033 		len = letoh16(mpdu->len);
2034 	} else {
2035 		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2036 		len = letoh16(stat->len);
2037 	}
2038 
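	/*
	 * The frame payload is followed by a 32-bit status word (FCS and
	 * decryption status) appended by the device.
	 */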
2039 	flags = letoh32(*(uint32_t *)(head + len));
2040 
2041 	/* Discard frames with a bad FCS early. */
2042 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2043 		DPRINTFN(2, ("RX flags error %x\n", flags));
2044 		ifp->if_ierrors++;
2045 		return;
2046 	}
2047 	/* Discard frames that are too short. */
2048 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
2049 		/* Allow control frames in monitor mode. */
2050 		if (len < sizeof (struct ieee80211_frame_cts)) {
2051 			DPRINTF(("frame too short: %d\n", len));
2052 			ic->ic_stats.is_rx_tooshort++;
2053 			ifp->if_ierrors++;
2054 			return;
2055 		}
2056 	} else if (len < sizeof (*wh)) {
2057 		DPRINTF(("frame too short: %d\n", len));
2058 		ic->ic_stats.is_rx_tooshort++;
2059 		ifp->if_ierrors++;
2060 		return;
2061 	}
2062 
2063 	m1 = MCLGETI(NULL, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
2064 	if (m1 == NULL) {
2065 		ic->ic_stats.is_rx_nombuf++;
2066 		ifp->if_ierrors++;
2067 		return;
2068 	}
2069 	bus_dmamap_unload(sc->sc_dmat, data->map);
2070 
2071 	error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *),
2072 	    IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ);
2073 	if (error != 0) {
2074 		m_freem(m1);
2075 
2076 		/* Try to reload the old mbuf. */
2077 		error = bus_dmamap_load(sc->sc_dmat, data->map,
2078 		    mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
2079 		    BUS_DMA_NOWAIT | BUS_DMA_READ);
2080 		if (error != 0) {
2081 			panic("%s: could not load old RX mbuf",
2082 			    sc->sc_dev.dv_xname);
2083 		}
2084 		/* Physical address may have changed. */
2085 		ring->desc[ring->cur] =
2086 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
2087 		bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2088 		    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2089 		    BUS_DMASYNC_PREWRITE);
2090 		ifp->if_ierrors++;
2091 		return;
2092 	}
2093 
2094 	m = data->m;
2095 	data->m = m1;
2096 	/* Update RX descriptor. */
2097 	ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8);
2098 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2099 	    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2100 	    BUS_DMASYNC_PREWRITE);
2101 
2102 	/* Finalize mbuf. */
2103 	m->m_data = head;
2104 	m->m_pkthdr.len = m->m_len = len;
2105 
2106 	/*
2107 	 * Grab a reference to the source node. Note that control frames are
2108 	 * shorter than struct ieee80211_frame, but ieee80211_find_rxnode()
2109 	 * handles them safely.
2110 	 */
2111 	wh = mtod(m, struct ieee80211_frame *);
2112 	if (len < sizeof (*wh) &&
2113 	   (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
2114 		ic->ic_stats.is_rx_tooshort++;
2115 		ifp->if_ierrors++;
2116 		m_freem(m);
2117 		return;
2118 	}
2119 	ni = ieee80211_find_rxnode(ic, wh);
2120 
2121 	rxi.rxi_flags = 0;
2122 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
2123 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
2124 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
2125 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
2126 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
2127 		if ((flags & IWN_RX_CIPHER_MASK) != IWN_RX_CIPHER_CCMP) {
2128 			ic->ic_stats.is_ccmp_dec_errs++;
2129 			ifp->if_ierrors++;
2130 			m_freem(m);
2131 			ieee80211_release_node(ic, ni);
2132 			return;
2133 		}
2134 		/* Check whether decryption was successful or not. */
2135 		if ((desc->type == IWN_MPDU_RX_DONE &&
2136 		     (flags & (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) !=
2137 		      (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) ||
2138 		    (desc->type != IWN_MPDU_RX_DONE &&
2139 		     (flags & IWN_RX_DECRYPT_MASK) != IWN_RX_DECRYPT_OK)) {
2140 			DPRINTF(("CCMP decryption failed 0x%x\n", flags));
2141 			ic->ic_stats.is_ccmp_dec_errs++;
2142 			ifp->if_ierrors++;
2143 			m_freem(m);
2144 			ieee80211_release_node(ic, ni);
2145 			return;
2146 		}
2147 		if (iwn_ccmp_decap(sc, m, ni) != 0) {
2148 			ifp->if_ierrors++;
2149 			m_freem(m);
2150 			ieee80211_release_node(ic, ni);
2151 			return;
2152 		}
2153 		rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
2154 	}
2155 
2156 	rssi = ops->get_rssi(stat);
2157 
2158 	chan = stat->chan;
2159 	if (chan > IEEE80211_CHAN_MAX)
2160 		chan = IEEE80211_CHAN_MAX;
2161 
2162 	/* Fix current channel. */
2163 	if (ni == ic->ic_bss) {
2164 		/*
2165 		 * We may switch ic_bss's channel during scans.
2166 		 * Record the current channel so we can restore it later.
2167 		 */
2168 		bss_chan = ni->ni_chan;
2169 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
2170 	}
2171 	ni->ni_chan = &ic->ic_channels[chan];
2172 
2173 #if NBPFILTER > 0
2174 	if (sc->sc_drvbpf != NULL) {
2175 		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2176 		uint16_t chan_flags;
2177 
2178 		tap->wr_flags = 0;
2179 		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2180 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2181 		tap->wr_chan_freq = htole16(ic->ic_channels[chan].ic_freq);
2182 		chan_flags = ic->ic_channels[chan].ic_flags;
2183 		if (ic->ic_curmode != IEEE80211_MODE_11N)
2184 			chan_flags &= ~IEEE80211_CHAN_HT;
2185 		tap->wr_chan_flags = htole16(chan_flags);
2186 		tap->wr_dbm_antsignal = (int8_t)rssi;
2187 		tap->wr_dbm_antnoise = (int8_t)sc->noise;
2188 		tap->wr_tsft = stat->tstamp;
2189 		if (stat->rflags & IWN_RFLAG_MCS) {
2190 			tap->wr_rate = (0x80 | stat->rate); /* HT MCS index */
2191 		} else {
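			/*
			 * Radiotap rates are in 500 kb/s units.  The hardware
			 * reports CCK rates in 100 kb/s units and OFDM rates
			 * as chip-specific codes, hence the mapping below.
			 */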
2192 			switch (stat->rate) {
2193 			/* CCK rates. */
2194 			case  10: tap->wr_rate =   2; break;
2195 			case  20: tap->wr_rate =   4; break;
2196 			case  55: tap->wr_rate =  11; break;
2197 			case 110: tap->wr_rate =  22; break;
2198 			/* OFDM rates. */
2199 			case 0xd: tap->wr_rate =  12; break;
2200 			case 0xf: tap->wr_rate =  18; break;
2201 			case 0x5: tap->wr_rate =  24; break;
2202 			case 0x7: tap->wr_rate =  36; break;
2203 			case 0x9: tap->wr_rate =  48; break;
2204 			case 0xb: tap->wr_rate =  72; break;
2205 			case 0x1: tap->wr_rate =  96; break;
2206 			case 0x3: tap->wr_rate = 108; break;
2207 			/* Unknown rate: should not happen. */
2208 			default:  tap->wr_rate =  0;
2209 			}
2210 		}
2211 
2212 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
2213 		    m, BPF_DIRECTION_IN);
2214 	}
2215 #endif
2216 
2217 	/* Send the frame to the 802.11 layer. */
2218 	rxi.rxi_rssi = rssi;
2219 	rxi.rxi_tstamp = 0;	/* unused */
2220 	ieee80211_inputm(ifp, m, ni, &rxi, ml);
2221 
2222 	/*
2223 	 * ieee80211_inputm() might have changed our BSS.
2224 	 * Restore ic_bss's channel if we are still in the same BSS.
2225 	 */
2226 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
2227 		ni->ni_chan = bss_chan;
2228 
2229 	/* Node is no longer needed. */
2230 	ieee80211_release_node(ic, ni);
2231 }
2232 
2233 void
2234 iwn_mira_choose(struct iwn_softc *sc, struct ieee80211_node *ni)
2235 {
2236 	struct ieee80211com *ic = &sc->sc_ic;
2237 	struct iwn_node *wn = (void *)ni;
2238 	int best_mcs = ieee80211_mira_get_best_mcs(&wn->mn);
2239 
2240 	ieee80211_mira_choose(&wn->mn, ic, ni);
2241 
2242 	/*
2243 	 * Update firmware's LQ retry table if MiRA has chosen a new MCS.
2244 	 *
2245 	 * We only need to do this if the best MCS has changed because
2246 	 * we ask firmware to use a fixed MCS while MiRA is probing a
2247 	 * candidate MCS.
2248 	 * While not probing we ask firmware to retry at lower rates in case
2249 	 * Tx at the newly chosen best MCS ends up failing, and then report
2250 	 * any resulting Tx retries to MiRA in order to trigger probing.
2251 	 */
2252 	if (best_mcs != ieee80211_mira_get_best_mcs(&wn->mn))
2253 		iwn_set_link_quality(sc, ni);
2254 }
2255 
2256 void
2257 iwn_ampdu_rate_control(struct iwn_softc *sc, struct ieee80211_node *ni,
2258     struct iwn_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
2259 {
2260 	struct ieee80211com *ic = &sc->sc_ic;
2261 	struct iwn_node *wn = (void *)ni;
2262 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
2263 	int min_ampdu_id, max_ampdu_id, id;
2264 	int idx, end_idx;
2265 
2266 	/* Determine the min/max IDs we assigned to AMPDUs in this range. */
2267 	idx = IWN_AGG_SSN_TO_TXQ_IDX(seq);
2268 	end_idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
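	/*
	 * Frames on an aggregation queue occupy the ring slot derived from
	 * their sequence number (presumably the SSN modulo the ring size),
	 * so the range [seq, ssn) maps directly onto ring indices.
	 */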
2269 	min_ampdu_id = txq->data[idx].ampdu_id;
2270 	max_ampdu_id = min_ampdu_id;
2271 	while (idx != end_idx) {
2272 		struct iwn_tx_data *txdata = &txq->data[idx];
2273 
2274 		if (txdata->m != NULL) {
2275 			if (min_ampdu_id > txdata->ampdu_id)
2276 				min_ampdu_id = txdata->ampdu_id;
2277 			if (max_ampdu_id < txdata->ampdu_id)
2278 				max_ampdu_id = txdata->ampdu_id;
2279 		}
2280 
2281 		idx = (idx + 1) % IWN_TX_RING_COUNT;
2282 	}
2283 
2284 	/*
2285 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
2286 	 */
2287 	for (id = min_ampdu_id; id <= max_ampdu_id; id++) {
2288 		int have_ack = 0, bit = 0;
2289 		idx = IWN_AGG_SSN_TO_TXQ_IDX(seq);
2290 		end_idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
2291 		wn->mn.agglen = 0;
2292 		wn->mn.ampdu_size = 0;
2293 		while (idx != end_idx) {
2294 			struct iwn_tx_data *txdata = &txq->data[idx];
2295 			uint16_t s = (seq + bit) & 0xfff;
2296 			/*
2297 			 * We can assume this subframe has been ACKed:
2298 			 * ACK failures are reported as single frames, and
2299 			 * before failing an A-MPDU subframe the firmware
2300 			 * sends it as a single frame at least once.
2301 			 *
2302 			 * We also learned how many subframes this A-MPDU
2303 			 * contained when it was transmitted, so if the
2304 			 * firmware does not report all subframes now we can
2305 			 * deduce an ACK failure for the missing ones.
2306 			 */
2307 			if (txdata->m != NULL && txdata->ampdu_id == id &&
2308 			    txdata->ampdu_txmcs == ni->ni_txmcs &&
2309 			    txdata->ampdu_nframes > 0 &&
2310 			    (SEQ_LT(ba->ba_winend, s) ||
2311 			    (ba->ba_bitmap & (1 << bit)) == 0)) {
2312 				have_ack++;
2313 				wn->mn.frames = txdata->ampdu_nframes;
2314 				wn->mn.agglen = txdata->ampdu_nframes;
2315 				wn->mn.ampdu_size = txdata->ampdu_size;
2316 				if (txdata->retries > 1)
2317 					wn->mn.retries++;
2318 				if (!SEQ_LT(ba->ba_winend, s))
2319 					ieee80211_output_ba_record_ack(ic, ni,
2320 					    tid, s);
2321 			}
2322 
2323 			idx = (idx + 1) % IWN_TX_RING_COUNT;
2324 			bit++;
2325 		}
2326 
2327 		if (have_ack > 0) {
2328 			wn->mn.txfail = wn->mn.frames - have_ack;
2329 			iwn_mira_choose(sc, ni);
2330 		}
2331 	}
2332 }
2333 
2334 /*
2335  * Process an incoming Compressed BlockAck.
2336  * Note that these block ack notifications are generated by firmware and do
2337  * not necessarily correspond to contents of block ack frames seen on the air.
2338  */
2339 void
2340 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2341     struct iwn_rx_data *data)
2342 {
2343 	struct iwn_compressed_ba *cba = (struct iwn_compressed_ba *)(desc + 1);
2344 	struct ieee80211com *ic = &sc->sc_ic;
2345 	struct ieee80211_node *ni;
2346 	struct ieee80211_tx_ba *ba;
2347 	struct iwn_tx_ring *txq;
2348 	uint16_t seq, ssn;
2349 	int qid;
2350 
2351 	if (ic->ic_state != IEEE80211_S_RUN)
2352 		return;
2353 
2354 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*cba),
2355 	    BUS_DMASYNC_POSTREAD);
2356 
2357 	if (!IEEE80211_ADDR_EQ(ic->ic_bss->ni_macaddr, cba->macaddr))
2358 		return;
2359 
2360 	ni = ic->ic_bss;
2361 
2362 	qid = le16toh(cba->qid);
2363 	if (qid < sc->first_agg_txq || qid >= sc->ntxqs)
2364 		return;
2365 
2366 	txq = &sc->txq[qid];
2367 
2368 	/* Protect against a firmware bug where the queue/TID are off. */
2369 	if (qid != sc->first_agg_txq + cba->tid)
2370 		return;
2371 
2372 	ba = &ni->ni_tx_ba[cba->tid];
2373 	if (ba->ba_state != IEEE80211_BA_AGREED)
2374 		return;
2375 
2376 	/*
2377 	 * The first bit in cba->bitmap corresponds to the sequence number
2378 	 * stored in the sequence control field cba->seq.
2379 	 * Multiple BA notifications in a row may reuse this number, with
2380 	 * additional bits being set in cba->bitmap. It is unclear how the
2381 	 * firmware decides to shift this window forward.
2382 	 * We rely on ba->ba_winstart instead.
2383 	 */
2384 	seq = le16toh(cba->seq) >> IEEE80211_SEQ_SEQ_SHIFT;
2385 
2386 	/*
2387 	 * The firmware's new BA window starting sequence number
2388 	 * corresponds to the first hole in cba->bitmap, implying
2389 	 * that all frames between 'seq' and 'ssn' (non-inclusive)
2390 	 * have been acked.
2391 	 */
2392 	ssn = le16toh(cba->ssn);
2393 
2394 	/* Skip rate control if our Tx rate is fixed. */
2395 	if (ic->ic_fixed_mcs == -1)
2396 		iwn_ampdu_rate_control(sc, ni, txq, cba->tid, ba->ba_winstart,
2397 		    ssn);
2398 
2399 	/*
2400 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
2401 	 * in firmware's BA window. Firmware is not going to retransmit any
2402 	 * frames before its BA window so mark them all as done.
2403 	 */
2404 	if (SEQ_LT(ba->ba_winstart, ssn)) {
2405 		ieee80211_output_ba_move_window(ic, ni, cba->tid, ssn);
2406 		iwn_ampdu_txq_advance(sc, txq, qid,
2407 		    IWN_AGG_SSN_TO_TXQ_IDX(ssn));
2408 		iwn_clear_oactive(sc, txq);
2409 	}
2410 }
2411 
2412 /*
2413  * Process a CALIBRATION_RESULT notification sent by the initialization
2414  * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
2415  */
2416 void
2417 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2418     struct iwn_rx_data *data)
2419 {
2420 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2421 	int len, idx = -1;
2422 
2423 	/* Runtime firmware should not send such a notification. */
2424 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2425 		return;
2426 
2427 	len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK) - 4;
2428 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len,
2429 	    BUS_DMASYNC_POSTREAD);
2430 
2431 	switch (calib->code) {
2432 	case IWN5000_PHY_CALIB_DC:
2433 		if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2434 		    sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2435 		    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2436 		    sc->hw_type == IWN_HW_REV_TYPE_135 ||
2437 		    sc->hw_type == IWN_HW_REV_TYPE_105)
2438 			idx = 0;
2439 		break;
2440 	case IWN5000_PHY_CALIB_LO:
2441 		idx = 1;
2442 		break;
2443 	case IWN5000_PHY_CALIB_TX_IQ:
2444 		idx = 2;
2445 		break;
2446 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2447 		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2448 		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2449 			idx = 3;
2450 		break;
2451 	case IWN5000_PHY_CALIB_BASE_BAND:
2452 		idx = 4;
2453 		break;
2454 	}
2455 	if (idx == -1)	/* Ignore other results. */
2456 		return;
2457 
2458 	/* Save calibration result. */
2459 	if (sc->calibcmd[idx].buf != NULL)
2460 		free(sc->calibcmd[idx].buf, M_DEVBUF, 0);
2461 	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2462 	if (sc->calibcmd[idx].buf == NULL) {
2463 		DPRINTF(("not enough memory for calibration result %d\n",
2464 		    calib->code));
2465 		return;
2466 	}
2467 	DPRINTF(("saving calibration result code=%d len=%d\n",
2468 	    calib->code, len));
2469 	sc->calibcmd[idx].len = len;
2470 	memcpy(sc->calibcmd[idx].buf, calib, len);
2471 }
2472 
2473 /*
2474  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2475  * The latter is sent by the firmware after each received beacon.
2476  */
2477 void
2478 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2479     struct iwn_rx_data *data)
2480 {
2481 	struct iwn_ops *ops = &sc->ops;
2482 	struct ieee80211com *ic = &sc->sc_ic;
2483 	struct iwn_calib_state *calib = &sc->calib;
2484 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2485 	int temp;
2486 
2487 	/* Ignore statistics received during a scan. */
2488 	if (ic->ic_state != IEEE80211_S_RUN)
2489 		return;
2490 
2491 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2492 	    sizeof (*stats), BUS_DMASYNC_POSTREAD);
2493 
2494 	DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type));
2495 	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2496 
2497 	/* Test if temperature has changed. */
2498 	if (stats->general.temp != sc->rawtemp) {
2499 		/* Convert "raw" temperature to degC. */
2500 		sc->rawtemp = stats->general.temp;
2501 		temp = ops->get_temperature(sc);
2502 		DPRINTFN(2, ("temperature=%dC\n", temp));
2503 
2504 		/* Update TX power if need be (4965AGN only). */
2505 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2506 			iwn4965_power_calibration(sc, temp);
2507 	}
2508 
2509 	if (desc->type != IWN_BEACON_STATISTICS)
2510 		return;	/* Reply to a statistics request. */
2511 
2512 	sc->noise = iwn_get_noise(&stats->rx.general);
2513 
2514 	/* Test that RSSI and noise are present in stats report. */
2515 	if (sc->noise == -127)
2516 		return;
2517 
2518 	if (letoh32(stats->rx.general.flags) != 1) {
2519 		DPRINTF(("received statistics without RSSI\n"));
2520 		return;
2521 	}
2522 
2523 	/*
2524 	 * XXX Differential gain calibration makes the 6005 firmware
2525 	 * crap out, so skip it for now.  This effectively disables
2526 	 * sensitivity tuning as well.
2527 	 */
2528 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2529 		return;
2530 
2531 	if (calib->state == IWN_CALIB_STATE_ASSOC)
2532 		iwn_collect_noise(sc, &stats->rx.general);
2533 	else if (calib->state == IWN_CALIB_STATE_RUN)
2534 		iwn_tune_sensitivity(sc, &stats->rx);
2535 }
2536 
2537 void
2538 iwn_ampdu_txq_advance(struct iwn_softc *sc, struct iwn_tx_ring *txq, int qid,
2539     int idx)
2540 {
2541 	struct iwn_ops *ops = &sc->ops;
2542 
2543 	DPRINTFN(3, ("%s: txq->cur=%d txq->read=%d txq->queued=%d qid=%d "
2544 	    "idx=%d\n", __func__, txq->cur, txq->read, txq->queued, qid, idx));
2545 
2546 	while (txq->read != idx) {
2547 		struct iwn_tx_data *txdata = &txq->data[txq->read];
2548 		if (txdata->m != NULL) {
2549 			ops->reset_sched(sc, qid, txq->read);
2550 			iwn_tx_done_free_txdata(sc, txdata);
2551 			txq->queued--;
2552 		}
2553 		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2554 	}
2555 }
2556 
2557 /*
2558  * Handle A-MPDU Tx queue status report.
2559  * Tx failures come as single frames (perhaps out of order), and before failing
2560  * an A-MPDU subframe the firmware transmits it as a single frame at least once
2561  * and reports Tx success/failure here. Frames successfully transmitted in an
2562  * A-MPDU are completed when a compressed block ack notification is received.
2563  */
2564 void
2565 iwn_ampdu_tx_done(struct iwn_softc *sc, struct iwn_tx_ring *txq,
2566     struct iwn_rx_desc *desc, uint16_t status, uint8_t ackfailcnt,
2567     uint8_t rate, uint8_t rflags, int nframes, uint32_t ssn,
2568     struct iwn_txagg_status *agg_status)
2569 {
2570 	struct ieee80211com *ic = &sc->sc_ic;
2571 	int tid = desc->qid - sc->first_agg_txq;
2572 	struct iwn_tx_data *txdata = &txq->data[desc->idx];
2573 	struct ieee80211_node *ni = txdata->ni;
2574 	struct iwn_node *wn = (void *)ni;
2575 	int txfail = (status != IWN_TX_STATUS_SUCCESS &&
2576 	    status != IWN_TX_STATUS_DIRECT_DONE);
2577 	struct ieee80211_tx_ba *ba;
2578 	uint16_t seq;
2579 
2580 	sc->sc_tx_timer = 0;
2581 
2582 	if (ic->ic_state != IEEE80211_S_RUN)
2583 		return;
2584 
2585 	if (nframes > 1) {
2586 		int ampdu_id, have_ampdu_id = 0, ampdu_size = 0;
2587 		int i;
2588 
2589 		/* Compute the size of this A-MPDU. */
2590 		for (i = 0; i < nframes; i++) {
2591 			uint8_t qid = agg_status[i].qid;
2592 			uint8_t idx = agg_status[i].idx;
2593 
2594 			if (qid != desc->qid)
2595 				continue;
2596 
2597 			txdata = &txq->data[idx];
2598 			if (txdata->ni == NULL)
2599 				continue;
2600 
2601 			ampdu_size += txdata->totlen + IEEE80211_CRC_LEN;
2602 		}
2603 
2604 		/*
2605 		 * For each subframe collect Tx status, retries, and Tx rate.
2606 		 * (The Tx rate is the same for all subframes in this batch.)
2607 		 */
2608 		for (i = 0; i < nframes; i++) {
2609 			uint8_t qid = agg_status[i].qid;
2610 			uint8_t idx = agg_status[i].idx;
2611 			uint16_t txstatus = (le16toh(agg_status[i].status) &
2612 			    IWN_AGG_TX_STATUS_MASK);
2613 			uint16_t trycnt = (le16toh(agg_status[i].status) &
2614 			    IWN_AGG_TX_TRY) >> IWN_AGG_TX_TRY_SHIFT;
2615 
2616 			if (qid != desc->qid)
2617 				continue;
2618 
2619 			txdata = &txq->data[idx];
2620 			if (txdata->ni == NULL)
2621 				continue;
2622 
2623 			if (rflags & IWN_RFLAG_MCS)
2624 				txdata->ampdu_txmcs = rate;
2625 			if (txstatus != IWN_AGG_TX_STATE_TRANSMITTED)
2626 				txdata->txfail++;
2627 			if (trycnt > 1)
2628 				txdata->retries++;
2629 
2630 			/*
2631 			 * Assign a common ID to all subframes of this A-MPDU.
2632 			 * This ID will be used during Tx rate control to
2633 			 * infer the ACK status of individual subframes.
2634 			 */
2635 			if (!have_ampdu_id) {
2636 				wn = (void *)txdata->ni;
2637 				ampdu_id = wn->next_ampdu_id++;
2638 				have_ampdu_id = 1;
2639 			}
2640 			txdata->ampdu_id = ampdu_id;
2641 
2642 			/*
2643 			 * We will also need to know the total number of
2644 			 * subframes and the size of this A-MPDU. We store
2645 			 * this redundantly on each subframe because firmware
2646 			 * only reports acknowledged subframes via compressed
2647 			 * block-ack notification. This way we will know what
2648 			 * the total number of subframes and size were even if
2649 			 * just one of these subframes gets acknowledged.
2650 			 */
2651 			txdata->ampdu_nframes = nframes;
2652 			txdata->ampdu_size = ampdu_size;
2653 		}
2654 		return;
2655 	}
2656 
2657 	if (ni == NULL)
2658 		return;
2659 
2660 	ba = &ni->ni_tx_ba[tid];
2661 	if (ba->ba_state != IEEE80211_BA_AGREED)
2662 		return;
2663 
2664 	/* This was a final single-frame Tx attempt for frame SSN-1. */
2665 	seq = (ssn - 1) & 0xfff;
2666 
2667 	/*
2668 	 * Skip rate control if our Tx rate is fixed.
2669 	 * Don't report frames to MiRA which were sent at a different
2670 	 * Tx rate than ni->ni_txmcs.
2671 	 */
2672 	if (ic->ic_fixed_mcs == -1 && txdata->txmcs == ni->ni_txmcs) {
2673 		wn->mn.frames++;
2674 		wn->mn.agglen = 1;
2675 		wn->mn.ampdu_size = txdata->totlen + IEEE80211_CRC_LEN;
2676 		if (ackfailcnt > 0)
2677 			wn->mn.retries++;
2678 		if (txfail)
2679 			wn->mn.txfail++;
2680 		iwn_mira_choose(sc, ni);
2681 	}
2682 
2683 	if (txfail)
2684 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
2685 	else if (!SEQ_LT(seq, ba->ba_winstart)) {
2686 		/*
2687 		 * Move window forward if SEQ lies beyond end of window,
2688 		 * otherwise we can't record the ACK for this frame.
2689 		 * Non-acked frames which left holes in the bitmap near
2690 		 * the beginning of the window must be discarded.
2691 		 */
2692 		uint16_t s = seq;
2693 		while (SEQ_LT(ba->ba_winend, s)) {
2694 			ieee80211_output_ba_move_window(ic, ni, tid, s);
2695 			iwn_ampdu_txq_advance(sc, txq, desc->qid,
2696 			    IWN_AGG_SSN_TO_TXQ_IDX(s));
2697 			s = (s + 1) & 0xfff;	/* 12-bit sequence space */
2698 		}
2699 		/* SEQ should now be within window; set corresponding bit. */
2700 		ieee80211_output_ba_record_ack(ic, ni, tid, seq);
2701 	}
2702 
2703 	/* Move window forward up to the first hole in the bitmap. */
2704 	ieee80211_output_ba_move_window_to_first_unacked(ic, ni, tid, ssn);
2705 	iwn_ampdu_txq_advance(sc, txq, desc->qid,
2706 	    IWN_AGG_SSN_TO_TXQ_IDX(ba->ba_winstart));
2707 
2708 	iwn_clear_oactive(sc, txq);
2709 }
2710 
2711 /*
2712  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2713  * and 5000 adapters have different, incompatible TX status formats.
2714  */
2715 void
2716 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2717     struct iwn_rx_data *data)
2718 {
2719 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2720 	struct iwn_tx_ring *ring;
2721 	size_t len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK);
2722 	uint16_t status = letoh32(stat->stat.status) & 0xff;
2723 	uint32_t ssn;
2724 
2725 	if (desc->qid > IWN4965_NTXQUEUES)
2726 		return;
2727 
2728 	ring = &sc->txq[desc->qid];
2729 
2730 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2731 	    len, BUS_DMASYNC_POSTREAD);
2732 
2733 	/* Sanity checks. */
2734 	if (sizeof(*stat) > len)
2735 		return;
2736 	if (stat->nframes < 1 || stat->nframes > IWN_AMPDU_MAX)
2737 		return;
2738 	if (desc->qid < sc->first_agg_txq && stat->nframes > 1)
2739 		return;
2740 	if (desc->qid >= sc->first_agg_txq && sizeof(*stat) + sizeof(ssn) +
2741 	    stat->nframes * sizeof(stat->stat) > len)
2742 		return;
2743 
2744 	if (desc->qid < sc->first_agg_txq) {
2745 		/* XXX 4965 does not report byte count */
2746 		struct iwn_tx_data *txdata = &ring->data[desc->idx];
2747 		uint16_t framelen = txdata->totlen + IEEE80211_CRC_LEN;
2748 		int txfail = (status != IWN_TX_STATUS_SUCCESS &&
2749 		    status != IWN_TX_STATUS_DIRECT_DONE);
2750 
2751 		iwn_tx_done(sc, desc, stat->ackfailcnt, stat->rate, txfail,
2752 		    desc->qid, framelen);
2753 	} else {
2754 		memcpy(&ssn, &stat->stat.status + stat->nframes, sizeof(ssn));
2755 		ssn = le32toh(ssn) & 0xfff;
2756 		iwn_ampdu_tx_done(sc, ring, desc, status, stat->ackfailcnt,
2757 		    stat->rate, stat->rflags, stat->nframes, ssn,
2758 		    stat->stat.agg_status);
2759 	}
2760 }
2761 
2762 void
2763 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2764     struct iwn_rx_data *data)
2765 {
2766 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2767 	struct iwn_tx_ring *ring;
2768 	size_t len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK);
2769 	uint16_t status = letoh32(stat->stat.status) & 0xff;
2770 	uint32_t ssn;
2771 
2772 	if (desc->qid > IWN5000_NTXQUEUES)
2773 		return;
2774 
2775 	ring = &sc->txq[desc->qid];
2776 
2777 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2778 	    sizeof (*stat), BUS_DMASYNC_POSTREAD);
2779 
2780 	/* Sanity checks. */
2781 	if (sizeof(*stat) > len)
2782 		return;
2783 	if (stat->nframes < 1 || stat->nframes > IWN_AMPDU_MAX)
2784 		return;
2785 	if (desc->qid < sc->first_agg_txq && stat->nframes > 1)
2786 		return;
2787 	if (desc->qid >= sc->first_agg_txq && sizeof(*stat) + sizeof(ssn) +
2788 	    stat->nframes * sizeof(stat->stat) > len)
2789 		return;
2790 
2791 	/* If this was not an aggregated frame, complete it now. */
2792 	if (desc->qid < sc->first_agg_txq) {
2793 		int txfail = (status != IWN_TX_STATUS_SUCCESS &&
2794 		    status != IWN_TX_STATUS_DIRECT_DONE);
2795 
2796 		/* Reset TX scheduler slot. */
2797 		iwn5000_reset_sched(sc, desc->qid, desc->idx);
2798 
2799 		iwn_tx_done(sc, desc, stat->ackfailcnt, stat->rate, txfail,
2800 		    desc->qid, letoh16(stat->len));
2801 	} else {
2802 		memcpy(&ssn, &stat->stat.status + stat->nframes, sizeof(ssn));
2803 		ssn = le32toh(ssn) & 0xfff;
2804 		iwn_ampdu_tx_done(sc, ring, desc, status, stat->ackfailcnt,
2805 		    stat->rate, stat->rflags, stat->nframes, ssn,
2806 		    stat->stat.agg_status);
2807 	}
2808 }
2809 
2810 void
2811 iwn_tx_done_free_txdata(struct iwn_softc *sc, struct iwn_tx_data *data)
2812 {
2813 	struct ieee80211com *ic = &sc->sc_ic;
2814 
2815 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2816 	    BUS_DMASYNC_POSTWRITE);
2817 	bus_dmamap_unload(sc->sc_dmat, data->map);
2818 	m_freem(data->m);
2819 	data->m = NULL;
2820 	ieee80211_release_node(ic, data->ni);
2821 	data->ni = NULL;
2822 	data->totlen = 0;
2823 	data->retries = 0;
2824 	data->txfail = 0;
2825 	data->txmcs = 0;
2826 	data->ampdu_txmcs = 0;
2827 	data->txrate = 0;
2828 }
2829 
2830 void
2831 iwn_clear_oactive(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2832 {
2833 	struct ieee80211com *ic = &sc->sc_ic;
2834 	struct ifnet *ifp = &ic->ic_if;
2835 
2836 	if (ring->queued < IWN_TX_RING_LOMARK) {
2837 		sc->qfullmsk &= ~(1 << ring->qid);
2838 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
2839 			ifq_clr_oactive(&ifp->if_snd);
2840 			(*ifp->if_start)(ifp);
2841 		}
2842 	}
2843 }
2844 
2845 /*
2846  * Adapter-independent backend for TX_DONE firmware notifications.
2847  * This handles Tx status for non-aggregation queues.
2848  */
2849 void
2850 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2851     uint8_t ackfailcnt, uint8_t rate, int txfail, int qid, uint16_t len)
2852 {
2853 	struct ieee80211com *ic = &sc->sc_ic;
2854 	struct ifnet *ifp = &ic->ic_if;
2855 	struct iwn_tx_ring *ring = &sc->txq[qid];
2856 	struct iwn_tx_data *data = &ring->data[desc->idx];
2857 	struct iwn_node *wn = (void *)data->ni;
2858 
2859 	if (data->ni == NULL)
2860 		return;
2861 
2862 	if (data->ni->ni_flags & IEEE80211_NODE_HT) {
2863 		if (ic->ic_state == IEEE80211_S_RUN &&
2864 		    ic->ic_fixed_mcs == -1 &&
2865 		    data->txmcs == data->ni->ni_txmcs) {
2866 			wn->mn.frames++;
2867 			wn->mn.ampdu_size = len;
2868 			wn->mn.agglen = 1;
2869 			if (ackfailcnt > 0)
2870 				wn->mn.retries++;
2871 			if (txfail)
2872 				wn->mn.txfail++;
2873 			iwn_mira_choose(sc, data->ni);
2874 		}
2875 	} else if (data->txrate == data->ni->ni_txrate) {
2876 		wn->amn.amn_txcnt++;
2877 		if (ackfailcnt > 0)
2878 			wn->amn.amn_retrycnt++;
2879 		if (txfail)
2880 			wn->amn.amn_retrycnt++;
2881 	}
2882 	if (txfail)
2883 		ifp->if_oerrors++;
2884 
2885 	iwn_tx_done_free_txdata(sc, data);
2886 
2887 	sc->sc_tx_timer = 0;
2888 	ring->queued--;
2889 	iwn_clear_oactive(sc, ring);
2890 }
2891 
2892 /*
2893  * Process a "command done" firmware notification.  This is where we wakeup
2894  * processes waiting for a synchronous command completion.
2895  */
2896 void
2897 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2898 {
2899 	struct iwn_tx_ring *ring = &sc->txq[4];
2900 	struct iwn_tx_data *data;
2901 
2902 	if ((desc->qid & 0xf) != 4)
2903 		return;	/* Not a command ack. */
2904 
2905 	data = &ring->data[desc->idx];
2906 
2907 	/* If the command was mapped in an mbuf, free it. */
2908 	if (data->m != NULL) {
2909 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2910 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2911 		bus_dmamap_unload(sc->sc_dmat, data->map);
2912 		m_freem(data->m);
2913 		data->m = NULL;
2914 	}
2915 	wakeup(&ring->desc[desc->idx]);
2916 }
2917 
2918 /*
2919  * Process an INT_FH_RX or INT_SW_RX interrupt.
2920  */
2921 void
2922 iwn_notif_intr(struct iwn_softc *sc)
2923 {
2924 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2925 	struct iwn_ops *ops = &sc->ops;
2926 	struct ieee80211com *ic = &sc->sc_ic;
2927 	struct ifnet *ifp = &ic->ic_if;
2928 	uint16_t hw;
2929 
2930 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
2931 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
2932 
2933 	hw = letoh16(sc->rxq.stat->closed_count) & 0xfff;
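	/*
	 * closed_count acts as the firmware's write pointer into the RX
	 * ring; process every entry between our read pointer (rxq.cur)
	 * and it.
	 */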
2934 	while (sc->rxq.cur != hw) {
2935 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2936 		struct iwn_rx_desc *desc;
2937 
2938 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc),
2939 		    BUS_DMASYNC_POSTREAD);
2940 		desc = mtod(data->m, struct iwn_rx_desc *);
2941 
2942 		DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n",
2943 		    desc->qid & 0xf, desc->idx, desc->flags, desc->type));
2944 
2945 		if (!(desc->qid & 0x80))	/* Reply to a command. */
2946 			iwn_cmd_done(sc, desc);
2947 
2948 		switch (desc->type) {
2949 		case IWN_RX_PHY:
2950 			iwn_rx_phy(sc, desc, data);
2951 			break;
2952 
2953 		case IWN_RX_DONE:		/* 4965AGN only. */
2954 		case IWN_MPDU_RX_DONE:
2955 			/* An 802.11 frame has been received. */
2956 			iwn_rx_done(sc, desc, data, &ml);
2957 			break;
2958 		case IWN_RX_COMPRESSED_BA:
2959 			/* A Compressed BlockAck has been received. */
2960 			iwn_rx_compressed_ba(sc, desc, data);
2961 			break;
2962 		case IWN_TX_DONE:
2963 			/* An 802.11 frame has been transmitted. */
2964 			ops->tx_done(sc, desc, data);
2965 			break;
2966 
2967 		case IWN_RX_STATISTICS:
2968 		case IWN_BEACON_STATISTICS:
2969 			iwn_rx_statistics(sc, desc, data);
2970 			break;
2971 
2972 		case IWN_BEACON_MISSED:
2973 		{
2974 			struct iwn_beacon_missed *miss =
2975 			    (struct iwn_beacon_missed *)(desc + 1);
2976 			uint32_t missed;
2977 
2978 			if ((ic->ic_opmode != IEEE80211_M_STA) ||
2979 			    (ic->ic_state != IEEE80211_S_RUN))
2980 				break;
2981 
2982 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2983 			    sizeof (*miss), BUS_DMASYNC_POSTREAD);
2984 			missed = letoh32(miss->consecutive);
2985 
2986 			/*
2987 			 * If more than 5 consecutive beacons are missed,
2988 			 * reinitialize the sensitivity state machine.
2989 			 */
2990 			if (missed > 5)
2991 				(void)iwn_init_sensitivity(sc);
2992 
2993 			/*
2994 			 * Rather than go directly to scan state, try to send a
2995 			 * directed probe request first. If that fails then the
2996 			 * state machine will drop us into scanning after timing
2997 			 * out waiting for a probe response.
2998 			 */
2999 			if (missed > ic->ic_bmissthres && !ic->ic_mgt_timer) {
3000 				if (ic->ic_if.if_flags & IFF_DEBUG)
3001 					printf("%s: receiving no beacons from "
3002 					    "%s; checking if this AP is still "
3003 					    "responding to probe requests\n",
3004 					    sc->sc_dev.dv_xname, ether_sprintf(
3005 					    ic->ic_bss->ni_macaddr));
3006 				IEEE80211_SEND_MGMT(ic, ic->ic_bss,
3007 				    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
3008 			}
3009 			break;
3010 		}
3011 		case IWN_UC_READY:
3012 		{
3013 			struct iwn_ucode_info *uc =
3014 			    (struct iwn_ucode_info *)(desc + 1);
3015 
3016 			/* The microcontroller is ready. */
3017 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3018 			    sizeof (*uc), BUS_DMASYNC_POSTREAD);
3019 			DPRINTF(("microcode alive notification version=%d.%d "
3020 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3021 			    uc->subtype, letoh32(uc->valid)));
3022 
3023 			if (letoh32(uc->valid) != 1) {
3024 				printf("%s: microcontroller initialization "
3025 				    "failed\n", sc->sc_dev.dv_xname);
3026 				break;
3027 			}
3028 			if (uc->subtype == IWN_UCODE_INIT) {
3029 				/* Save microcontroller report. */
3030 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3031 			}
3032 			/* Save the address of the error log in SRAM. */
3033 			sc->errptr = letoh32(uc->errptr);
3034 			break;
3035 		}
3036 		case IWN_STATE_CHANGED:
3037 		{
3038 			uint32_t *status = (uint32_t *)(desc + 1);
3039 
3040 			/* Enabled/disabled notification. */
3041 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3042 			    sizeof (*status), BUS_DMASYNC_POSTREAD);
3043 			DPRINTF(("state changed to %x\n", letoh32(*status)));
3044 
3045 			if (letoh32(*status) & 1) {
3046 				/* Radio transmitter is off, power down. */
3047 				iwn_stop(ifp);
3048 				return;	/* No further processing. */
3049 			}
3050 			break;
3051 		}
3052 		case IWN_START_SCAN:
3053 		{
3054 			struct iwn_start_scan *scan =
3055 			    (struct iwn_start_scan *)(desc + 1);
3056 
3057 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3058 			    sizeof (*scan), BUS_DMASYNC_POSTREAD);
3059 			DPRINTFN(2, ("scan start: chan %d status %x\n",
3060 			    scan->chan, letoh32(scan->status)));
3061 
3062 			if (sc->sc_flags & IWN_FLAG_BGSCAN)
3063 				break;
3064 
3065 			/* Fix current channel. */
3066 			ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan];
3067 			break;
3068 		}
3069 		case IWN_STOP_SCAN:
3070 		{
3071 			struct iwn_stop_scan *scan =
3072 			    (struct iwn_stop_scan *)(desc + 1);
3073 
3074 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
3075 			    sizeof (*scan), BUS_DMASYNC_POSTREAD);
3076 			DPRINTFN(2, ("scan stop: nchan=%d status=%d chan=%d\n",
3077 			    scan->nchan, scan->status, scan->chan));
3078 
3079 			if (scan->status == 1 && scan->chan <= 14 &&
3080 			    (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
3081 				int error;
3082 				/*
3083 				 * We just finished scanning 2GHz channels,
3084 				 * start scanning 5GHz ones.
3085 				 */
3086 				error = iwn_scan(sc, IEEE80211_CHAN_5GHZ,
3087 				    (sc->sc_flags & IWN_FLAG_BGSCAN) ? 1 : 0);
3088 				if (error == 0)
3089 					break;
3090 			}
3091 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
3092 			sc->sc_flags &= ~IWN_FLAG_BGSCAN;
3093 			ieee80211_end_scan(ifp);
3094 			break;
3095 		}
3096 		case IWN5000_CALIBRATION_RESULT:
3097 			iwn5000_rx_calib_results(sc, desc, data);
3098 			break;
3099 
3100 		case IWN5000_CALIBRATION_DONE:
3101 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3102 			wakeup(sc);
3103 			break;
3104 		}
3105 
3106 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3107 	}
3108 	if_input(&sc->sc_ic.ic_if, &ml);
3109 
3110 	/* Tell the firmware what we have processed. */
3111 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
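	/* The RX write pointer appears to require 8-entry alignment. */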
3112 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3113 }
3114 
3115 /*
3116  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3117  * from power-down sleep mode.
3118  */
3119 void
3120 iwn_wakeup_intr(struct iwn_softc *sc)
3121 {
3122 	int qid;
3123 
3124 	DPRINTF(("ucode wakeup from power-down sleep\n"));
3125 
3126 	/* Wakeup RX and TX rings. */
3127 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3128 	for (qid = 0; qid < sc->ntxqs; qid++) {
3129 		struct iwn_tx_ring *ring = &sc->txq[qid];
3130 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3131 	}
3132 }
3133 
3134 #ifdef IWN_DEBUG
3135 /*
3136  * Dump the error log of the firmware when a firmware panic occurs.  Although
3137  * we can't debug the firmware because it is neither open source nor free, the
3138  * error log can help us identify certain classes of problems.
3139  */
3140 void
3141 iwn_fatal_intr(struct iwn_softc *sc)
3142 {
3143 	struct iwn_fw_dump dump;
3144 	int i;
3145 
3146 	/* Check that the error log address is valid. */
3147 	if (sc->errptr < IWN_FW_DATA_BASE ||
3148 	    sc->errptr + sizeof (dump) >
3149 	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3150 		printf("%s: bad firmware error log address 0x%08x\n",
3151 		    sc->sc_dev.dv_xname, sc->errptr);
3152 		return;
3153 	}
3154 	if (iwn_nic_lock(sc) != 0) {
3155 		printf("%s: could not read firmware error log\n",
3156 		    sc->sc_dev.dv_xname);
3157 		return;
3158 	}
3159 	/* Read firmware error log from SRAM. */
3160 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3161 	    sizeof (dump) / sizeof (uint32_t));
3162 	iwn_nic_unlock(sc);
3163 
3164 	if (dump.valid == 0) {
3165 		printf("%s: firmware error log is empty\n",
3166 		    sc->sc_dev.dv_xname);
3167 		return;
3168 	}
3169 	printf("firmware error log:\n");
3170 	printf("  error type      = \"%s\" (0x%08X)\n",
3171 	    (dump.id < nitems(iwn_fw_errmsg)) ?
3172 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3173 	    dump.id);
3174 	printf("  program counter = 0x%08X\n", dump.pc);
3175 	printf("  source line     = 0x%08X\n", dump.src_line);
3176 	printf("  error data      = 0x%08X%08X\n",
3177 	    dump.error_data[0], dump.error_data[1]);
3178 	printf("  branch link     = 0x%08X%08X\n",
3179 	    dump.branch_link[0], dump.branch_link[1]);
3180 	printf("  interrupt link  = 0x%08X%08X\n",
3181 	    dump.interrupt_link[0], dump.interrupt_link[1]);
3182 	printf("  time            = %u\n", dump.time[0]);
3183 
3184 	/* Dump driver status (TX and RX rings) while we're here. */
3185 	printf("driver status:\n");
3186 	for (i = 0; i < sc->ntxqs; i++) {
3187 		struct iwn_tx_ring *ring = &sc->txq[i];
3188 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3189 		    i, ring->qid, ring->cur, ring->queued);
3190 	}
3191 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
3192 	printf("  802.11 state %d\n", sc->sc_ic.ic_state);
3193 }
3194 #endif
3195 
3196 int
3197 iwn_intr(void *arg)
3198 {
3199 	struct iwn_softc *sc = arg;
3200 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3201 	uint32_t r1, r2, tmp;
3202 
3203 	/* Disable interrupts. */
3204 	IWN_WRITE(sc, IWN_INT_MASK, 0);
3205 
3206 	/* Read interrupts from ICT (fast) or from registers (slow). */
3207 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3208 		tmp = 0;
3209 		while (sc->ict[sc->ict_cur] != 0) {
3210 			tmp |= sc->ict[sc->ict_cur];
3211 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3212 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3213 		}
3214 		tmp = letoh32(tmp);
3215 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3216 			tmp = 0;
3217 		else if (tmp & 0xc0000)	/* Work around a HW bug. */
3218 			tmp |= 0x8000;
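		/*
		 * ICT entries store the interrupt cause bits in a packed
		 * form; rebuild the bit layout of the IWN_INT register from
		 * the low and high bytes before testing individual causes.
		 */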
3219 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3220 		r2 = 0;	/* Unused. */
3221 	} else {
3222 		r1 = IWN_READ(sc, IWN_INT);
3223 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3224 			return 0;	/* Hardware gone! */
3225 		r2 = IWN_READ(sc, IWN_FH_INT);
3226 	}
3227 	if (r1 == 0 && r2 == 0) {
3228 		if (ifp->if_flags & IFF_UP)
3229 			IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3230 		return 0;	/* Interrupt not for us. */
3231 	}
3232 
3233 	/* Acknowledge interrupts. */
3234 	IWN_WRITE(sc, IWN_INT, r1);
3235 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3236 		IWN_WRITE(sc, IWN_FH_INT, r2);
3237 
3238 	if (r1 & IWN_INT_RF_TOGGLED) {
3239 		tmp = IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL;
3240 		printf("%s: RF switch: radio %s\n", sc->sc_dev.dv_xname,
3241 		    tmp ? "enabled" : "disabled");
3242 		if (tmp)
3243 			task_add(systq, &sc->init_task);
3244 	}
3245 	if (r1 & IWN_INT_CT_REACHED) {
3246 		printf("%s: critical temperature reached!\n",
3247 		    sc->sc_dev.dv_xname);
3248 	}
3249 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3250 		printf("%s: fatal firmware error\n", sc->sc_dev.dv_xname);
3251 
3252 		/* Force a complete recalibration on next init. */
3253 		sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3254 
3255 		/* Dump firmware error log and stop. */
3256 #ifdef IWN_DEBUG
3257 		iwn_fatal_intr(sc);
3258 #endif
3259 		iwn_stop(ifp);
3260 		task_add(systq, &sc->init_task);
3261 		return 1;
3262 	}
3263 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3264 	    (r2 & IWN_FH_INT_RX)) {
3265 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3266 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3267 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3268 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3269 			    IWN_INT_PERIODIC_DIS);
3270 			iwn_notif_intr(sc);
3271 			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3272 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3273 				    IWN_INT_PERIODIC_ENA);
3274 			}
3275 		} else
3276 			iwn_notif_intr(sc);
3277 	}
3278 
3279 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3280 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3281 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3282 		wakeup(sc);	/* FH DMA transfer completed. */
3283 	}
3284 
3285 	if (r1 & IWN_INT_ALIVE)
3286 		wakeup(sc);	/* Firmware is alive. */
3287 
3288 	if (r1 & IWN_INT_WAKEUP)
3289 		iwn_wakeup_intr(sc);
3290 
3291 	/* Re-enable interrupts. */
3292 	if (ifp->if_flags & IFF_UP)
3293 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3294 
3295 	return 1;
3296 }
3297 
3298 /*
3299  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3300  * 5000 adapters use a slightly different format).
3301  */
3302 void
3303 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3304     uint16_t len)
3305 {
3306 	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3307 
3308 	*w = htole16(len + 8);
3309 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3310 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
3311 	    BUS_DMASYNC_PREWRITE);
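	/*
	 * The first IWN_SCHED_WINSZ entries are mirrored past the end of
	 * the byte-count array, apparently so the scheduler sees consistent
	 * values when its window wraps around the ring; keep the mirror in
	 * sync.  The same applies to the 5000 variants below.
	 */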
3312 	if (idx < IWN_SCHED_WINSZ) {
3313 		*(w + IWN_TX_RING_COUNT) = *w;
3314 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3315 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
3316 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
3317 	}
3318 }
3319 
3320 void
3321 iwn4965_reset_sched(struct iwn_softc *sc, int qid, int idx)
3322 {
3323 	/* TBD */
3324 }
3325 
3326 void
3327 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3328     uint16_t len)
3329 {
3330 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3331 
3332 	*w = htole16(id << 12 | (len + 8));
3333 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3334 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
3335 	    BUS_DMASYNC_PREWRITE);
3336 	if (idx < IWN_SCHED_WINSZ) {
3337 		*(w + IWN_TX_RING_COUNT) = *w;
3338 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3339 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
3340 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
3341 	}
3342 }
3343 
3344 void
3345 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3346 {
3347 	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3348 
3349 	*w = (*w & htole16(0xf000)) | htole16(1);
3350 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3351 	    (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t),
3352 	    BUS_DMASYNC_PREWRITE);
3353 	if (idx < IWN_SCHED_WINSZ) {
3354 		*(w + IWN_TX_RING_COUNT) = *w;
3355 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
3356 		    (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr,
3357 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
3358 	}
3359 }
3360 
3361 int
3362 iwn_rval2ridx(int rval)
3363 {
3364 	int ridx;
3365 
3366 	for (ridx = 0; ridx < nitems(iwn_rates); ridx++) {
3367 		if (rval == iwn_rates[ridx].rate)
3368 			break;
3369 	}
3370 
3371 	return ridx;
3372 }
3373 
3374 int
3375 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3376 {
3377 	struct iwn_ops *ops = &sc->ops;
3378 	struct ieee80211com *ic = &sc->sc_ic;
3379 	struct iwn_node *wn = (void *)ni;
3380 	struct iwn_tx_ring *ring;
3381 	struct iwn_tx_desc *desc;
3382 	struct iwn_tx_data *data;
3383 	struct iwn_tx_cmd *cmd;
3384 	struct iwn_cmd_data *tx;
3385 	const struct iwn_rate *rinfo;
3386 	struct ieee80211_frame *wh;
3387 	struct ieee80211_key *k = NULL;
3388 	enum ieee80211_edca_ac ac;
3389 	int qid;
3390 	uint32_t flags;
3391 	uint16_t qos;
3392 	u_int hdrlen;
3393 	bus_dma_segment_t *seg;
3394 	uint8_t *ivp, tid, ridx, txant, type, subtype;
3395 	int i, totlen, hasqos, error, pad;
3396 
3397 	wh = mtod(m, struct ieee80211_frame *);
3398 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3399 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3400 	if (type == IEEE80211_FC0_TYPE_CTL)
3401 		hdrlen = sizeof(struct ieee80211_frame_min);
3402 	else
3403 		hdrlen = ieee80211_get_hdrlen(wh);
3404 
3405 	if ((hasqos = ieee80211_has_qos(wh))) {
3406 		/* Select EDCA Access Category and TX ring for this frame. */
3407 		struct ieee80211_tx_ba *ba;
3408 		qos = ieee80211_get_qos(wh);
3409 		tid = qos & IEEE80211_QOS_TID;
3410 		ac = ieee80211_up_to_ac(ic, tid);
3411 		qid = ac;
3412 
3413 		/* If possible, put this frame on an aggregation queue. */
3414 		if (sc->sc_tx_ba[tid].wn == wn) {
3415 			ba = &ni->ni_tx_ba[tid];
3416 			if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3417 			    ba->ba_state == IEEE80211_BA_AGREED) {
3418 				qid = sc->first_agg_txq + tid;
3419 				if (sc->qfullmsk & (1 << qid)) {
3420 					m_freem(m);
3421 					return ENOBUFS;
3422 				}
3423 			}
3424 		}
3425 	} else {
3426 		qos = 0;
3427 		tid = IWN_NONQOS_TID;
3428 		ac = EDCA_AC_BE;
3429 		qid = ac;
3430 	}
3431 
3432 	ring = &sc->txq[qid];
3433 	desc = &ring->desc[ring->cur];
3434 	data = &ring->data[ring->cur];
3435 
3436 	/* Choose a TX rate index. */
3437 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3438 	    type != IEEE80211_FC0_TYPE_DATA)
3439 		ridx = iwn_rval2ridx(ieee80211_min_basic_rate(ic));
3440 	else if (ic->ic_fixed_mcs != -1)
3441 		ridx = sc->fixed_ridx;
3442 	else if (ic->ic_fixed_rate != -1)
3443 		ridx = sc->fixed_ridx;
3444 	else {
3445 		if (ni->ni_flags & IEEE80211_NODE_HT)
3446 			ridx = iwn_mcs2ridx[ni->ni_txmcs];
3447 		else
3448 			ridx = wn->ridx[ni->ni_txrate];
3449 	}
3450 	rinfo = &iwn_rates[ridx];
3451 #if NBPFILTER > 0
3452 	if (sc->sc_drvbpf != NULL) {
3453 		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3454 		uint16_t chan_flags;
3455 
3456 		tap->wt_flags = 0;
3457 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3458 		chan_flags = ni->ni_chan->ic_flags;
3459 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3460 			chan_flags &= ~IEEE80211_CHAN_HT;
3461 		tap->wt_chan_flags = htole16(chan_flags);
3462 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3463 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3464 		    type == IEEE80211_FC0_TYPE_DATA) {
3465 			tap->wt_rate = (0x80 | ni->ni_txmcs);
3466 		} else
3467 			tap->wt_rate = rinfo->rate;
3468 		tap->wt_hwqueue = ac;
3469 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
3470 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
3471 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3472 
3473 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
3474 		    m, BPF_DIRECTION_OUT);
3475 	}
3476 #endif
3477 
3478 	totlen = m->m_pkthdr.len;
3479 
3480 	/* Encrypt the frame if need be. */
3481 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3482 		/* Retrieve key for TX. */
3483 		k = ieee80211_get_txkey(ic, wh, ni);
3484 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
3485 			/* Do software encryption. */
3486 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
3487 				return ENOBUFS;
3488 			/* 802.11 header may have moved. */
3489 			wh = mtod(m, struct ieee80211_frame *);
3490 			totlen = m->m_pkthdr.len;
3491 
3492 		} else	/* HW appends CCMP MIC. */
3493 			totlen += IEEE80211_CCMP_HDRLEN;
3494 	}
3495 
3496 	data->totlen = totlen;
3497 
3498 	/* Prepare TX firmware command. */
3499 	cmd = &ring->cmd[ring->cur];
3500 	cmd->code = IWN_CMD_TX_DATA;
3501 	cmd->flags = 0;
3502 	cmd->qid = ring->qid;
3503 	cmd->idx = ring->cur;
3504 
3505 	tx = (struct iwn_cmd_data *)cmd->data;
3506 	/* NB: No need to clear tx, all fields are reinitialized here. */
3507 	tx->scratch = 0;	/* clear "scratch" area */
3508 
3509 	flags = 0;
3510 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3511 		/* Unicast frame, check if an ACK is expected. */
3512 		if (!hasqos || (qos & IEEE80211_QOS_ACK_POLICY_MASK) !=
3513 		    IEEE80211_QOS_ACK_POLICY_NOACK)
3514 			flags |= IWN_TX_NEED_ACK;
3515 	}
3516 	if (type == IEEE80211_FC0_TYPE_CTL &&
3517 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
3518 		struct ieee80211_frame_min *mwh;
3519 		uint8_t *barfrm;
3520 		uint16_t ctl;
3521 		mwh = mtod(m, struct ieee80211_frame_min *);
3522 		barfrm = (uint8_t *)&mwh[1];
3523 		ctl = LE_READ_2(barfrm);
3524 		tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
3525 		    IEEE80211_BA_TID_INFO_SHIFT;
3526 		flags |= (IWN_TX_NEED_ACK | IWN_TX_IMM_BA);
3527 	}
3528 
3529 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3530 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3531 
3532 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3533 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3534 		int rtsthres = ic->ic_rtsthreshold;
3535 		if (ni->ni_flags & IEEE80211_NODE_HT)
3536 			rtsthres = ieee80211_mira_get_rts_threshold(&wn->mn,
3537 			    ic, ni, totlen + IEEE80211_CRC_LEN);
3538 
3539 		/* NB: Group frames are sent using CCK in 802.11b/g/n (2GHz). */
3540 		if (totlen + IEEE80211_CRC_LEN > rtsthres) {
3541 			flags |= IWN_TX_NEED_RTS;
3542 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3543 		    ridx >= IWN_RIDX_OFDM6) {
3544 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3545 				flags |= IWN_TX_NEED_CTS;
3546 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3547 				flags |= IWN_TX_NEED_RTS;
3548 		}
3549 
3550 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3551 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3552 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3553 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3554 				flags |= IWN_TX_NEED_PROTECTION;
3555 			} else
3556 				flags |= IWN_TX_FULL_TXOP;
3557 		}
3558 	}
3559 
3560 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3561 	    type != IEEE80211_FC0_TYPE_DATA)
3562 		tx->id = sc->broadcast_id;
3563 	else
3564 		tx->id = wn->id;
3565 
3566 	if (type == IEEE80211_FC0_TYPE_MGT) {
3567 #ifndef IEEE80211_STA_ONLY
3568 		/* Tell HW to set timestamp in probe responses. */
3569 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3570 			flags |= IWN_TX_INSERT_TSTAMP;
3571 #endif
3572 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3573 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3574 			tx->timeout = htole16(3);
3575 		else
3576 			tx->timeout = htole16(2);
3577 	} else
3578 		tx->timeout = htole16(0);
3579 
3580 	if (hdrlen & 3) {
3581 		/* First segment length must be a multiple of 4. */
3582 		flags |= IWN_TX_NEED_PADDING;
3583 		pad = 4 - (hdrlen & 3);
3584 	} else
3585 		pad = 0;
3586 
3587 	tx->len = htole16(totlen);
3588 	tx->tid = tid;
3589 	tx->rts_ntries = 60;
3590 	tx->data_ntries = 15;
3591 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3592 
3593 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3594 	    tx->id != sc->broadcast_id)
3595 		tx->plcp = rinfo->ht_plcp;
3596 	else
3597 		tx->plcp = rinfo->plcp;
3598 
3599 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3600 	    tx->id != sc->broadcast_id) {
3601 		tx->rflags = rinfo->ht_flags;
3602 		if (ni->ni_htcaps & IEEE80211_HTCAP_SGI20)
3603 			tx->rflags |= IWN_RFLAG_SGI;
3604 	}
3605 	else
3606 		tx->rflags = rinfo->flags;
3607 	/*
3608 	 * Keep the Tx rate constant while mira is probing, or if this is
3609 	 * an aggregation queue in which case a fixed Tx rate works around
3610 	 * FIFO_UNDERRUN Tx errors.
3611 	 */
3612 	if (tx->id == sc->broadcast_id || ieee80211_mira_is_probing(&wn->mn) ||
3613 	    qid >= sc->first_agg_txq ||
3614 	    ic->ic_fixed_mcs != -1 || ic->ic_fixed_rate != -1) {
3615 		/* Group or management frame, or probing, or fixed Tx rate. */
3616 		tx->linkq = 0;
3617 		/* XXX Alternate between antenna A and B? */
3618 		txant = IWN_LSB(sc->txchainmask);
3619 		tx->rflags |= IWN_RFLAG_ANT(txant);
3620 	} else {
3621 		tx->linkq = 0; /* initial index into firmware LQ retry table */
3622 		flags |= IWN_TX_LINKQ;	/* enable multi-rate retry */
3623 	}
3624 	/* Set physical address of "scratch area". */
3625 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3626 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3627 
3628 	/* Copy 802.11 header in TX command. */
3629 	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3630 
3631 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
3632 		/* Trim 802.11 header and prepend CCMP IV. */
3633 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
3634 		ivp = mtod(m, uint8_t *);
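		/*
		 * Extended IV layout per 802.11 CCMP: PN0, PN1, a reserved
		 * byte, the key ID in the top two bits together with the
		 * ExtIV bit, then PN2..PN5; k_tsc holds the 48-bit packet
		 * number.
		 */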
3635 		k->k_tsc++;
3636 		ivp[0] = k->k_tsc;
3637 		ivp[1] = k->k_tsc >> 8;
3638 		ivp[2] = 0;
3639 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
3640 		ivp[4] = k->k_tsc >> 16;
3641 		ivp[5] = k->k_tsc >> 24;
3642 		ivp[6] = k->k_tsc >> 32;
3643 		ivp[7] = k->k_tsc >> 40;
3644 
3645 		tx->security = IWN_CIPHER_CCMP;
3646 		if (qid >= sc->first_agg_txq)
3647 			flags |= IWN_TX_AMPDU_CCMP;
3648 		memcpy(tx->key, k->k_key, k->k_len);
3649 
3650 		/* TX scheduler includes CCMP MIC len w/5000 Series. */
3651 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
3652 			totlen += IEEE80211_CCMP_MICLEN;
3653 	} else {
3654 		/* Trim 802.11 header. */
3655 		m_adj(m, hdrlen);
3656 		tx->security = 0;
3657 	}
3658 	tx->flags = htole32(flags);
3659 
3660 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3661 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3662 	if (error != 0 && error != EFBIG) {
3663 		printf("%s: can't map mbuf (error %d)\n",
3664 		    sc->sc_dev.dv_xname, error);
3665 		m_freem(m);
3666 		return error;
3667 	}
3668 	if (error != 0) {
3669 		/* Too many DMA segments, linearize mbuf. */
3670 		if (m_defrag(m, M_DONTWAIT)) {
3671 			m_freem(m);
3672 			return ENOBUFS;
3673 		}
3674 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3675 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3676 		if (error != 0) {
3677 			printf("%s: can't map mbuf (error %d)\n",
3678 			    sc->sc_dev.dv_xname, error);
3679 			m_freem(m);
3680 			return error;
3681 		}
3682 	}
3683 
3684 	data->m = m;
3685 	data->ni = ni;
3686 	data->txmcs = ni->ni_txmcs;
3687 	data->txrate = ni->ni_txrate;
3688 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
3689 
3690 	DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
3691 	    ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs));
3692 
3693 	/* Fill TX descriptor. */
3694 	desc->nsegs = 1 + data->map->dm_nsegs;
3695 	/* First DMA segment is used by the TX command. */
3696 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3697 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3698 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
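	/*
	 * NB: each descriptor segment packs the upper bits of the 36-bit
	 * DMA address (IWN_HIADDR()) into its low 4 bits and the segment
	 * length into the upper 12 bits, hence the "<< 4" shifts.
	 */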
3699 	/* Other DMA segments are for data payload. */
3700 	seg = data->map->dm_segs;
3701 	for (i = 1; i <= data->map->dm_nsegs; i++) {
3702 		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3703 		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3704 		    seg->ds_len << 4);
3705 		seg++;
3706 	}
3707 
3708 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
3709 	    BUS_DMASYNC_PREWRITE);
3710 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3711 	    (caddr_t)cmd - ring->cmd_dma.vaddr, sizeof (*cmd),
3712 	    BUS_DMASYNC_PREWRITE);
3713 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3714 	    (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc),
3715 	    BUS_DMASYNC_PREWRITE);
3716 
3717 	/* Update TX scheduler. */
3718 	ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3719 
3720 	/* Kick TX ring. */
3721 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3722 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3723 
3724 	/* Mark TX ring as full if we reach a certain threshold. */
3725 	if (++ring->queued > IWN_TX_RING_HIMARK)
3726 		sc->qfullmsk |= 1 << ring->qid;
3727 
3728 	return 0;
3729 }
3730 
3731 void
3732 iwn_start(struct ifnet *ifp)
3733 {
3734 	struct iwn_softc *sc = ifp->if_softc;
3735 	struct ieee80211com *ic = &sc->sc_ic;
3736 	struct ieee80211_node *ni;
3737 	struct mbuf *m;
3738 
3739 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
3740 		return;
3741 
3742 	for (;;) {
3743 		if (sc->qfullmsk != 0) {
3744 			ifq_set_oactive(&ifp->if_snd);
3745 			break;
3746 		}
3747 
3748 		/* Send pending management frames first. */
3749 		m = mq_dequeue(&ic->ic_mgtq);
3750 		if (m != NULL) {
3751 			ni = m->m_pkthdr.ph_cookie;
3752 			goto sendit;
3753 		}
3754 		if (ic->ic_state != IEEE80211_S_RUN ||
3755 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
3756 			break;
3757 
3758 		/* Encapsulate and send data frames. */
3759 		m = ifq_dequeue(&ifp->if_snd);
3760 		if (m == NULL)
3761 			break;
3762 #if NBPFILTER > 0
3763 		if (ifp->if_bpf != NULL)
3764 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
3765 #endif
3766 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL)
3767 			continue;
3768 sendit:
3769 #if NBPFILTER > 0
3770 		if (ic->ic_rawbpf != NULL)
3771 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
3772 #endif
3773 		if (iwn_tx(sc, m, ni) != 0) {
3774 			ieee80211_release_node(ic, ni);
3775 			ifp->if_oerrors++;
3776 			continue;
3777 		}
3778 
3779 		sc->sc_tx_timer = 5;
3780 		ifp->if_timer = 1;
3781 	}
3782 }
3783 
3784 void
3785 iwn_watchdog(struct ifnet *ifp)
3786 {
3787 	struct iwn_softc *sc = ifp->if_softc;
3788 
3789 	ifp->if_timer = 0;
3790 
3791 	if (sc->sc_tx_timer > 0) {
3792 		if (--sc->sc_tx_timer == 0) {
3793 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
3794 			iwn_stop(ifp);
3795 			ifp->if_oerrors++;
3796 			return;
3797 		}
3798 		ifp->if_timer = 1;
3799 	}
3800 
3801 	ieee80211_watchdog(ifp);
3802 }
3803 
3804 int
3805 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3806 {
3807 	struct iwn_softc *sc = ifp->if_softc;
3808 	struct ieee80211com *ic = &sc->sc_ic;
3809 	int s, error = 0;
3810 
3811 	error = rw_enter(&sc->sc_rwlock, RW_WRITE | RW_INTR);
3812 	if (error)
3813 		return error;
3814 	s = splnet();
3815 
3816 	switch (cmd) {
3817 	case SIOCSIFADDR:
3818 		ifp->if_flags |= IFF_UP;
3819 		/* FALLTHROUGH */
3820 	case SIOCSIFFLAGS:
3821 		if (ifp->if_flags & IFF_UP) {
3822 			if (!(ifp->if_flags & IFF_RUNNING))
3823 				error = iwn_init(ifp);
3824 		} else {
3825 			if (ifp->if_flags & IFF_RUNNING)
3826 				iwn_stop(ifp);
3827 		}
3828 		break;
3829 
3830 	case SIOCS80211POWER:
3831 		error = ieee80211_ioctl(ifp, cmd, data);
3832 		if (error != ENETRESET)
3833 			break;
3834 		if (ic->ic_state == IEEE80211_S_RUN &&
3835 		    sc->calib.state == IWN_CALIB_STATE_RUN) {
3836 			if (ic->ic_flags & IEEE80211_F_PMGTON)
3837 				error = iwn_set_pslevel(sc, 0, 3, 0);
3838 			else	/* back to CAM */
3839 				error = iwn_set_pslevel(sc, 0, 0, 0);
3840 		} else {
3841 			/* Defer until transition to IWN_CALIB_STATE_RUN. */
3842 			error = 0;
3843 		}
3844 		break;
3845 
3846 	default:
3847 		error = ieee80211_ioctl(ifp, cmd, data);
3848 	}
3849 
3850 	if (error == ENETRESET) {
3851 		error = 0;
3852 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
3853 		    (IFF_UP | IFF_RUNNING)) {
3854 			iwn_stop(ifp);
3855 			error = iwn_init(ifp);
3856 		}
3857 	}
3858 
3859 	splx(s);
3860 	rw_exit_write(&sc->sc_rwlock);
3861 	return error;
3862 }
3863 
3864 /*
3865  * Send a command to the firmware.
3866  */
3867 int
3868 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3869 {
3870 	struct iwn_ops *ops = &sc->ops;
3871 	struct iwn_tx_ring *ring = &sc->txq[4];
3872 	struct iwn_tx_desc *desc;
3873 	struct iwn_tx_data *data;
3874 	struct iwn_tx_cmd *cmd;
3875 	struct mbuf *m;
3876 	bus_addr_t paddr;
3877 	int totlen, error;
3878 
3879 	desc = &ring->desc[ring->cur];
3880 	data = &ring->data[ring->cur];
3881 	totlen = 4 + size;
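	/* NB: totlen includes the 4-byte command header (code, flags, qid, idx). */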
3882 
3883 	if (size > sizeof cmd->data) {
3884 		/* Command is too large to fit in a descriptor. */
3885 		if (totlen > MCLBYTES)
3886 			return EINVAL;
3887 		MGETHDR(m, M_DONTWAIT, MT_DATA);
3888 		if (m == NULL)
3889 			return ENOMEM;
3890 		if (totlen > MHLEN) {
3891 			MCLGET(m, M_DONTWAIT);
3892 			if (!(m->m_flags & M_EXT)) {
3893 				m_freem(m);
3894 				return ENOMEM;
3895 			}
3896 		}
3897 		cmd = mtod(m, struct iwn_tx_cmd *);
3898 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen,
3899 		    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3900 		if (error != 0) {
3901 			m_freem(m);
3902 			return error;
3903 		}
3904 		data->m = m;
3905 		paddr = data->map->dm_segs[0].ds_addr;
3906 	} else {
3907 		cmd = &ring->cmd[ring->cur];
3908 		paddr = data->cmd_paddr;
3909 	}
3910 
3911 	cmd->code = code;
3912 	cmd->flags = 0;
3913 	cmd->qid = ring->qid;
3914 	cmd->idx = ring->cur;
3915 	memcpy(cmd->data, buf, size);
3916 
3917 	desc->nsegs = 1;
3918 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3919 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
3920 
3921 	if (size > sizeof cmd->data) {
3922 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen,
3923 		    BUS_DMASYNC_PREWRITE);
3924 	} else {
3925 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3926 		    (caddr_t)cmd - ring->cmd_dma.vaddr, totlen,
3927 		    BUS_DMASYNC_PREWRITE);
3928 	}
3929 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3930 	    (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc),
3931 	    BUS_DMASYNC_PREWRITE);
3932 
3933 	/* Update TX scheduler. */
3934 	ops->update_sched(sc, ring->qid, ring->cur, 0, 0);
3935 
3936 	/* Kick command ring. */
3937 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3938 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3939 
3940 	return async ? 0 : tsleep_nsec(desc, PCATCH, "iwncmd", SEC_TO_NSEC(1));
3941 }
3942 
3943 int
3944 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3945 {
3946 	struct iwn4965_node_info hnode;
3947 	caddr_t src, dst;
3948 
3949 	/*
3950 	 * We use the node structure for 5000 Series internally (it is
3951 	 * a superset of the one for 4965AGN). We thus copy the common
3952 	 * fields before sending the command.
3953 	 */
3954 	src = (caddr_t)node;
3955 	dst = (caddr_t)&hnode;
3956 	memcpy(dst, src, 48);
3957 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3958 	memcpy(dst + 48, src + 72, 20);
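	/* (Those fields span offsets 48-71, i.e. 24 bytes, of the 5000 layout.) */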
3959 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3960 }
3961 
3962 int
3963 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3964 {
3965 	/* Direct mapping. */
3966 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3967 }
3968 
3969 int
3970 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
3971 {
3972 	struct ieee80211com *ic = &sc->sc_ic;
3973 	struct iwn_node *wn = (void *)ni;
3974 	struct iwn_cmd_link_quality linkq;
3975 	const struct iwn_rate *rinfo;
3976 	uint8_t txant;
3977 	int i;
3978 
3979 	/* Use the first valid TX antenna. */
3980 	txant = IWN_LSB(sc->txchainmask);
3981 
3982 	memset(&linkq, 0, sizeof linkq);
3983 	linkq.id = wn->id;
3984 	linkq.antmsk_1stream = txant;
3985 	linkq.antmsk_2stream = IWN_ANT_AB;
3986 	linkq.ampdu_max = IWN_AMPDU_MAX;
3987 	linkq.ampdu_threshold = 3;
3988 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
3989 
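	/*
	 * Fill the retry table: the firmware starts at the index given in
	 * the TX command's linkq field and walks toward the end of this
	 * table, falling back to lower rates as retransmissions fail.
	 */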
3990 	i = 0;
3991 	if (ni->ni_flags & IEEE80211_NODE_HT) {
3992 		int txmcs;
3993 		for (txmcs = ni->ni_txmcs; txmcs >= 0; txmcs--) {
3994 			rinfo = &iwn_rates[iwn_mcs2ridx[txmcs]];
3995 			linkq.retry[i].plcp = rinfo->ht_plcp;
3996 			linkq.retry[i].rflags = rinfo->ht_flags;
3997 
3998 			if (ni->ni_htcaps & IEEE80211_HTCAP_SGI20)
3999 				linkq.retry[i].rflags |= IWN_RFLAG_SGI;
4000 
4001 			/* XXX set correct ant mask for MIMO rates here */
4002 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4003 
4004 			if (++i >= IWN_MAX_TX_RETRIES)
4005 				break;
4006 		}
4007 	} else {
4008 		int txrate;
4009 		for (txrate = ni->ni_txrate; txrate >= 0; txrate--) {
4010 			rinfo = &iwn_rates[wn->ridx[txrate]];
4011 			linkq.retry[i].plcp = rinfo->plcp;
4012 			linkq.retry[i].rflags = rinfo->flags;
4013 			linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4014 			if (++i >= IWN_MAX_TX_RETRIES)
4015 				break;
4016 		}
4017 	}
4018 
4019 	/* Fill the rest with the lowest basic rate. */
4020 	rinfo = &iwn_rates[iwn_rval2ridx(ieee80211_min_basic_rate(ic))];
4021 	while (i < IWN_MAX_TX_RETRIES) {
4022 		linkq.retry[i].plcp = rinfo->plcp;
4023 		linkq.retry[i].rflags = rinfo->flags;
4024 		linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4025 		i++;
4026 	}
4027 
4028 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4029 }
4030 
4031 /*
4032  * Broadcast node is used to send group-addressed and management frames.
4033  */
4034 int
4035 iwn_add_broadcast_node(struct iwn_softc *sc, int async, int ridx)
4036 {
4037 	struct iwn_ops *ops = &sc->ops;
4038 	struct iwn_node_info node;
4039 	struct iwn_cmd_link_quality linkq;
4040 	const struct iwn_rate *rinfo;
4041 	uint8_t txant;
4042 	int i, error;
4043 
4044 	memset(&node, 0, sizeof node);
4045 	IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
4046 	node.id = sc->broadcast_id;
4047 	DPRINTF(("adding broadcast node\n"));
4048 	if ((error = ops->add_node(sc, &node, async)) != 0)
4049 		return error;
4050 
4051 	/* Use the first valid TX antenna. */
4052 	txant = IWN_LSB(sc->txchainmask);
4053 
4054 	memset(&linkq, 0, sizeof linkq);
4055 	linkq.id = sc->broadcast_id;
4056 	linkq.antmsk_1stream = txant;
4057 	linkq.antmsk_2stream = IWN_ANT_AB;
4058 	linkq.ampdu_max = IWN_AMPDU_MAX_NO_AGG;
4059 	linkq.ampdu_threshold = 3;
4060 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4061 
4062 	/* Use lowest mandatory bit-rate. */
4063 	rinfo = &iwn_rates[ridx];
4064 	linkq.retry[0].plcp = rinfo->plcp;
4065 	linkq.retry[0].rflags = rinfo->flags;
4066 	linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
4067 	/* Use same bit-rate for all TX retries. */
4068 	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4069 		linkq.retry[i].plcp = linkq.retry[0].plcp;
4070 		linkq.retry[i].rflags = linkq.retry[0].rflags;
4071 	}
4072 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4073 }
4074 
4075 void
4076 iwn_updateedca(struct ieee80211com *ic)
4077 {
4078 #define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
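/* e.g. ECWmin = 4 gives CWmin = 2^4 - 1 = 15; ECWmax = 10 gives CWmax = 1023. */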
4079 	struct iwn_softc *sc = ic->ic_softc;
4080 	struct iwn_edca_params cmd;
4081 	int aci;
4082 
4083 	memset(&cmd, 0, sizeof cmd);
4084 	cmd.flags = htole32(IWN_EDCA_UPDATE);
4085 	for (aci = 0; aci < EDCA_NUM_AC; aci++) {
4086 		const struct ieee80211_edca_ac_params *ac =
4087 		    &ic->ic_edca_ac[aci];
4088 		cmd.ac[aci].aifsn = ac->ac_aifsn;
4089 		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->ac_ecwmin));
4090 		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->ac_ecwmax));
4091 		cmd.ac[aci].txoplimit =
4092 		    htole16(IEEE80211_TXOP_TO_US(ac->ac_txoplimit));
4093 	}
4094 	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4095 #undef IWN_EXP2
4096 }
4097 
4098 void
4099 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4100 {
4101 	struct iwn_cmd_led led;
4102 
4103 	/* Clear microcode LED ownership. */
4104 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4105 
4106 	led.which = which;
4107 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4108 	led.off = off;
4109 	led.on = on;
4110 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4111 }
4112 
4113 /*
4114  * Set the critical temperature at which the firmware will stop the radio
4115  * and notify us.
4116  */
4117 int
4118 iwn_set_critical_temp(struct iwn_softc *sc)
4119 {
4120 	struct iwn_critical_temp crit;
4121 	int32_t temp;
4122 
4123 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4124 
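	/*
	 * The 110 degC threshold is converted to the unit the firmware
	 * expects: the 5150 uses its own sensor units (the inverse of the
	 * conversion done in iwn5000_get_temperature()), the 4965 takes
	 * Kelvin, and the other adapters take degrees Celsius directly.
	 */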
4125 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4126 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4127 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4128 		temp = IWN_CTOK(110);
4129 	else
4130 		temp = 110;
4131 	memset(&crit, 0, sizeof crit);
4132 	crit.tempR = htole32(temp);
4133 	DPRINTF(("setting critical temperature to %d\n", temp));
4134 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4135 }
4136 
4137 int
4138 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4139 {
4140 	struct iwn_cmd_timing cmd;
4141 	uint64_t val, mod;
4142 
4143 	memset(&cmd, 0, sizeof cmd);
4144 	memcpy(&cmd.tstamp, ni->ni_tstamp, sizeof (uint64_t));
4145 	cmd.bintval = htole16(ni->ni_intval);
4146 	cmd.lintval = htole16(10);
4147 
4148 	/* Compute remaining time until next beacon. */
4149 	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4150 	mod = letoh64(cmd.tstamp) % val;
4151 	cmd.binitval = htole32((uint32_t)(val - mod));
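	/*
	 * e.g. with bintval = 100 TU, val = 102400 usec; if the node's TSF
	 * timestamp is 1000000, then mod = 78400 and the firmware is told
	 * the next beacon is due in 102400 - 78400 = 24000 usec.
	 */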
4152 
4153 	DPRINTF(("timing bintval=%u, tstamp=%llu, init=%u\n",
4154 	    ni->ni_intval, letoh64(cmd.tstamp), (uint32_t)(val - mod)));
4155 
4156 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4157 }
4158 
4159 void
4160 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4161 {
4162 	/* Adjust TX power if need be (delta >= 3 degC). */
4163 	DPRINTF(("temperature %d->%d\n", sc->temp, temp));
4164 	if (abs(temp - sc->temp) >= 3) {
4165 		/* Record temperature of last calibration. */
4166 		sc->temp = temp;
4167 		(void)iwn4965_set_txpower(sc, 1);
4168 	}
4169 }
4170 
4171 /*
4172  * Set TX power for current channel (each rate has its own power settings).
4173  * This function takes into account the regulatory information from EEPROM,
4174  * the current temperature and the current voltage.
4175  */
4176 int
4177 iwn4965_set_txpower(struct iwn_softc *sc, int async)
4178 {
4179 /* Fixed-point arithmetic division using an n-bit fractional part. */
4180 #define fdivround(a, b, n)	\
4181 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4182 /* Linear interpolation. */
4183 #define interpolate(x, x1, y1, x2, y2, n)	\
4184 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
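/*
 * e.g. interpolate(40, 36, 10, 44, 18, 1) = 10 + fdivround(4 * 8, 8, 1)
 * = 10 + ((2 * 32) / 8 + 1) / 2 = 14, a linear estimate at channel 40
 * between calibration samples taken at channels 36 and 44.
 */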
4185 
4186 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4187 	struct ieee80211com *ic = &sc->sc_ic;
4188 	struct iwn_ucode_info *uc = &sc->ucode_info;
4189 	struct ieee80211_channel *ch;
4190 	struct iwn4965_cmd_txpower cmd;
4191 	struct iwn4965_eeprom_chan_samples *chans;
4192 	const uint8_t *rf_gain, *dsp_gain;
4193 	int32_t vdiff, tdiff;
4194 	int i, c, grp, maxpwr;
4195 	uint8_t chan;
4196 
4197 	/* Retrieve current channel from last RXON. */
4198 	chan = sc->rxon.chan;
4199 	DPRINTF(("setting TX power for channel %d\n", chan));
4200 	ch = &ic->ic_channels[chan];
4201 
4202 	memset(&cmd, 0, sizeof cmd);
4203 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4204 	cmd.chan = chan;
4205 
4206 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4207 		maxpwr   = sc->maxpwr5GHz;
4208 		rf_gain  = iwn4965_rf_gain_5ghz;
4209 		dsp_gain = iwn4965_dsp_gain_5ghz;
4210 	} else {
4211 		maxpwr   = sc->maxpwr2GHz;
4212 		rf_gain  = iwn4965_rf_gain_2ghz;
4213 		dsp_gain = iwn4965_dsp_gain_2ghz;
4214 	}
4215 
4216 	/* Compute voltage compensation. */
4217 	vdiff = ((int32_t)letoh32(uc->volt) - sc->eeprom_voltage) / 7;
4218 	if (vdiff > 0)
4219 		vdiff *= 2;
4220 	if (abs(vdiff) > 2)
4221 		vdiff = 0;
4222 	DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4223 	    vdiff, letoh32(uc->volt), sc->eeprom_voltage));
4224 
4225 	/* Get channel attenuation group. */
4226 	if (chan <= 20)		/* 1-20 */
4227 		grp = 4;
4228 	else if (chan <= 43)	/* 34-43 */
4229 		grp = 0;
4230 	else if (chan <= 70)	/* 44-70 */
4231 		grp = 1;
4232 	else if (chan <= 124)	/* 71-124 */
4233 		grp = 2;
4234 	else			/* 125-200 */
4235 		grp = 3;
4236 	DPRINTF(("chan %d, attenuation group=%d\n", chan, grp));
4237 
4238 	/* Get channel sub-band. */
4239 	for (i = 0; i < IWN_NBANDS; i++)
4240 		if (sc->bands[i].lo != 0 &&
4241 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4242 			break;
4243 	if (i == IWN_NBANDS)	/* Can't happen in real life. */
4244 		return EINVAL;
4245 	chans = sc->bands[i].chans;
4246 	DPRINTF(("chan %d sub-band=%d\n", chan, i));
4247 
4248 	for (c = 0; c < 2; c++) {
4249 		uint8_t power, gain, temp;
4250 		int maxchpwr, pwr, ridx, idx;
4251 
4252 		power = interpolate(chan,
4253 		    chans[0].num, chans[0].samples[c][1].power,
4254 		    chans[1].num, chans[1].samples[c][1].power, 1);
4255 		gain  = interpolate(chan,
4256 		    chans[0].num, chans[0].samples[c][1].gain,
4257 		    chans[1].num, chans[1].samples[c][1].gain, 1);
4258 		temp  = interpolate(chan,
4259 		    chans[0].num, chans[0].samples[c][1].temp,
4260 		    chans[1].num, chans[1].samples[c][1].temp, 1);
4261 		DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n",
4262 		    c, power, gain, temp));
4263 
4264 		/* Compute temperature compensation. */
4265 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4266 		DPRINTF(("temperature compensation=%d (current=%d, "
4267 		    "EEPROM=%d)\n", tdiff, sc->temp, temp));
4268 
4269 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4270 			/* Convert dBm to half-dBm. */
4271 			maxchpwr = sc->maxpwr[chan] * 2;
4272 #ifdef notyet
4273 			if (ridx > iwn_mcs2ridx[7] && ridx < iwn_mcs2ridx[16])
4274 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4275 #endif
4276 
4277 			pwr = maxpwr;
4278 
4279 			/* Adjust TX power based on rate. */
4280 			if ((ridx % 8) == 5)
4281 				pwr -= 15;	/* OFDM48: -7.5dB */
4282 			else if ((ridx % 8) == 6)
4283 				pwr -= 17;	/* OFDM54: -8.5dB */
4284 			else if ((ridx % 8) == 7)
4285 				pwr -= 20;	/* OFDM60: -10dB */
4286 			else
4287 				pwr -= 10;	/* Others: -5dB */
4288 
4289 			/* Do not exceed channel max TX power. */
4290 			if (pwr > maxchpwr)
4291 				pwr = maxchpwr;
4292 
4293 			idx = gain - (pwr - power) - tdiff - vdiff;
4294 			if (ridx > iwn_mcs2ridx[7]) /* MIMO */
4295 				idx += (int32_t)letoh32(uc->atten[grp][c]);
4296 
4297 			if (cmd.band == 0)
4298 				idx += 9;	/* 5GHz */
4299 			if (ridx == IWN_RIDX_MAX)
4300 				idx += 5;	/* CCK */
4301 
4302 			/* Make sure idx stays in a valid range. */
4303 			if (idx < 0)
4304 				idx = 0;
4305 			else if (idx > IWN4965_MAX_PWR_INDEX)
4306 				idx = IWN4965_MAX_PWR_INDEX;
4307 
4308 			DPRINTF(("TX chain %d, rate idx %d: power=%d\n",
4309 			    c, ridx, idx));
4310 			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4311 			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4312 		}
4313 	}
4314 
4315 	DPRINTF(("setting TX power for chan %d\n", chan));
4316 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4317 
4318 #undef interpolate
4319 #undef fdivround
4320 }
4321 
4322 int
4323 iwn5000_set_txpower(struct iwn_softc *sc, int async)
4324 {
4325 	struct iwn5000_cmd_txpower cmd;
4326 
4327 	/*
4328 	 * TX power calibration is handled automatically by the firmware
4329 	 * for 5000 Series.
4330 	 */
4331 	memset(&cmd, 0, sizeof cmd);
4332 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4333 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4334 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4335 	DPRINTF(("setting TX power\n"));
4336 	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4337 }
4338 
4339 /*
4340  * Retrieve the maximum RSSI (in dBm) among receivers.
4341  */
4342 int
4343 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
4344 {
4345 	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4346 	uint8_t mask, agc;
4347 	int rssi;
4348 
4349 	mask = (letoh16(phy->antenna) >> 4) & IWN_ANT_ABC;
4350 	agc  = (letoh16(phy->agc) >> 7) & 0x7f;
4351 
4352 	rssi = 0;
4353 	if (mask & IWN_ANT_A)
4354 		rssi = MAX(rssi, phy->rssi[0]);
4355 	if (mask & IWN_ANT_B)
4356 		rssi = MAX(rssi, phy->rssi[2]);
4357 	if (mask & IWN_ANT_C)
4358 		rssi = MAX(rssi, phy->rssi[4]);
4359 
4360 	return rssi - agc - IWN_RSSI_TO_DBM;
4361 }
4362 
4363 int
4364 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
4365 {
4366 	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4367 	uint8_t agc;
4368 	int rssi;
4369 
4370 	agc = (letoh32(phy->agc) >> 9) & 0x7f;
4371 
4372 	rssi = MAX(letoh16(phy->rssi[0]) & 0xff,
4373 		   letoh16(phy->rssi[1]) & 0xff);
4374 	rssi = MAX(letoh16(phy->rssi[2]) & 0xff, rssi);
4375 
4376 	return rssi - agc - IWN_RSSI_TO_DBM;
4377 }
4378 
4379 /*
4380  * Retrieve the average noise (in dBm) among receivers.
4381  */
4382 int
4383 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4384 {
4385 	int i, total, nbant, noise;
4386 
4387 	total = nbant = 0;
4388 	for (i = 0; i < 3; i++) {
4389 		if ((noise = letoh32(stats->noise[i]) & 0xff) == 0)
4390 			continue;
4391 		total += noise;
4392 		nbant++;
4393 	}
4394 	/* There should be at least one antenna but check anyway. */
4395 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4396 }
4397 
4398 /*
4399  * Compute temperature (in degC) from last received statistics.
4400  */
4401 int
4402 iwn4965_get_temperature(struct iwn_softc *sc)
4403 {
4404 	struct iwn_ucode_info *uc = &sc->ucode_info;
4405 	int32_t r1, r2, r3, r4, temp;
4406 
4407 	r1 = letoh32(uc->temp[0].chan20MHz);
4408 	r2 = letoh32(uc->temp[1].chan20MHz);
4409 	r3 = letoh32(uc->temp[2].chan20MHz);
4410 	r4 = letoh32(sc->rawtemp);
4411 
4412 	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4413 		return 0;
4414 
4415 	/* Sign-extend 23-bit R4 value to 32-bit. */
4416 	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
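	/* e.g. a raw value of 0x800001 becomes -8388607 (0xff800001). */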
4417 	/* Compute temperature in Kelvin. */
4418 	temp = (259 * (r4 - r2)) / (r3 - r1);
4419 	temp = (temp * 97) / 100 + 8;
4420 
4421 	DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp)));
4422 	return IWN_KTOC(temp);
4423 }
4424 
4425 int
4426 iwn5000_get_temperature(struct iwn_softc *sc)
4427 {
4428 	int32_t temp;
4429 
4430 	/*
4431 	 * Temperature is not used by the driver for 5000 Series because
4432 	 * TX power calibration is handled by firmware.
4433 	 */
4434 	temp = letoh32(sc->rawtemp);
4435 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4436 		temp = (temp / -5) + sc->temp_off;
4437 		temp = IWN_KTOC(temp);
4438 	}
4439 	return temp;
4440 }
4441 
4442 /*
4443  * Initialize sensitivity calibration state machine.
4444  */
4445 int
4446 iwn_init_sensitivity(struct iwn_softc *sc)
4447 {
4448 	struct iwn_ops *ops = &sc->ops;
4449 	struct iwn_calib_state *calib = &sc->calib;
4450 	uint32_t flags;
4451 	int error;
4452 
4453 	/* Reset calibration state machine. */
4454 	memset(calib, 0, sizeof (*calib));
4455 	calib->state = IWN_CALIB_STATE_INIT;
4456 	calib->cck_state = IWN_CCK_STATE_HIFA;
4457 	/* Set initial correlation values. */
4458 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4459 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4460 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4461 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4462 	calib->cck_x4      = 125;
4463 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4464 	calib->energy_cck  = sc->limits->energy_cck;
4465 
4466 	/* Write initial sensitivity. */
4467 	if ((error = iwn_send_sensitivity(sc)) != 0)
4468 		return error;
4469 
4470 	/* Write initial gains. */
4471 	if ((error = ops->init_gains(sc)) != 0)
4472 		return error;
4473 
4474 	/* Request statistics at each beacon interval. */
4475 	flags = 0;
4476 	DPRINTFN(2, ("sending request for statistics\n"));
4477 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4478 }
4479 
4480 /*
4481  * Collect noise and RSSI statistics for the first 20 beacons received
4482  * after association and use them to determine connected antennas and
4483  * to set differential gains.
4484  */
4485 void
4486 iwn_collect_noise(struct iwn_softc *sc,
4487     const struct iwn_rx_general_stats *stats)
4488 {
4489 	struct iwn_ops *ops = &sc->ops;
4490 	struct iwn_calib_state *calib = &sc->calib;
4491 	uint32_t val;
4492 	int i;
4493 
4494 	/* Accumulate RSSI and noise for all 3 antennas. */
4495 	for (i = 0; i < 3; i++) {
4496 		calib->rssi[i] += letoh32(stats->rssi[i]) & 0xff;
4497 		calib->noise[i] += letoh32(stats->noise[i]) & 0xff;
4498 	}
4499 	/* NB: We update differential gains only once after 20 beacons. */
4500 	if (++calib->nbeacons < 20)
4501 		return;
4502 
4503 	/* Determine highest average RSSI. */
4504 	val = MAX(calib->rssi[0], calib->rssi[1]);
4505 	val = MAX(calib->rssi[2], val);
4506 
4507 	/* Determine which antennas are connected. */
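	/*
	 * The RSSI values above were summed over 20 beacons, so an antenna
	 * is considered disconnected when it trails the best one by more
	 * than 15 units per beacon on average (15 * 20 accumulated).
	 */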
4508 	sc->chainmask = sc->rxchainmask;
4509 	for (i = 0; i < 3; i++)
4510 		if (val - calib->rssi[i] > 15 * 20)
4511 			sc->chainmask &= ~(1 << i);
4512 	DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n",
4513 	    sc->rxchainmask, sc->chainmask));
4514 
4515 	/* If none of the TX antennas are connected, keep at least one. */
4516 	if ((sc->chainmask & sc->txchainmask) == 0)
4517 		sc->chainmask |= IWN_LSB(sc->txchainmask);
4518 
4519 	(void)ops->set_gains(sc);
4520 	calib->state = IWN_CALIB_STATE_RUN;
4521 
4522 #ifdef notyet
4523 	/* XXX Disable RX chains with no antennas connected. */
4524 	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4525 	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4526 #endif
4527 
4528 	/* Enable power-saving mode if requested by user. */
4529 	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4530 		(void)iwn_set_pslevel(sc, 0, 3, 1);
4531 }
4532 
4533 int
4534 iwn4965_init_gains(struct iwn_softc *sc)
4535 {
4536 	struct iwn_phy_calib_gain cmd;
4537 
4538 	memset(&cmd, 0, sizeof cmd);
4539 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4540 	/* Differential gains initially set to 0 for all 3 antennas. */
4541 	DPRINTF(("setting initial differential gains\n"));
4542 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4543 }
4544 
4545 int
4546 iwn5000_init_gains(struct iwn_softc *sc)
4547 {
4548 	struct iwn_phy_calib cmd;
4549 
4550 	memset(&cmd, 0, sizeof cmd);
4551 	cmd.code = sc->reset_noise_gain;
4552 	cmd.ngroups = 1;
4553 	cmd.isvalid = 1;
4554 	DPRINTF(("setting initial differential gains\n"));
4555 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4556 }
4557 
4558 int
4559 iwn4965_set_gains(struct iwn_softc *sc)
4560 {
4561 	struct iwn_calib_state *calib = &sc->calib;
4562 	struct iwn_phy_calib_gain cmd;
4563 	int i, delta, noise;
4564 
4565 	/* Get minimal noise among connected antennas. */
4566 	noise = INT_MAX;	/* NB: There's at least one antenna. */
4567 	for (i = 0; i < 3; i++)
4568 		if (sc->chainmask & (1 << i))
4569 			noise = MIN(calib->noise[i], noise);
4570 
4571 	memset(&cmd, 0, sizeof cmd);
4572 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4573 	/* Set differential gains for connected antennas. */
4574 	for (i = 0; i < 3; i++) {
4575 		if (sc->chainmask & (1 << i)) {
4576 			/* Compute attenuation (in unit of 1.5dB). */
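			/*
			 * The noise values are sums over 20 beacons, so
			 * dividing by 30 (20 * 1.5) yields the average
			 * difference in steps of 1.5dB.
			 */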
4577 			delta = (noise - (int32_t)calib->noise[i]) / 30;
4578 			/* NB: delta <= 0 */
4579 			/* Limit to [-4.5dB,0]. */
4580 			cmd.gain[i] = MIN(abs(delta), 3);
4581 			if (delta < 0)
4582 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4583 		}
4584 	}
4585 	DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4586 	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask));
4587 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4588 }
4589 
4590 int
4591 iwn5000_set_gains(struct iwn_softc *sc)
4592 {
4593 	struct iwn_calib_state *calib = &sc->calib;
4594 	struct iwn_phy_calib_gain cmd;
4595 	int i, ant, div, delta;
4596 
4597 	/* Noise was summed over 20 beacons; non-6050 devices need a 1.5 factor. */
4598 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4599 
4600 	memset(&cmd, 0, sizeof cmd);
4601 	cmd.code = sc->noise_gain;
4602 	cmd.ngroups = 1;
4603 	cmd.isvalid = 1;
4604 	/*
4605 	 * Use the first available RX antenna as reference.  IWN_LSB()
4606 	 * returns values starting at 1, while the gain array cmd.gain[]
4607 	 * and the noise array calib->noise[] are indexed from 0.
4608 	 */
4609 	ant = IWN_LSB(sc->rxchainmask) - 1;
4610 
4611 	/* Set differential gains for other antennas. */
4612 	for (i = ant + 1; i < 3; i++) {
4613 		if (sc->chainmask & (1 << i)) {
4614 			/* The delta is relative to antenna "ant". */
4615 			delta = ((int32_t)calib->noise[ant] -
4616 			    (int32_t)calib->noise[i]) / div;
4617 			DPRINTF(("Ant[%d] vs. Ant[%d]: delta %d\n", ant, i, delta));
4618 			/* Limit to [-4.5dB,+4.5dB]. */
4619 			cmd.gain[i] = MIN(abs(delta), 3);
4620 			if (delta < 0)
4621 				cmd.gain[i] |= 1 << 2;	/* sign bit */
4622 			DPRINTF(("Setting differential gains for antenna %d: %x\n",
4623 				i, cmd.gain[i]));
4624 		}
4625 	}
4626 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4627 }
4628 
4629 /*
4630  * Tune RF RX sensitivity based on the number of false alarms detected
4631  * during the last beacon period.
4632  */
4633 void
4634 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4635 {
4636 #define inc(val, inc, max)			\
4637 	if ((val) < (max)) {			\
4638 		if ((val) < (max) - (inc))	\
4639 			(val) += (inc);		\
4640 		else				\
4641 			(val) = (max);		\
4642 		needs_update = 1;		\
4643 	}
4644 #define dec(val, dec, min)			\
4645 	if ((val) > (min)) {			\
4646 		if ((val) > (min) + (dec))	\
4647 			(val) -= (dec);		\
4648 		else				\
4649 			(val) = (min);		\
4650 		needs_update = 1;		\
4651 	}
4652 
4653 	const struct iwn_sensitivity_limits *limits = sc->limits;
4654 	struct iwn_calib_state *calib = &sc->calib;
4655 	uint32_t val, rxena, fa;
4656 	uint32_t energy[3], energy_min;
4657 	uint8_t noise[3], noise_ref;
4658 	int i, needs_update = 0;
4659 
4660 	/* Check that we've been enabled long enough. */
4661 	if ((rxena = letoh32(stats->general.load)) == 0)
4662 		return;
4663 
4664 	/* Compute number of false alarms since last call for OFDM. */
4665 	fa  = letoh32(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4666 	fa += letoh32(stats->ofdm.fa) - calib->fa_ofdm;
4667 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
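	/*
	 * Both the false alarm count and the receiver enable time are
	 * scaled to a common base, so "fa > 50 * rxena" below roughly
	 * tests for more than 50 false alarms per 200 TU of listen time
	 * (and "fa < 5 * rxena" for fewer than 5).
	 */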
4668 
4669 	/* Save counter values for next call. */
4670 	calib->bad_plcp_ofdm = letoh32(stats->ofdm.bad_plcp);
4671 	calib->fa_ofdm = letoh32(stats->ofdm.fa);
4672 
4673 	if (fa > 50 * rxena) {
4674 		/* High false alarm count, decrease sensitivity. */
4675 		DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa));
4676 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
4677 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4678 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
4679 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4680 
4681 	} else if (fa < 5 * rxena) {
4682 		/* Low false alarm count, increase sensitivity. */
4683 		DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa));
4684 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
4685 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4686 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
4687 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4688 	}
4689 
4690 	/* Compute maximum noise among 3 receivers. */
4691 	for (i = 0; i < 3; i++)
4692 		noise[i] = (letoh32(stats->general.noise[i]) >> 8) & 0xff;
4693 	val = MAX(noise[0], noise[1]);
4694 	val = MAX(noise[2], val);
4695 	/* Insert it into our samples table. */
4696 	calib->noise_samples[calib->cur_noise_sample] = val;
4697 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4698 
4699 	/* Compute maximum noise among last 20 samples. */
4700 	noise_ref = calib->noise_samples[0];
4701 	for (i = 1; i < 20; i++)
4702 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4703 
4704 	/* Compute maximum energy among 3 receivers. */
4705 	for (i = 0; i < 3; i++)
4706 		energy[i] = letoh32(stats->general.energy[i]);
4707 	val = MIN(energy[0], energy[1]);
4708 	val = MIN(energy[2], val);
4709 	/* Insert it into our samples table. */
4710 	calib->energy_samples[calib->cur_energy_sample] = val;
4711 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4712 
4713 	/* Compute minimum energy among last 10 samples. */
4714 	energy_min = calib->energy_samples[0];
4715 	for (i = 1; i < 10; i++)
4716 		energy_min = MAX(energy_min, calib->energy_samples[i]);
4717 	energy_min += 6;
4718 
4719 	/* Compute number of false alarms since last call for CCK. */
4720 	fa  = letoh32(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4721 	fa += letoh32(stats->cck.fa) - calib->fa_cck;
4722 	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
4723 
4724 	/* Save counter values for next call. */
4725 	calib->bad_plcp_cck = letoh32(stats->cck.bad_plcp);
4726 	calib->fa_cck = letoh32(stats->cck.fa);
4727 
4728 	if (fa > 50 * rxena) {
4729 		/* High false alarm count, decrease sensitivity. */
4730 		DPRINTFN(2, ("CCK high false alarm count: %u\n", fa));
4731 		calib->cck_state = IWN_CCK_STATE_HIFA;
4732 		calib->low_fa = 0;
4733 
4734 		if (calib->cck_x4 > 160) {
4735 			calib->noise_ref = noise_ref;
4736 			if (calib->energy_cck > 2)
4737 				dec(calib->energy_cck, 2, energy_min);
4738 		}
4739 		if (calib->cck_x4 < 160) {
4740 			calib->cck_x4 = 161;
4741 			needs_update = 1;
4742 		} else
4743 			inc(calib->cck_x4, 3, limits->max_cck_x4);
4744 
4745 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4746 
4747 	} else if (fa < 5 * rxena) {
4748 		/* Low false alarm count, increase sensitivity. */
4749 		DPRINTFN(2, ("CCK low false alarm count: %u\n", fa));
4750 		calib->cck_state = IWN_CCK_STATE_LOFA;
4751 		calib->low_fa++;
4752 
4753 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
4754 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4755 		     calib->low_fa > 100)) {
4756 			inc(calib->energy_cck, 2, limits->min_energy_cck);
4757 			dec(calib->cck_x4,     3, limits->min_cck_x4);
4758 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4759 		}
4760 	} else {
4761 		/* Not worth increasing or decreasing sensitivity. */
4762 		DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa));
4763 		calib->low_fa = 0;
4764 		calib->noise_ref = noise_ref;
4765 
4766 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4767 			/* Previous interval had many false alarms. */
4768 			dec(calib->energy_cck, 8, energy_min);
4769 		}
4770 		calib->cck_state = IWN_CCK_STATE_INIT;
4771 	}
4772 
4773 	if (needs_update)
4774 		(void)iwn_send_sensitivity(sc);
4775 #undef dec
4776 #undef inc
4777 }
4778 
4779 int
4780 iwn_send_sensitivity(struct iwn_softc *sc)
4781 {
4782 	struct iwn_calib_state *calib = &sc->calib;
4783 	struct iwn_enhanced_sensitivity_cmd cmd;
4784 	int len;
4785 
4786 	memset(&cmd, 0, sizeof cmd);
4787 	len = sizeof (struct iwn_sensitivity_cmd);
4788 	cmd.which = IWN_SENSITIVITY_WORKTBL;
4789 	/* OFDM modulation. */
4790 	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
4791 	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
4792 	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
4793 	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
4794 	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
4795 	cmd.energy_ofdm_th     = htole16(62);
4796 	/* CCK modulation. */
4797 	cmd.corr_cck_x4        = htole16(calib->cck_x4);
4798 	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
4799 	cmd.energy_cck         = htole16(calib->energy_cck);
4800 	/* Barker modulation: use default values. */
4801 	cmd.corr_barker        = htole16(190);
4802 	cmd.corr_barker_mrc    = htole16(390);
4803 	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4804 		goto send;
4805 	/* Enhanced sensitivity settings. */
4806 	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4807 	cmd.ofdm_det_slope_mrc = htole16(668);
4808 	cmd.ofdm_det_icept_mrc = htole16(4);
4809 	cmd.ofdm_det_slope     = htole16(486);
4810 	cmd.ofdm_det_icept     = htole16(37);
4811 	cmd.cck_det_slope_mrc  = htole16(853);
4812 	cmd.cck_det_icept_mrc  = htole16(4);
4813 	cmd.cck_det_slope      = htole16(476);
4814 	cmd.cck_det_icept      = htole16(99);
4815 send:
4816 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4817 }
4818 
4819 /*
4820  * Set STA mode power saving level (between 0 and 5).
4821  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4822  */
4823 int
4824 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4825 {
4826 	struct iwn_pmgt_cmd cmd;
4827 	const struct iwn_pmgt *pmgt;
4828 	uint32_t max, skip_dtim;
4829 	pcireg_t reg;
4830 	int i;
4831 
4832 	/* Select which PS parameters to use. */
4833 	if (dtim <= 2)
4834 		pmgt = &iwn_pmgt[0][level];
4835 	else if (dtim <= 10)
4836 		pmgt = &iwn_pmgt[1][level];
4837 	else
4838 		pmgt = &iwn_pmgt[2][level];
4839 
4840 	memset(&cmd, 0, sizeof cmd);
4841 	if (level != 0)	/* not CAM */
4842 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4843 	if (level == 5)
4844 		cmd.flags |= htole16(IWN_PS_FAST_PD);
4845 	/* Retrieve PCIe Active State Power Management (ASPM). */
4846 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
4847 	    sc->sc_cap_off + PCI_PCIE_LCSR);
4848 	if (!(reg & PCI_PCIE_LCSR_ASPM_L0S))	/* L0s Entry disabled. */
4849 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4850 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4851 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4852 
4853 	if (dtim == 0) {
4854 		dtim = 1;
4855 		skip_dtim = 0;
4856 	} else
4857 		skip_dtim = pmgt->skip_dtim;
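	/*
	 * When DTIM beacons may be skipped, cap the sleep intervals below
	 * to a whole multiple of the DTIM period, presumably so the
	 * longest sleep still ends on a DTIM beacon boundary.
	 */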
4858 	if (skip_dtim != 0) {
4859 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4860 		max = pmgt->intval[4];
4861 		if (max == (uint32_t)-1)
4862 			max = dtim * (skip_dtim + 1);
4863 		else if (max > dtim)
4864 			max = (max / dtim) * dtim;
4865 	} else
4866 		max = dtim;
4867 	for (i = 0; i < 5; i++)
4868 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4869 
4870 	DPRINTF(("setting power saving level to %d\n", level));
4871 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4872 }
4873 
4874 int
4875 iwn_send_btcoex(struct iwn_softc *sc)
4876 {
4877 	struct iwn_bluetooth cmd;
4878 
4879 	memset(&cmd, 0, sizeof cmd);
4880 	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4881 	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4882 	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4883 	DPRINTF(("configuring bluetooth coexistence\n"));
4884 	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4885 }
4886 
4887 int
4888 iwn_send_advanced_btcoex(struct iwn_softc *sc)
4889 {
4890 	static const uint32_t btcoex_3wire[12] = {
4891 		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
4892 		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
4893 		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
4894 	};
4895 	struct iwn_btcoex_priotable btprio;
4896 	struct iwn_btcoex_prot btprot;
4897 	int error, i;
4898 
4899 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
4900 	    sc->hw_type == IWN_HW_REV_TYPE_135) {
4901 		struct iwn2000_btcoex_config btconfig;
4902 
4903 		memset(&btconfig, 0, sizeof btconfig);
4904 		btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION |
4905 		    (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) |
4906 		    IWN_BT_SYNC_2_BT_DISABLE;
4907 		btconfig.max_kill = 5;
4908 		btconfig.bt3_t7_timer = 1;
4909 		btconfig.kill_ack = htole32(0xffff0000);
4910 		btconfig.kill_cts = htole32(0xffff0000);
4911 		btconfig.sample_time = 2;
4912 		btconfig.bt3_t2_timer = 0xc;
4913 		for (i = 0; i < 12; i++)
4914 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4915 		btconfig.valid = htole16(0xff);
4916 		btconfig.prio_boost = htole32(0xf0);
4917 		DPRINTF(("configuring advanced bluetooth coexistence\n"));
4918 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
4919 		    sizeof(btconfig), 1);
4920 		if (error != 0)
4921 			return (error);
4922 	} else {
4923 		struct iwn6000_btcoex_config btconfig;
4924 
4925 		memset(&btconfig, 0, sizeof btconfig);
4926 		btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION |
4927 		    (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) |
4928 		    IWN_BT_SYNC_2_BT_DISABLE;
4929 		btconfig.max_kill = 5;
4930 		btconfig.bt3_t7_timer = 1;
4931 		btconfig.kill_ack = htole32(0xffff0000);
4932 		btconfig.kill_cts = htole32(0xffff0000);
4933 		btconfig.sample_time = 2;
4934 		btconfig.bt3_t2_timer = 0xc;
4935 		for (i = 0; i < 12; i++)
4936 			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
4937 		btconfig.valid = htole16(0xff);
4938 		btconfig.prio_boost = 0xf0;
4939 		DPRINTF(("configuring advanced bluetooth coexistence\n"));
4940 		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
4941 		    sizeof(btconfig), 1);
4942 		if (error != 0)
4943 			return (error);
4944 	}
4945 
4946 	memset(&btprio, 0, sizeof btprio);
4947 	btprio.calib_init1 = 0x6;
4948 	btprio.calib_init2 = 0x7;
4949 	btprio.calib_periodic_low1 = 0x2;
4950 	btprio.calib_periodic_low2 = 0x3;
4951 	btprio.calib_periodic_high1 = 0x4;
4952 	btprio.calib_periodic_high2 = 0x5;
4953 	btprio.dtim = 0x6;
4954 	btprio.scan52 = 0x8;
4955 	btprio.scan24 = 0xa;
4956 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
4957 	    1);
4958 	if (error != 0)
4959 		return (error);
4960 
4961 	/* Force BT state machine change */
4962 	memset(&btprot, 0, sizeof btprot);
4963 	btprot.open = 1;
4964 	btprot.type = 1;
4965 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
4966 	if (error != 0)
4967 		return (error);
4968 
4969 	btprot.open = 0;
4970 	return (iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1));
4971 }
4972 
4973 int
4974 iwn5000_runtime_calib(struct iwn_softc *sc)
4975 {
4976 	struct iwn5000_calib_config cmd;
4977 
4978 	memset(&cmd, 0, sizeof cmd);
4979 	cmd.ucode.once.enable = 0xffffffff;
4980 	cmd.ucode.once.start = IWN5000_CALIB_DC;
4981 	DPRINTF(("configuring runtime calibration\n"));
4982 	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
4983 }
4984 
4985 int
4986 iwn_config(struct iwn_softc *sc)
4987 {
4988 	struct iwn_ops *ops = &sc->ops;
4989 	struct ieee80211com *ic = &sc->sc_ic;
4990 	struct ifnet *ifp = &ic->ic_if;
4991 	uint32_t txmask;
4992 	uint16_t rxchain;
4993 	int error, ridx;
4994 
4995 	/* Set radio temperature sensor offset. */
4996 	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
4997 		error = iwn6000_temp_offset_calib(sc);
4998 		if (error != 0) {
4999 			printf("%s: could not set temperature offset\n",
5000 			    sc->sc_dev.dv_xname);
5001 			return error;
5002 		}
5003 	}
5004 
5005 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
5006 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
5007 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
5008 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
5009 		error = iwn2000_temp_offset_calib(sc);
5010 		if (error != 0) {
5011 			printf("%s: could not set temperature offset\n",
5012 			    sc->sc_dev.dv_xname);
5013 			return error;
5014 		}
5015 	}
5016 
5017 	if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
5018 	    sc->hw_type == IWN_HW_REV_TYPE_6005) {
5019 		/* Configure runtime DC calibration. */
5020 		error = iwn5000_runtime_calib(sc);
5021 		if (error != 0) {
5022 			printf("%s: could not configure runtime calibration\n",
5023 			    sc->sc_dev.dv_xname);
5024 			return error;
5025 		}
5026 	}
5027 
5028 	/* Configure valid TX chains for >=5000 Series. */
5029 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5030 		txmask = htole32(sc->txchainmask);
5031 		DPRINTF(("configuring valid TX chains 0x%x\n", txmask));
5032 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5033 		    sizeof txmask, 0);
5034 		if (error != 0) {
5035 			printf("%s: could not configure valid TX chains\n",
5036 			    sc->sc_dev.dv_xname);
5037 			return error;
5038 		}
5039 	}
5040 
5041 	/* Configure bluetooth coexistence. */
5042 	if (sc->sc_flags & IWN_FLAG_ADV_BT_COEX)
5043 		error = iwn_send_advanced_btcoex(sc);
5044 	else
5045 		error = iwn_send_btcoex(sc);
5046 	if (error != 0) {
5047 		printf("%s: could not configure bluetooth coexistence\n",
5048 		    sc->sc_dev.dv_xname);
5049 		return error;
5050 	}
5051 
5052 	/* Set mode, channel, RX filter and enable RX. */
5053 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5054 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
5055 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr);
5056 	IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr);
5057 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
5058 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5059 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) {
5060 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5061 		if (ic->ic_flags & IEEE80211_F_USEPROT)
5062 			sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT);
5063 		DPRINTF(("%s: 2ghz prot 0x%x\n", __func__,
5064 		    le32toh(sc->rxon.flags)));
5065 	}
5066 	switch (ic->ic_opmode) {
5067 	case IEEE80211_M_STA:
5068 		sc->rxon.mode = IWN_MODE_STA;
5069 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5070 		break;
5071 	case IEEE80211_M_MONITOR:
5072 		sc->rxon.mode = IWN_MODE_MONITOR;
5073 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5074 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5075 		break;
5076 	default:
5077 		/* Should not get here. */
5078 		break;
5079 	}
5080 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
5081 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
5082 	sc->rxon.ht_single_mask = 0xff;
5083 	sc->rxon.ht_dual_mask = 0xff;
5084 	sc->rxon.ht_triple_mask = 0xff;
5085 	rxchain =
5086 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5087 	    IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
5088 	    IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
5089 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5090 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5091 		rxchain |= IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask);
5092 		rxchain |= (IWN_RXCHAIN_DRIVER_FORCE | IWN_RXCHAIN_MIMO_FORCE);
5093 	}
5094 	sc->rxon.rxchain = htole16(rxchain);
5095 	DPRINTF(("setting configuration\n"));
5096 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x rxchain %x\n",
5097 	    __func__, sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5098 	    sc->rxon.ofdm_mask, sc->rxon.rxchain));
5099 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5100 	if (error != 0) {
5101 		printf("%s: RXON command failed\n", sc->sc_dev.dv_xname);
5102 		return error;
5103 	}
5104 
5105 	ridx = (sc->sc_ic.ic_curmode == IEEE80211_MODE_11A) ?
5106 	    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
5107 	if ((error = iwn_add_broadcast_node(sc, 0, ridx)) != 0) {
5108 		printf("%s: could not add broadcast node\n",
5109 		    sc->sc_dev.dv_xname);
5110 		return error;
5111 	}
5112 
5113 	/* Configuration has changed, set TX power accordingly. */
5114 	if ((error = ops->set_txpower(sc, 0)) != 0) {
5115 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5116 		return error;
5117 	}
5118 
5119 	if ((error = iwn_set_critical_temp(sc)) != 0) {
5120 		printf("%s: could not set critical temperature\n",
5121 		    sc->sc_dev.dv_xname);
5122 		return error;
5123 	}
5124 
5125 	/* Set power saving level to CAM during initialization. */
5126 	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5127 		printf("%s: could not set power saving level\n",
5128 		    sc->sc_dev.dv_xname);
5129 		return error;
5130 	}
5131 	return 0;
5132 }
5133 
5134 uint16_t
5135 iwn_get_active_dwell_time(struct iwn_softc *sc,
5136     uint16_t flags, uint8_t n_probes)
5137 {
5138 	/* 2GHz dwell time */
5139 	if (flags & IEEE80211_CHAN_2GHZ) {
5140 		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
5141 		IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
5142 	}
5143 
5144 	/* 5GHz dwell time */
5145 	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
5146 	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
5147 }
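/*
 * For example, a 2GHz scan with one directed SSID (iwn_scan() passes
 * is_active == 1 as n_probes) yields IWN_ACTIVE_DWELL_TIME_2GHZ +
 * 2 * IWN_ACTIVE_DWELL_FACTOR_2GHZ.
 */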
5148 
5149 /*
5150  * Limit the total dwell time to 85% of the beacon interval.
5151  *
5152  * Returns the dwell time in milliseconds.
5153  */
5154 uint16_t
5155 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
5156 {
5157 	struct ieee80211com *ic = &sc->sc_ic;
5158 	struct ieee80211_node *ni = ic->ic_bss;
5159 	int bintval = 0;
5160 
5161 	/* bintval is in TU (1.024 ms) */
5162 	if (ni != NULL)
5163 		bintval = ni->ni_intval;
5164 
5165 	/*
5166 	 * If it is non-zero and we're associated, return the smaller of
5167 	 * IWN_PASSIVE_DWELL_BASE and 85% of the beacon interval.
5168 	 *
5169 	 * XXX The math should take into account that bintval is in
5170 	 * units of 1.024 ms, not 1 ms.
5171 	 */
5172 	if (ic->ic_state == IEEE80211_S_RUN && bintval > 0)
5173 		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
5174 
5175 	/* No association context? Default */
5176 	return dwell_time;
5177 }
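/*
 * For example, while associated with a typical beacon interval of
 * 100 TU, the dwell time is clamped to MIN(IWN_PASSIVE_DWELL_BASE, 85).
 */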
5178 
5179 uint16_t
5180 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags)
5181 {
5182 	uint16_t passive;
5183 	if (flags & IEEE80211_CHAN_2GHZ) {
5184 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
5185 	} else {
5186 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
5187 	}
5188 
5189 	/* Clamp to the beacon interval if we're associated */
5190 	return (iwn_limit_dwell(sc, passive));
5191 }
5192 
5193 int
5194 iwn_scan(struct iwn_softc *sc, uint16_t flags, int bgscan)
5195 {
5196 	struct ieee80211com *ic = &sc->sc_ic;
5197 	struct iwn_scan_hdr *hdr;
5198 	struct iwn_cmd_data *tx;
5199 	struct iwn_scan_essid *essid;
5200 	struct iwn_scan_chan *chan;
5201 	struct ieee80211_frame *wh;
5202 	struct ieee80211_rateset *rs;
5203 	struct ieee80211_channel *c;
5204 	uint8_t *buf, *frm;
5205 	uint16_t rxchain, dwell_active, dwell_passive;
5206 	uint8_t txant;
5207 	int buflen, error, is_active;
5208 
5209 	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5210 	if (buf == NULL) {
5211 		printf("%s: could not allocate buffer for scan command\n",
5212 		    sc->sc_dev.dv_xname);
5213 		return ENOMEM;
5214 	}
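	/*
	 * Layout of the scan command built below, as implied by the
	 * pointer arithmetic that follows: a scan header, a TX command
	 * for the probe request, an array of 20 ESSID entries, the
	 * probe request frame itself, and one iwn_scan_chan entry per
	 * channel to scan.
	 */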
5215 	hdr = (struct iwn_scan_hdr *)buf;
5216 	/*
5217 	 * Move to the next channel if no frames are received within 10ms
5218 	 * after sending the probe request.
5219 	 */
5220 	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
5221 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5222 
5223 	if (bgscan) {
5224 		int bintval;
5225 
5226 		/* Set maximum off-channel time. */
5227 		hdr->max_out = htole32(200 * 1024);
5228 
5229 		/* Configure scan pauses which service on-channel traffic. */
5230 		bintval = ic->ic_bss->ni_intval ? ic->ic_bss->ni_intval : 100;
5231 		hdr->pause_scan = htole32(((100 / bintval) << 22) |
5232 		    ((100 % bintval) * 1024));
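		/*
		 * Judging by the arithmetic above, the upper bits (<<22)
		 * hold the number of full beacon intervals to stay on the
		 * operating channel and the low bits hold the remainder in
		 * microseconds (1 TU = 1024us), i.e. roughly 100 TU between
		 * off-channel excursions.
		 */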
5233 	}
5234 
5235 	/* Select antennas for scanning. */
5236 	rxchain =
5237 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5238 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5239 	    IWN_RXCHAIN_DRIVER_FORCE;
5240 	if ((flags & IEEE80211_CHAN_5GHZ) &&
5241 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5242 		/*
5243 		 * On 4965, ant A and C must be avoided in 5GHz because of a
5244 		 * HW bug that causes very weak RSSI values to be reported.
5245 		 */
5246 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5247 	} else	/* Use all available RX antennas. */
5248 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5249 	hdr->rxchain = htole16(rxchain);
5250 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5251 
5252 	tx = (struct iwn_cmd_data *)(hdr + 1);
5253 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5254 	tx->id = sc->broadcast_id;
5255 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5256 
5257 	if (flags & IEEE80211_CHAN_5GHZ) {
5258 		/* Send probe requests at 6Mbps. */
5259 		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
5260 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5261 	} else {
5262 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5263 		if (bgscan && sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5264 		    sc->rxon.chan > 14) {
5265 			/*
5266 			 * 4965 firmware can crash when sending probe requests
5267 			 * with CCK rates while associated to a 5GHz AP.
5268 			 * Send probe requests at 6Mbps OFDM as a workaround.
5269 			 */
5270 			tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
5271 		} else {
5272 			/* Send probe requests at 1Mbps. */
5273 			tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
5274 			tx->rflags = IWN_RFLAG_CCK;
5275 		}
5276 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5277 	}
5278 	/* Use the first valid TX antenna. */
5279 	txant = IWN_LSB(sc->txchainmask);
5280 	tx->rflags |= IWN_RFLAG_ANT(txant);
5281 
5282 	/*
5283 	 * Only do active scanning if we're announcing a probe request
5284 	 * for a given SSID (or more, if we ever add that to the driver).
5285 	 */
5286 	is_active = 0;
5287 
5288 	/*
5289 	 * If we're scanning for a specific SSID, add it to the command.
5290 	 */
5291 	essid = (struct iwn_scan_essid *)(tx + 1);
5292 	if (ic->ic_des_esslen != 0) {
5293 		essid[0].id = IEEE80211_ELEMID_SSID;
5294 		essid[0].len = ic->ic_des_esslen;
5295 		memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
5296 
5297 		is_active = 1;
5298 	}
5299 	/*
5300 	 * Build a probe request frame.  Most of the following code is a
5301 	 * copy & paste of what is done in net80211.
5302 	 */
5303 	wh = (struct ieee80211_frame *)(essid + 20);
5304 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5305 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5306 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5307 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5308 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5309 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5310 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5311 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5312 
5313 	frm = (uint8_t *)(wh + 1);
5314 	frm = ieee80211_add_ssid(frm, NULL, 0);
5315 	frm = ieee80211_add_rates(frm, rs);
5316 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5317 		frm = ieee80211_add_xrates(frm, rs);
5318 	if (ic->ic_flags & IEEE80211_F_HTON)
5319 		frm = ieee80211_add_htcaps(frm, ic);
5320 
5321 	/* Set length of probe request. */
5322 	tx->len = htole16(frm - (uint8_t *)wh);
5323 
5324 	/*
5325 	 * If active scanning is requested but a certain channel is
5326 	 * marked passive, we can do active scanning if we detect
5327 	 * transmissions.
5328 	 *
5329 	 * There is an issue with some firmware versions that triggers
5330 	 * a sysassert on a "good CRC threshold" of zero (== disabled),
5331 	 * on a radar channel even though this means that we should NOT
5332 	 * send probes.
5333 	 *
5334 	 * The "good CRC threshold" is the number of frames that we
5335 	 * need to receive during our dwell time on a channel before
5336 	 * sending out probes -- setting this to a huge value will
5337 	 * mean we never reach it, but at the same time work around
5338 	 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER
5339 	 * here instead of IWN_GOOD_CRC_TH_DISABLED.
5340 	 *
5341 	 * This was fixed in later versions along with some other
5342 	 * scan changes, and the threshold behaves as a flag in those
5343 	 * versions.
5344 	 */
5345 
5346 	/*
5347 	 * If we're doing active scanning, set the crc_threshold
5348 	 * to a suitable value.  This differs for active versus
5349 	 * passive scanning depending upon the channel flags; the
5350 	 * firmware will obey that particular check for us.
5351 	 */
5352 	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
5353 		hdr->crc_threshold = is_active ?
5354 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
5355 	else
5356 		hdr->crc_threshold = is_active ?
5357 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
5358 
5359 	chan = (struct iwn_scan_chan *)frm;
5360 	for (c  = &ic->ic_channels[1];
5361 	     c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) {
5362 		if ((c->ic_flags & flags) != flags)
5363 			continue;
5364 
5365 		chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5366 		DPRINTFN(2, ("adding channel %d\n", chan->chan));
5367 		chan->flags = 0;
5368 		if (ic->ic_des_esslen != 0)
5369 			chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5370 
5371 		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
5372 			chan->flags |= htole32(IWN_CHAN_PASSIVE);
5373 		else
5374 			chan->flags |= htole32(IWN_CHAN_ACTIVE);
5375 
5376 		/*
5377 		 * Calculate the active/passive dwell times.
5378 		 */
5379 
5380 		dwell_active = iwn_get_active_dwell_time(sc, flags, is_active);
5381 		dwell_passive = iwn_get_passive_dwell_time(sc, flags);
5382 
5383 		/* Make sure they're valid */
5384 		if (dwell_passive <= dwell_active)
5385 			dwell_passive = dwell_active + 1;
5386 
5387 		chan->active = htole16(dwell_active);
5388 		chan->passive = htole16(dwell_passive);
5389 
5390 		chan->dsp_gain = 0x6e;
5391 		if (IEEE80211_IS_CHAN_5GHZ(c)) {
5392 			chan->rf_gain = 0x3b;
5393 		} else {
5394 			chan->rf_gain = 0x28;
5395 		}
5396 		hdr->nchan++;
5397 		chan++;
5398 	}
5399 
5400 	buflen = (uint8_t *)chan - buf;
5401 	hdr->len = htole16(buflen);
5402 
5403 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5404 	if (error == 0) {
5405 		/*
5406 		 * The current mode might have been fixed during association.
5407 		 * Ensure all channels get scanned.
5408 		 */
5409 		if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
5410 			ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
5411 
5412 		sc->sc_flags |= IWN_FLAG_SCANNING;
5413 		if (bgscan)
5414 			sc->sc_flags |= IWN_FLAG_BGSCAN;
5415 	}
5416 	free(buf, M_DEVBUF, IWN_SCAN_MAXSZ);
5417 	return error;
5418 }
5419 
5420 void
5421 iwn_scan_abort(struct iwn_softc *sc)
5422 {
5423 	iwn_cmd(sc, IWN_CMD_SCAN_ABORT, NULL, 0, 1);
5424 
5425 	/* XXX Cannot wait for status response in interrupt context. */
5426 	DELAY(100);
5427 
5428 	sc->sc_flags &= ~IWN_FLAG_SCANNING;
5429 	sc->sc_flags &= ~IWN_FLAG_BGSCAN;
5430 }
5431 
5432 int
5433 iwn_bgscan(struct ieee80211com *ic)
5434 {
5435 	struct iwn_softc *sc = ic->ic_softc;
5436 	int error;
5437 
5438 	if (sc->sc_flags & IWN_FLAG_SCANNING)
5439 		return 0;
5440 
5441 	error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 1);
5442 	if (error)
5443 		printf("%s: could not initiate background scan\n",
5444 		    sc->sc_dev.dv_xname);
5445 	return error;
5446 }
5447 
5448 int
5449 iwn_auth(struct iwn_softc *sc, int arg)
5450 {
5451 	struct iwn_ops *ops = &sc->ops;
5452 	struct ieee80211com *ic = &sc->sc_ic;
5453 	struct ieee80211_node *ni = ic->ic_bss;
5454 	int error, ridx;
5455 	int bss_switch =
5456 	    (!IEEE80211_ADDR_EQ(sc->bss_node_addr, etheranyaddr) &&
5457 	    !IEEE80211_ADDR_EQ(sc->bss_node_addr, ni->ni_macaddr));
5458 
5459 	/* Update adapter configuration. */
5460 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5461 	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5462 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5463 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5464 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5465 		if (ic->ic_flags & IEEE80211_F_USEPROT)
5466 			sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT);
5467 		DPRINTF(("%s: 2ghz prot 0x%x\n", __func__,
5468 		    le32toh(sc->rxon.flags)));
5469 	}
5470 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5471 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5472 	else
5473 		sc->rxon.flags &= ~htole32(IWN_RXON_SHSLOT);
5474 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5475 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5476 	else
5477 		sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE);
5478 	switch (ic->ic_curmode) {
5479 	case IEEE80211_MODE_11A:
5480 		sc->rxon.cck_mask  = 0;
5481 		sc->rxon.ofdm_mask = 0x15;
5482 		break;
5483 	case IEEE80211_MODE_11B:
5484 		sc->rxon.cck_mask  = 0x03;
5485 		sc->rxon.ofdm_mask = 0;
5486 		break;
5487 	default:	/* Assume 802.11b/g/n. */
5488 		sc->rxon.cck_mask  = 0x0f;
5489 		sc->rxon.ofdm_mask = 0x15;
5490 	}
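	/*
	 * Assumed meaning of the masks above: each cck_mask bit selects one
	 * of the CCK rates 1/2/5.5/11 Mbps and each ofdm_mask bit one of the
	 * OFDM rates, so 0x15 enables the basic rates 6/12/24 Mbps.
	 */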
5491 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__,
5492 	    sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5493 	    sc->rxon.ofdm_mask));
5494 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5495 	if (error != 0) {
5496 		printf("%s: RXON command failed\n", sc->sc_dev.dv_xname);
5497 		return error;
5498 	}
5499 
5500 	/* Configuration has changed, set TX power accordingly. */
5501 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5502 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5503 		return error;
5504 	}
5505 	/*
5506 	 * Reconfiguring RXON clears the firmware nodes table so we must
5507 	 * add the broadcast node again.
5508 	 */
5509 	ridx = IEEE80211_IS_CHAN_5GHZ(ni->ni_chan) ?
5510 	    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
5511 	if ((error = iwn_add_broadcast_node(sc, 1, ridx)) != 0) {
5512 		printf("%s: could not add broadcast node\n",
5513 		    sc->sc_dev.dv_xname);
5514 		return error;
5515 	}
5516 
5517 	/*
5518 	 * Make sure the firmware gets to see a beacon before we send
5519 	 * the auth request. Otherwise the Tx attempt can fail due to
5520 	 * the firmware's built-in regulatory domain enforcement.
5521 	 * Delaying here for every incoming deauth frame can result in a DoS.
5522 	 * Don't delay if we're here because of an incoming frame (arg != -1)
5523 	 * or if we're already waiting for a response (ic_mgt_timer != 0).
5524 	 * If we are switching APs after a background scan then net80211 has
5525 	 * just faked the reception of a deauth frame from our old AP, so it
5526 	 * is safe to delay in that case.
5527 	 */
5528 	if ((arg == -1 || bss_switch) && ic->ic_mgt_timer == 0)
5529 		DELAY(ni->ni_intval * 3 * IEEE80211_DUR_TU);
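	/*
	 * That is, busy-wait for about three beacon intervals: ni_intval is
	 * in TU and IEEE80211_DUR_TU converts it to the microseconds that
	 * DELAY() expects.
	 */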
5530 
5531 	/* We can now clear the cached address of our previous AP. */
5532 	memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr));
5533 
5534 	return 0;
5535 }
5536 
5537 int
5538 iwn_run(struct iwn_softc *sc)
5539 {
5540 	struct iwn_ops *ops = &sc->ops;
5541 	struct ieee80211com *ic = &sc->sc_ic;
5542 	struct ieee80211_node *ni = ic->ic_bss;
5543 	struct iwn_node *wn = (void *)ni;
5544 	struct iwn_node_info node;
5545 	int error;
5546 
5547 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5548 		/* Link LED blinks while monitoring. */
5549 		iwn_set_led(sc, IWN_LED_LINK, 50, 50);
5550 		return 0;
5551 	}
5552 	if ((error = iwn_set_timing(sc, ni)) != 0) {
5553 		printf("%s: could not set timing\n", sc->sc_dev.dv_xname);
5554 		return error;
5555 	}
5556 
5557 	/* Update adapter configuration. */
5558 	sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5559 	/* Short preamble and slot time are negotiated when associating. */
5560 	sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
5561 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5562 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5563 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5564 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5565 	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5566 
5567 	/* HT is negotiated when associating. */
5568 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5569 		enum ieee80211_htprot htprot =
5570 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5571 		DPRINTF(("%s: htprot = %d\n", __func__, htprot));
5572 		sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot));
5573 	} else
5574 		sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3));
5575 
5576 	if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) {
5577 		/* 11a or 11n 5GHz */
5578 		sc->rxon.cck_mask  = 0;
5579 		sc->rxon.ofdm_mask = 0x15;
5580 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
5581 		/* 11n 2GHz */
5582 		sc->rxon.cck_mask  = 0x0f;
5583 		sc->rxon.ofdm_mask = 0x15;
5584 	} else {
5585 		if (ni->ni_rates.rs_nrates == 4) {
5586 			/* 11b */
5587 			sc->rxon.cck_mask  = 0x03;
5588 			sc->rxon.ofdm_mask = 0;
5589 		} else {
5590 			/* assume 11g */
5591 			sc->rxon.cck_mask  = 0x0f;
5592 			sc->rxon.ofdm_mask = 0x15;
5593 		}
5594 	}
5595 	DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__,
5596 	    sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask,
5597 	    sc->rxon.ofdm_mask));
5598 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5599 	if (error != 0) {
5600 		printf("%s: could not update configuration\n",
5601 		    sc->sc_dev.dv_xname);
5602 		return error;
5603 	}
5604 
5605 	/* Configuration has changed, set TX power accordingly. */
5606 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5607 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5608 		return error;
5609 	}
5610 
5611 	/* Fake a join to initialize the TX rate. */
5612 	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5613 	iwn_newassoc(ic, ni, 1);
5614 
5615 	/* Add BSS node. */
5616 	memset(&node, 0, sizeof node);
5617 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5618 	node.id = IWN_ID_BSS;
5619 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5620 		node.htmask = (IWN_AMDPU_SIZE_FACTOR_MASK |
5621 		    IWN_AMDPU_DENSITY_MASK);
5622 		node.htflags = htole32(
5623 		    IWN_AMDPU_SIZE_FACTOR(
5624 			(ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_LE)) |
5625 		    IWN_AMDPU_DENSITY(
5626 			(ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) >> 2));
5627 	}
5628 	DPRINTF(("adding BSS node\n"));
5629 	error = ops->add_node(sc, &node, 1);
5630 	if (error != 0) {
5631 		printf("%s: could not add BSS node\n", sc->sc_dev.dv_xname);
5632 		return error;
5633 	}
5634 
5635 	/* Cache address of AP in case it changes after a background scan. */
5636 	IEEE80211_ADDR_COPY(sc->bss_node_addr, ni->ni_macaddr);
5637 
5638 	DPRINTF(("setting link quality for node %d\n", node.id));
5639 	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5640 		printf("%s: could not setup link quality for node %d\n",
5641 		    sc->sc_dev.dv_xname, node.id);
5642 		return error;
5643 	}
5644 
5645 	if ((error = iwn_init_sensitivity(sc)) != 0) {
5646 		printf("%s: could not set sensitivity\n",
5647 		    sc->sc_dev.dv_xname);
5648 		return error;
5649 	}
5650 	/* Start periodic calibration timer. */
5651 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5652 	sc->calib_cnt = 0;
5653 	timeout_add_msec(&sc->calib_to, 500);
5654 
5655 	ieee80211_mira_node_init(&wn->mn);
5656 
5657 	/* Link LED always on while associated. */
5658 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5659 	return 0;
5660 }
5661 
5662 /*
5663  * We support CCMP hardware encryption/decryption of unicast frames only.
5664  * HW support for TKIP really sucks.  We should let TKIP die anyway.
5665  */
5666 int
5667 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5668     struct ieee80211_key *k)
5669 {
5670 	struct iwn_softc *sc = ic->ic_softc;
5671 	struct iwn_ops *ops = &sc->ops;
5672 	struct iwn_node *wn = (void *)ni;
5673 	struct iwn_node_info node;
5674 	uint16_t kflags;
5675 
5676 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5677 	    k->k_cipher != IEEE80211_CIPHER_CCMP)
5678 		return ieee80211_set_key(ic, ni, k);
5679 
5680 	kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
5681 	if (k->k_flags & IEEE80211_KEY_GROUP)
5682 		kflags |= IWN_KFLAG_GROUP;
5683 
5684 	memset(&node, 0, sizeof node);
5685 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
5686 	    sc->broadcast_id : wn->id;
5687 	node.control = IWN_NODE_UPDATE;
5688 	node.flags = IWN_FLAG_SET_KEY;
5689 	node.kflags = htole16(kflags);
5690 	node.kid = k->k_id;
5691 	memcpy(node.key, k->k_key, k->k_len);
5692 	DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id));
5693 	return ops->add_node(sc, &node, 1);
5694 }
5695 
5696 void
5697 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5698     struct ieee80211_key *k)
5699 {
5700 	struct iwn_softc *sc = ic->ic_softc;
5701 	struct iwn_ops *ops = &sc->ops;
5702 	struct iwn_node *wn = (void *)ni;
5703 	struct iwn_node_info node;
5704 
5705 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
5706 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
5707 		/* See comment about other ciphers above. */
5708 		ieee80211_delete_key(ic, ni, k);
5709 		return;
5710 	}
5711 	if (ic->ic_state != IEEE80211_S_RUN)
5712 		return;	/* Nothing to do. */
5713 	memset(&node, 0, sizeof node);
5714 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
5715 	    sc->broadcast_id : wn->id;
5716 	node.control = IWN_NODE_UPDATE;
5717 	node.flags = IWN_FLAG_SET_KEY;
5718 	node.kflags = htole16(IWN_KFLAG_INVALID);
5719 	node.kid = 0xff;
5720 	DPRINTF(("delete keys for node %d\n", node.id));
5721 	(void)ops->add_node(sc, &node, 1);
5722 }
5723 
5724 /*
5725  * This function is called by upper layer when HT protection settings in
5726  * beacons have changed.
5727  */
5728 void
5729 iwn_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
5730 {
5731 	struct iwn_softc *sc = ic->ic_softc;
5732 	struct iwn_ops *ops = &sc->ops;
5733 	enum ieee80211_htprot htprot;
5734 	struct iwn_rxon_assoc rxon_assoc;
5735 	int s, error;
5736 
5737 	/* Update HT protection mode setting. */
5738 	htprot = (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK) >>
5739 	    IEEE80211_HTOP1_PROT_SHIFT;
5740 	sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3));
5741 	sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot));
5742 
5743 	/* Update RXON config. */
5744 	memset(&rxon_assoc, 0, sizeof(rxon_assoc));
5745 	rxon_assoc.flags = sc->rxon.flags;
5746 	rxon_assoc.filter = sc->rxon.filter;
5747 	rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
5748 	rxon_assoc.cck_mask = sc->rxon.cck_mask;
5749 	rxon_assoc.ht_single_mask = sc->rxon.ht_single_mask;
5750 	rxon_assoc.ht_dual_mask = sc->rxon.ht_dual_mask;
5751 	rxon_assoc.ht_triple_mask = sc->rxon.ht_triple_mask;
5752 	rxon_assoc.rxchain = sc->rxon.rxchain;
5753 	rxon_assoc.acquisition = sc->rxon.acquisition;
5754 
5755 	s = splnet();
5756 
5757 	error = iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &rxon_assoc,
5758 	    sizeof(rxon_assoc), 1);
5759 	if (error != 0)
5760 		printf("%s: RXON_ASSOC command failed\n", sc->sc_dev.dv_xname);
5761 
5762 	DELAY(100);
5763 
5764 	/* All RXONs wipe the firmware's txpower table. Restore it. */
5765 	error = ops->set_txpower(sc, 1);
5766 	if (error != 0)
5767 		printf("%s: could not set TX power\n", sc->sc_dev.dv_xname);
5768 
5769 	DELAY(100);
5770 
5771 	/* Restore power saving level */
5772 	if (ic->ic_flags & IEEE80211_F_PMGTON)
5773 		error = iwn_set_pslevel(sc, 0, 3, 1);
5774 	else
5775 		error = iwn_set_pslevel(sc, 0, 0, 1);
5776 	if (error != 0)
5777 		printf("%s: could not set PS level\n", sc->sc_dev.dv_xname);
5778 
5779 	splx(s);
5780 }
5781 
5782 /*
5783  * This function is called by upper layer when an ADDBA request is received
5784  * from another STA and before the ADDBA response is sent.
5785  */
5786 int
5787 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5788     uint8_t tid)
5789 {
5790 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
5791 	struct iwn_softc *sc = ic->ic_softc;
5792 	struct iwn_ops *ops = &sc->ops;
5793 	struct iwn_node *wn = (void *)ni;
5794 	struct iwn_node_info node;
5795 
5796 	memset(&node, 0, sizeof node);
5797 	node.id = wn->id;
5798 	node.control = IWN_NODE_UPDATE;
5799 	node.flags = IWN_FLAG_SET_ADDBA;
5800 	node.addba_tid = tid;
5801 	node.addba_ssn = htole16(ba->ba_winstart);
5802 	DPRINTF(("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid,
5803 	    ba->ba_winstart));
5804 	/* XXX async command, so firmware may still fail to add BA agreement */
5805 	return ops->add_node(sc, &node, 1);
5806 }
5807 
5808 /*
5809  * This function is called by upper layer on teardown of an HT-immediate
5810  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5811  */
5812 void
5813 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5814     uint8_t tid)
5815 {
5816 	struct iwn_softc *sc = ic->ic_softc;
5817 	struct iwn_ops *ops = &sc->ops;
5818 	struct iwn_node *wn = (void *)ni;
5819 	struct iwn_node_info node;
5820 
5821 	memset(&node, 0, sizeof node);
5822 	node.id = wn->id;
5823 	node.control = IWN_NODE_UPDATE;
5824 	node.flags = IWN_FLAG_SET_DELBA;
5825 	node.delba_tid = tid;
5826 	DPRINTF(("DELBA RA=%d TID=%d\n", wn->id, tid));
5827 	(void)ops->add_node(sc, &node, 1);
5828 }
5829 
5830 /*
5831  * This function is called by upper layer when an ADDBA response is received
5832  * from another STA.
5833  */
5834 int
5835 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5836     uint8_t tid)
5837 {
5838 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5839 	struct iwn_softc *sc = ic->ic_softc;
5840 	struct iwn_ops *ops = &sc->ops;
5841 	struct iwn_node *wn = (void *)ni;
5842 	struct iwn_node_info node;
5843 	int qid = sc->first_agg_txq + tid;
5844 	int error;
5845 
5846 	/* Ensure we can map this TID to an aggregation queue. */
5847 	if (tid >= IWN_NUM_AMPDU_TID || ba->ba_winsize > IWN_SCHED_WINSZ ||
5848 	    qid > sc->ntxqs || (sc->agg_queue_mask & (1 << qid)))
5849 		return ENOSPC;
5850 
5851 	/* Enable TX for the specified RA/TID. */
5852 	wn->disable_tid &= ~(1 << tid);
5853 	memset(&node, 0, sizeof node);
5854 	node.id = wn->id;
5855 	node.control = IWN_NODE_UPDATE;
5856 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5857 	node.disable_tid = htole16(wn->disable_tid);
5858 	error = ops->add_node(sc, &node, 1);
5859 	if (error != 0)
5860 		return error;
5861 
5862 	if ((error = iwn_nic_lock(sc)) != 0)
5863 		return error;
5864 	ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5865 	iwn_nic_unlock(sc);
5866 
5867 	sc->agg_queue_mask |= (1 << qid);
5868 	sc->sc_tx_ba[tid].wn = wn;
5869 	ba->ba_bitmap = 0;
5870 
5871 	return 0;
5872 }
5873 
5874 void
5875 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5876     uint8_t tid)
5877 {
5878 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5879 	struct iwn_softc *sc = ic->ic_softc;
5880 	struct iwn_ops *ops = &sc->ops;
5881 	int qid = sc->first_agg_txq + tid;
5882 	struct iwn_node *wn = (void *)ni;
5883 	struct iwn_node_info node;
5884 
5885 	/* Discard all frames in the current window. */
5886 	iwn_ampdu_txq_advance(sc, &sc->txq[qid], qid,
5887 	    IWN_AGG_SSN_TO_TXQ_IDX(ba->ba_winend));
5888 
5889 	if (iwn_nic_lock(sc) != 0)
5890 		return;
5891 	ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5892 	iwn_nic_unlock(sc);
5893 
5894 	sc->agg_queue_mask &= ~(1 << qid);
5895 	sc->sc_tx_ba[tid].wn = NULL;
5896 	ba->ba_bitmap = 0;
5897 
5898 	/* Disable TX for the specified RA/TID. */
5899 	wn->disable_tid |= (1 << tid);
5900 	memset(&node, 0, sizeof node);
5901 	node.id = wn->id;
5902 	node.control = IWN_NODE_UPDATE;
5903 	node.flags = IWN_FLAG_SET_DISABLE_TID;
5904 	node.disable_tid = htole16(wn->disable_tid);
5905 	ops->add_node(sc, &node, 1);
5906 }
5907 
5908 void
5909 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5910     uint8_t tid, uint16_t ssn)
5911 {
5912 	struct iwn_node *wn = (void *)ni;
5913 	int qid = IWN4965_FIRST_AGG_TXQUEUE + tid;
5914 	uint16_t idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
5915 
5916 	/* Stop TX scheduler while we're changing its configuration. */
5917 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5918 	    IWN4965_TXQ_STATUS_CHGACT);
5919 
5920 	/* Assign RA/TID translation to the queue. */
5921 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5922 	    wn->id << 4 | tid);
5923 
5924 	/* Enable chain-building mode for the queue. */
5925 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5926 
5927 	/* Set starting sequence number from the ADDBA request. */
5928 	sc->txq[qid].cur = sc->txq[qid].read = idx;
5929 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
5930 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5931 
5932 	/* Set scheduler window size. */
5933 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5934 	    IWN_SCHED_WINSZ);
5935 	/* Set scheduler frame limit. */
5936 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5937 	    IWN_SCHED_LIMIT << 16);
5938 
5939 	/* Enable interrupts for the queue. */
5940 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5941 
5942 	/* Mark the queue as active. */
5943 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5944 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5945 	    iwn_tid2fifo[tid] << 1);
5946 }
5947 
5948 void
5949 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5950 {
5951 	int qid = IWN4965_FIRST_AGG_TXQUEUE + tid;
5952 	uint16_t idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
5953 
5954 	/* Stop TX scheduler while we're changing its configuration. */
5955 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5956 	    IWN4965_TXQ_STATUS_CHGACT);
5957 
5958 	/* Set starting sequence number from the ADDBA request. */
5959 	sc->txq[qid].cur = sc->txq[qid].read = idx;
5960 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
5961 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5962 
5963 	/* Disable interrupts for the queue. */
5964 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5965 
5966 	/* Mark the queue as inactive. */
5967 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5968 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5969 }
5970 
5971 void
5972 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5973     uint8_t tid, uint16_t ssn)
5974 {
5975 	int qid = IWN5000_FIRST_AGG_TXQUEUE + tid;
5976 	int idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
5977 	struct iwn_node *wn = (void *)ni;
5978 
5979 	/* Stop TX scheduler while we're changing its configuration. */
5980 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5981 	    IWN5000_TXQ_STATUS_CHGACT);
5982 
5983 	/* Assign RA/TID translation to the queue. */
5984 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5985 	    wn->id << 4 | tid);
5986 
5987 	/* Enable chain-building mode for the queue. */
5988 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5989 
5990 	/* Enable aggregation for the queue. */
5991 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5992 
5993 	/* Set starting sequence number from the ADDBA request. */
5994 	sc->txq[qid].cur = sc->txq[qid].read = idx;
5995 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
5996 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5997 
5998 	/* Set scheduler window size and frame limit. */
5999 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6000 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6001 
6002 	/* Enable interrupts for the queue. */
6003 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6004 
6005 	/* Mark the queue as active. */
6006 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6007 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
6008 }
6009 
6010 void
6011 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
6012 {
6013 	int qid = IWN5000_FIRST_AGG_TXQUEUE + tid;
6014 	int idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn);
6015 
6016 	/* Stop TX scheduler while we're changing its configuration. */
6017 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6018 	    IWN5000_TXQ_STATUS_CHGACT);
6019 
6020 	/* Disable aggregation for the queue. */
6021 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6022 
6023 	/* Set starting sequence number from the ADDBA request. */
6024 	sc->txq[qid].cur = sc->txq[qid].read = idx;
6025 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx);
6026 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6027 
6028 	/* Disable interrupts for the queue. */
6029 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6030 
6031 	/* Mark the queue as inactive. */
6032 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6033 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
6034 }
6035 
6036 /*
6037  * Query calibration tables from the initialization firmware.  We do this
6038  * only once at first boot.  Called from a process context.
6039  */
6040 int
6041 iwn5000_query_calibration(struct iwn_softc *sc)
6042 {
6043 	struct iwn5000_calib_config cmd;
6044 	int error;
6045 
6046 	memset(&cmd, 0, sizeof cmd);
6047 	cmd.ucode.once.enable = 0xffffffff;
6048 	cmd.ucode.once.start  = 0xffffffff;
6049 	cmd.ucode.once.send   = 0xffffffff;
6050 	cmd.ucode.flags       = 0xffffffff;
6051 	DPRINTF(("sending calibration query\n"));
6052 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
6053 	if (error != 0)
6054 		return error;
6055 
6056 	/* Wait at most two seconds for calibration to complete. */
6057 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
6058 		error = tsleep_nsec(sc, PCATCH, "iwncal", SEC_TO_NSEC(2));
6059 	return error;
6060 }
6061 
6062 /*
6063  * Send calibration results to the runtime firmware.  These results were
6064  * obtained on first boot from the initialization firmware.
6065  */
6066 int
6067 iwn5000_send_calibration(struct iwn_softc *sc)
6068 {
6069 	int idx, error;
6070 
6071 	for (idx = 0; idx < 5; idx++) {
6072 		if (sc->calibcmd[idx].buf == NULL)
6073 			continue;	/* No results available. */
6074 		DPRINTF(("send calibration result idx=%d len=%d\n",
6075 		    idx, sc->calibcmd[idx].len));
6076 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
6077 		    sc->calibcmd[idx].len, 0);
6078 		if (error != 0) {
6079 			printf("%s: could not send calibration result\n",
6080 			    sc->sc_dev.dv_xname);
6081 			return error;
6082 		}
6083 	}
6084 	return 0;
6085 }
6086 
6087 int
6088 iwn5000_send_wimax_coex(struct iwn_softc *sc)
6089 {
6090 	struct iwn5000_wimax_coex wimax;
6091 
6092 #ifdef notyet
6093 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6094 		/* Enable WiMAX coexistence for combo adapters. */
6095 		wimax.flags =
6096 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
6097 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
6098 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
6099 		    IWN_WIMAX_COEX_ENABLE;
6100 		memcpy(wimax.events, iwn6050_wimax_events,
6101 		    sizeof iwn6050_wimax_events);
6102 	} else
6103 #endif
6104 	{
6105 		/* Disable WiMAX coexistence. */
6106 		wimax.flags = 0;
6107 		memset(wimax.events, 0, sizeof wimax.events);
6108 	}
6109 	DPRINTF(("Configuring WiMAX coexistence\n"));
6110 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6111 }
6112 
6113 int
6114 iwn5000_crystal_calib(struct iwn_softc *sc)
6115 {
6116 	struct iwn5000_phy_calib_crystal cmd;
6117 
6118 	memset(&cmd, 0, sizeof cmd);
6119 	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6120 	cmd.ngroups = 1;
6121 	cmd.isvalid = 1;
6122 	cmd.cap_pin[0] = letoh32(sc->eeprom_crystal) & 0xff;
6123 	cmd.cap_pin[1] = (letoh32(sc->eeprom_crystal) >> 16) & 0xff;
6124 	DPRINTF(("sending crystal calibration %d, %d\n",
6125 	    cmd.cap_pin[0], cmd.cap_pin[1]));
6126 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6127 }
6128 
6129 int
6130 iwn6000_temp_offset_calib(struct iwn_softc *sc)
6131 {
6132 	struct iwn6000_phy_calib_temp_offset cmd;
6133 
6134 	memset(&cmd, 0, sizeof cmd);
6135 	cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET;
6136 	cmd.ngroups = 1;
6137 	cmd.isvalid = 1;
6138 	if (sc->eeprom_temp != 0)
6139 		cmd.offset = htole16(sc->eeprom_temp);
6140 	else
6141 		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6142 	DPRINTF(("setting radio sensor offset to %d\n", letoh16(cmd.offset)));
6143 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6144 }
6145 
6146 int
6147 iwn2000_temp_offset_calib(struct iwn_softc *sc)
6148 {
6149 	struct iwn2000_phy_calib_temp_offset cmd;
6150 
6151 	memset(&cmd, 0, sizeof cmd);
6152 	cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET;
6153 	cmd.ngroups = 1;
6154 	cmd.isvalid = 1;
6155 	if (sc->eeprom_rawtemp != 0) {
6156 		cmd.offset_low = htole16(sc->eeprom_rawtemp);
6157 		cmd.offset_high = htole16(sc->eeprom_temp);
6158 	} else {
6159 		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
6160 		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
6161 	}
6162 	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
6163 	DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n",
6164 	    letoh16(cmd.offset_low), letoh16(cmd.offset_high),
6165 	    letoh16(cmd.burnt_voltage_ref)));
6166 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6167 }
6168 
6169 /*
6170  * This function is called after the runtime firmware notifies us of its
6171  * readiness (called in a process context).
6172  */
6173 int
6174 iwn4965_post_alive(struct iwn_softc *sc)
6175 {
6176 	int error, qid;
6177 
6178 	if ((error = iwn_nic_lock(sc)) != 0)
6179 		return error;
6180 
6181 	/* Clear TX scheduler state in SRAM. */
6182 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6183 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6184 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6185 
6186 	/* Set physical address of TX scheduler rings (1KB aligned). */
6187 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6188 
6189 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6190 
6191 	/* Disable chain mode for all our 16 queues. */
6192 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6193 
6194 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6195 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6196 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6197 
6198 		/* Set scheduler window size. */
6199 		iwn_mem_write(sc, sc->sched_base +
6200 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6201 		/* Set scheduler frame limit. */
6202 		iwn_mem_write(sc, sc->sched_base +
6203 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6204 		    IWN_SCHED_LIMIT << 16);
6205 	}
6206 
6207 	/* Enable interrupts for all our 16 queues. */
6208 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6209 	/* Identify TX FIFO rings (0-7). */
6210 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6211 
6212 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6213 	for (qid = 0; qid < 7; qid++) {
6214 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6215 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6216 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6217 	}
6218 	iwn_nic_unlock(sc);
6219 	return 0;
6220 }
6221 
6222 /*
6223  * This function is called after the initialization or runtime firmware
6224  * notifies us of its readiness (called in a process context).
6225  */
6226 int
6227 iwn5000_post_alive(struct iwn_softc *sc)
6228 {
6229 	int error, qid;
6230 
6231 	/* Switch to using ICT interrupt mode. */
6232 	iwn5000_ict_reset(sc);
6233 
6234 	if ((error = iwn_nic_lock(sc)) != 0)
6235 		return error;
6236 
6237 	/* Clear TX scheduler state in SRAM. */
6238 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6239 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6240 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6241 
6242 	/* Set physical address of TX scheduler rings (1KB aligned). */
6243 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6244 
6245 	/* Disable scheduler chain extension (enabled by default in HW). */
6246 	iwn_prph_write(sc, IWN5000_SCHED_CHAINEXT_EN, 0);
6247 
6248 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6249 
6250 	/* Enable chain mode for all queues, except command queue. */
6251 	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
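	/* 0xfffef = bits 0-19 set except bit 4, presumably the cmd queue. */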
6252 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6253 
6254 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6255 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6256 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6257 
6258 		iwn_mem_write(sc, sc->sched_base +
6259 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6260 		/* Set scheduler window size and frame limit. */
6261 		iwn_mem_write(sc, sc->sched_base +
6262 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6263 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6264 	}
6265 
6266 	/* Enable interrupts for all our 20 queues. */
6267 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6268 	/* Identify TX FIFO rings (0-7). */
6269 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6270 
6271 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6272 	for (qid = 0; qid < 7; qid++) {
6273 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6274 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6275 		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6276 	}
6277 	iwn_nic_unlock(sc);
6278 
6279 	/* Configure WiMAX coexistence for combo adapters. */
6280 	error = iwn5000_send_wimax_coex(sc);
6281 	if (error != 0) {
6282 		printf("%s: could not configure WiMAX coexistence\n",
6283 		    sc->sc_dev.dv_xname);
6284 		return error;
6285 	}
6286 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6287 		/* Perform crystal calibration. */
6288 		error = iwn5000_crystal_calib(sc);
6289 		if (error != 0) {
6290 			printf("%s: crystal calibration failed\n",
6291 			    sc->sc_dev.dv_xname);
6292 			return error;
6293 		}
6294 	}
6295 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6296 		/* Query calibration from the initialization firmware. */
6297 		if ((error = iwn5000_query_calibration(sc)) != 0) {
6298 			printf("%s: could not query calibration\n",
6299 			    sc->sc_dev.dv_xname);
6300 			return error;
6301 		}
6302 		/*
6303 		 * We have the calibration results now, reboot with the
6304 		 * runtime firmware (call ourselves recursively!)
6305 		 */
6306 		iwn_hw_stop(sc);
6307 		error = iwn_hw_init(sc);
6308 	} else {
6309 		/* Send calibration results to runtime firmware. */
6310 		error = iwn5000_send_calibration(sc);
6311 	}
6312 	return error;
6313 }
6314 
6315 /*
6316  * The firmware boot code is small and is intended to be copied directly into
6317  * the NIC internal memory (no DMA transfer).
6318  */
6319 int
6320 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6321 {
6322 	int error, ntries;
6323 
6324 	size /= sizeof (uint32_t);
6325 
6326 	if ((error = iwn_nic_lock(sc)) != 0)
6327 		return error;
6328 
6329 	/* Copy microcode image into NIC memory. */
6330 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6331 	    (const uint32_t *)ucode, size);
6332 
6333 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6334 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6335 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6336 
6337 	/* Start boot load now. */
6338 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6339 
6340 	/* Wait for transfer to complete. */
6341 	for (ntries = 0; ntries < 1000; ntries++) {
6342 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6343 		    IWN_BSM_WR_CTRL_START))
6344 			break;
6345 		DELAY(10);
6346 	}
6347 	if (ntries == 1000) {
6348 		printf("%s: could not load boot firmware\n",
6349 		    sc->sc_dev.dv_xname);
6350 		iwn_nic_unlock(sc);
6351 		return ETIMEDOUT;
6352 	}
6353 
6354 	/* Enable boot after power up. */
6355 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6356 
6357 	iwn_nic_unlock(sc);
6358 	return 0;
6359 }
6360 
6361 int
6362 iwn4965_load_firmware(struct iwn_softc *sc)
6363 {
6364 	struct iwn_fw_info *fw = &sc->fw;
6365 	struct iwn_dma_info *dma = &sc->fw_dma;
6366 	int error;
6367 
6368 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6369 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6370 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz,
6371 	    BUS_DMASYNC_PREWRITE);
6372 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6373 	    fw->init.text, fw->init.textsz);
6374 	bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
6375 	    fw->init.textsz, BUS_DMASYNC_PREWRITE);
6376 
6377 	/* Tell adapter where to find initialization sections. */
6378 	if ((error = iwn_nic_lock(sc)) != 0)
6379 		return error;
6380 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6381 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6382 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6383 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6384 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6385 	iwn_nic_unlock(sc);
6386 
6387 	/* Load firmware boot code. */
6388 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6389 	if (error != 0) {
6390 		printf("%s: could not load boot firmware\n",
6391 		    sc->sc_dev.dv_xname);
6392 		return error;
6393 	}
6394 	/* Now press "execute". */
6395 	IWN_WRITE(sc, IWN_RESET, 0);
6396 
6397 	/* Wait at most one second for first alive notification. */
6398 	if ((error = tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(1))) != 0) {
6399 		printf("%s: timeout waiting for adapter to initialize\n",
6400 		    sc->sc_dev.dv_xname);
6401 		return error;
6402 	}
6403 
6404 	/* Retrieve current temperature for initial TX power calibration. */
6405 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6406 	sc->temp = iwn4965_get_temperature(sc);
6407 
6408 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6409 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6410 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz,
6411 	    BUS_DMASYNC_PREWRITE);
6412 	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6413 	    fw->main.text, fw->main.textsz);
6414 	bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
6415 	    fw->main.textsz, BUS_DMASYNC_PREWRITE);
6416 
6417 	/* Tell adapter where to find runtime sections. */
6418 	if ((error = iwn_nic_lock(sc)) != 0)
6419 		return error;
6420 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6421 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6422 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6423 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6424 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6425 	    IWN_FW_UPDATED | fw->main.textsz);
6426 	iwn_nic_unlock(sc);
6427 
6428 	return 0;
6429 }
6430 
6431 int
6432 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6433     const uint8_t *section, int size)
6434 {
6435 	struct iwn_dma_info *dma = &sc->fw_dma;
6436 	int error;
6437 
6438 	/* Copy firmware section into pre-allocated DMA-safe memory. */
6439 	memcpy(dma->vaddr, section, size);
6440 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
6441 
6442 	if ((error = iwn_nic_lock(sc)) != 0)
6443 		return error;
6444 
6445 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6446 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6447 
6448 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6449 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6450 	    IWN_LOADDR(dma->paddr));
6451 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6452 	    IWN_HIADDR(dma->paddr) << 28 | size);
6453 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6454 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6455 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6456 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6457 
6458 	/* Kick Flow Handler to start DMA transfer. */
6459 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6460 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6461 
6462 	iwn_nic_unlock(sc);
6463 
6464 	/* Wait at most five seconds for FH DMA transfer to complete. */
6465 	return tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(5));
6466 }
6467 
6468 int
6469 iwn5000_load_firmware(struct iwn_softc *sc)
6470 {
6471 	struct iwn_fw_part *fw;
6472 	int error;
6473 
6474 	/* Load the initialization firmware on first boot only. */
6475 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6476 	    &sc->fw.main : &sc->fw.init;
6477 
6478 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6479 	    fw->text, fw->textsz);
6480 	if (error != 0) {
6481 		printf("%s: could not load firmware %s section\n",
6482 		    sc->sc_dev.dv_xname, ".text");
6483 		return error;
6484 	}
6485 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6486 	    fw->data, fw->datasz);
6487 	if (error != 0) {
6488 		printf("%s: could not load firmware %s section\n",
6489 		    sc->sc_dev.dv_xname, ".data");
6490 		return error;
6491 	}
6492 
6493 	/* Now press "execute". */
6494 	IWN_WRITE(sc, IWN_RESET, 0);
6495 	return 0;
6496 }
6497 
6498 /*
6499  * Extract text and data sections from a legacy firmware image.
6500  */
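/*
 * Legacy image layout, as parsed below: an API revision word, an optional
 * build number word (only when IWN_FW_API(rev) >= 3), five little-endian
 * size words (main text/data, init text/data, boot text), followed by the
 * sections themselves in that same order.
 */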
6501 int
6502 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6503 {
6504 	const uint32_t *ptr;
6505 	size_t hdrlen = 24;
6506 	uint32_t rev;
6507 
6508 	ptr = (const uint32_t *)fw->data;
6509 	rev = letoh32(*ptr++);
6510 
6511 	/* Check firmware API version. */
6512 	if (IWN_FW_API(rev) <= 1) {
6513 		printf("%s: bad firmware, need API version >=2\n",
6514 		    sc->sc_dev.dv_xname);
6515 		return EINVAL;
6516 	}
6517 	if (IWN_FW_API(rev) >= 3) {
6518 		/* Skip build number (version 2 header). */
6519 		hdrlen += 4;
6520 		ptr++;
6521 	}
6522 	if (fw->size < hdrlen) {
6523 		printf("%s: firmware too short: %zu bytes\n",
6524 		    sc->sc_dev.dv_xname, fw->size);
6525 		return EINVAL;
6526 	}
6527 	fw->main.textsz = letoh32(*ptr++);
6528 	fw->main.datasz = letoh32(*ptr++);
6529 	fw->init.textsz = letoh32(*ptr++);
6530 	fw->init.datasz = letoh32(*ptr++);
6531 	fw->boot.textsz = letoh32(*ptr++);
6532 
6533 	/* Check that all firmware sections fit. */
6534 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6535 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6536 		printf("%s: firmware too short: %zu bytes\n",
6537 		    sc->sc_dev.dv_xname, fw->size);
6538 		return EINVAL;
6539 	}
6540 
6541 	/* Get pointers to firmware sections. */
6542 	fw->main.text = (const uint8_t *)ptr;
6543 	fw->main.data = fw->main.text + fw->main.textsz;
6544 	fw->init.text = fw->main.data + fw->main.datasz;
6545 	fw->init.data = fw->init.text + fw->init.textsz;
6546 	fw->boot.text = fw->init.data + fw->init.datasz;
6547 	return 0;
6548 }
6549 
6550 /*
6551  * Extract text and data sections from a TLV firmware image.
6552  */
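/*
 * TLV image layout, as parsed below: a header carrying a signature,
 * description, build number and a 64-bit mask of supported alternatives,
 * followed by (type, alternative, length) records whose payloads are
 * padded to 32-bit boundaries.
 */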
6553 int
6554 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6555     uint16_t alt)
6556 {
6557 	const struct iwn_fw_tlv_hdr *hdr;
6558 	const struct iwn_fw_tlv *tlv;
6559 	const uint8_t *ptr, *end;
6560 	uint64_t altmask;
6561 	uint32_t len;
6562 
6563 	if (fw->size < sizeof (*hdr)) {
6564 		printf("%s: firmware too short: %zu bytes\n",
6565 		    sc->sc_dev.dv_xname, fw->size);
6566 		return EINVAL;
6567 	}
6568 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6569 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6570 		printf("%s: bad firmware signature 0x%08x\n",
6571 		    sc->sc_dev.dv_xname, letoh32(hdr->signature));
6572 		return EINVAL;
6573 	}
6574 	DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr,
6575 	    letoh32(hdr->build)));
6576 
6577 	/*
6578 	 * Select the closest supported alternative that is less than
6579 	 * or equal to the specified one.
6580 	 */
6581 	altmask = letoh64(hdr->altmask);
6582 	while (alt > 0 && !(altmask & (1ULL << alt)))
6583 		alt--;	/* Downgrade. */
6584 	DPRINTF(("using alternative %d\n", alt));
6585 
6586 	ptr = (const uint8_t *)(hdr + 1);
6587 	end = (const uint8_t *)(fw->data + fw->size);
6588 
6589 	/* Parse type-length-value fields. */
6590 	while (ptr + sizeof (*tlv) <= end) {
6591 		tlv = (const struct iwn_fw_tlv *)ptr;
6592 		len = letoh32(tlv->len);
6593 
6594 		ptr += sizeof (*tlv);
6595 		if (ptr + len > end) {
6596 			printf("%s: firmware too short: %zu bytes\n",
6597 			    sc->sc_dev.dv_xname, fw->size);
6598 			return EINVAL;
6599 		}
6600 		/* Skip other alternatives. */
6601 		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6602 			goto next;
6603 
6604 		switch (letoh16(tlv->type)) {
6605 		case IWN_FW_TLV_MAIN_TEXT:
6606 			fw->main.text = ptr;
6607 			fw->main.textsz = len;
6608 			break;
6609 		case IWN_FW_TLV_MAIN_DATA:
6610 			fw->main.data = ptr;
6611 			fw->main.datasz = len;
6612 			break;
6613 		case IWN_FW_TLV_INIT_TEXT:
6614 			fw->init.text = ptr;
6615 			fw->init.textsz = len;
6616 			break;
6617 		case IWN_FW_TLV_INIT_DATA:
6618 			fw->init.data = ptr;
6619 			fw->init.datasz = len;
6620 			break;
6621 		case IWN_FW_TLV_BOOT_TEXT:
6622 			fw->boot.text = ptr;
6623 			fw->boot.textsz = len;
6624 			break;
6625 		case IWN_FW_TLV_ENH_SENS:
6626 			if (len != 0) {
6627 				printf("%s: TLV type %d has invalid size %u\n",
6628 				    sc->sc_dev.dv_xname, letoh16(tlv->type),
6629 				    len);
6630 				goto next;
6631 			}
6632 			sc->sc_flags |= IWN_FLAG_ENH_SENS;
6633 			break;
6634 		case IWN_FW_TLV_PHY_CALIB:
6635 			if (len != sizeof(uint32_t)) {
6636 				printf("%s: TLV type %d has invalid size %u\n",
6637 				    sc->sc_dev.dv_xname, letoh16(tlv->type),
6638 				    len);
6639 				goto next;
6640 			}
6641 			if (letoh32(*ptr) <= IWN5000_PHY_CALIB_MAX) {
6642 				sc->reset_noise_gain = letoh32(*ptr);
6643 				sc->noise_gain = letoh32(*ptr) + 1;
6644 			}
6645 			break;
6646 		case IWN_FW_TLV_FLAGS:
6647 			if (len < sizeof(uint32_t))
6648 				break;
6649 			if (len % sizeof(uint32_t))
6650 				break;
6651 			sc->tlv_feature_flags = letoh32(*ptr);
6652 			DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags));
6653 			break;
6654 		default:
6655 			DPRINTF(("TLV type %d not handled\n",
6656 			    letoh16(tlv->type)));
6657 			break;
6658 		}
6659  next:		/* TLV fields are 32-bit aligned. */
6660 		ptr += (len + 3) & ~3;
6661 	}
6662 	return 0;
6663 }
6664 
6665 int
6666 iwn_read_firmware(struct iwn_softc *sc)
6667 {
6668 	struct iwn_fw_info *fw = &sc->fw;
6669 	int error;
6670 
6671 	/*
6672 	 * Some PHY calibration commands are firmware-dependent; these
6673 	 * are the default values that will be overridden if
6674 	 * necessary.
6675 	 */
6676 	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
6677 	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
6678 
6679 	memset(fw, 0, sizeof (*fw));
6680 
6681 	/* Read firmware image from filesystem. */
6682 	if ((error = loadfirmware(sc->fwname, &fw->data, &fw->size)) != 0) {
6683 		printf("%s: could not read firmware %s (error %d)\n",
6684 		    sc->sc_dev.dv_xname, sc->fwname, error);
6685 		return error;
6686 	}
6687 	if (fw->size < sizeof (uint32_t)) {
6688 		printf("%s: firmware too short: %zu bytes\n",
6689 		    sc->sc_dev.dv_xname, fw->size);
6690 		free(fw->data, M_DEVBUF, fw->size);
6691 		return EINVAL;
6692 	}
6693 
6694 	/* Retrieve text and data sections. */
6695 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6696 		error = iwn_read_firmware_leg(sc, fw);
6697 	else
6698 		error = iwn_read_firmware_tlv(sc, fw, 1);
6699 	if (error != 0) {
6700 		printf("%s: could not read firmware sections\n",
6701 		    sc->sc_dev.dv_xname);
6702 		free(fw->data, M_DEVBUF, fw->size);
6703 		return error;
6704 	}
6705 
6706 	/* Make sure text and data sections fit in hardware memory. */
6707 	if (fw->main.textsz > sc->fw_text_maxsz ||
6708 	    fw->main.datasz > sc->fw_data_maxsz ||
6709 	    fw->init.textsz > sc->fw_text_maxsz ||
6710 	    fw->init.datasz > sc->fw_data_maxsz ||
6711 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6712 	    (fw->boot.textsz & 3) != 0) {
6713 		printf("%s: firmware sections too large\n",
6714 		    sc->sc_dev.dv_xname);
6715 		free(fw->data, M_DEVBUF, fw->size);
6716 		return EINVAL;
6717 	}
6718 
6719 	/* We can proceed with loading the firmware. */
6720 	return 0;
6721 }
6722 
6723 int
6724 iwn_clock_wait(struct iwn_softc *sc)
6725 {
6726 	int ntries;
6727 
6728 	/* Set "initialization complete" bit. */
6729 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6730 
6731 	/* Wait for clock stabilization. */
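	/* Poll for at most 2500 * 10us = 25ms. */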
6732 	for (ntries = 0; ntries < 2500; ntries++) {
6733 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6734 			return 0;
6735 		DELAY(10);
6736 	}
6737 	printf("%s: timeout waiting for clock stabilization\n",
6738 	    sc->sc_dev.dv_xname);
6739 	return ETIMEDOUT;
6740 }
6741 
6742 int
6743 iwn_apm_init(struct iwn_softc *sc)
6744 {
6745 	pcireg_t reg;
6746 	int error;
6747 
6748 	/* Disable L0s exit timer (NMI bug workaround). */
6749 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6750 	/* Don't wait for ICH L0s (ICH bug workaround). */
6751 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6752 
6753 	/* Set FH wait threshold to max (HW bug under stress workaround). */
6754 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6755 
6756 	/* Enable HAP INTA to move adapter from L1a to L0s. */
6757 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6758 
6759 	/* Retrieve PCIe Active State Power Management (ASPM). */
6760 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
6761 	    sc->sc_cap_off + PCI_PCIE_LCSR);
6762 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6763 	if (reg & PCI_PCIE_LCSR_ASPM_L1)	/* L1 Entry enabled. */
6764 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6765 	else
6766 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6767 
6768 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6769 	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6770 		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6771 
6772 	/* Wait for clock stabilization before accessing prph. */
6773 	if ((error = iwn_clock_wait(sc)) != 0)
6774 		return error;
6775 
6776 	if ((error = iwn_nic_lock(sc)) != 0)
6777 		return error;
6778 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6779 		/* Enable DMA and BSM (Bootstrap State Machine). */
6780 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6781 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6782 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6783 	} else {
6784 		/* Enable DMA. */
6785 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6786 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6787 	}
6788 	DELAY(20);
6789 	/* Disable L1-Active. */
6790 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6791 	iwn_nic_unlock(sc);
6792 
6793 	return 0;
6794 }
6795 
6796 void
6797 iwn_apm_stop_master(struct iwn_softc *sc)
6798 {
6799 	int ntries;
6800 
6801 	/* Stop busmaster DMA activity. */
6802 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6803 	for (ntries = 0; ntries < 100; ntries++) {
6804 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6805 			return;
6806 		DELAY(10);
6807 	}
6808 	printf("%s: timeout waiting for master\n", sc->sc_dev.dv_xname);
6809 }
6810 
6811 void
6812 iwn_apm_stop(struct iwn_softc *sc)
6813 {
6814 	iwn_apm_stop_master(sc);
6815 
6816 	/* Reset the entire device. */
6817 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6818 	DELAY(10);
6819 	/* Clear "initialization complete" bit. */
6820 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6821 }
6822 
6823 int
6824 iwn4965_nic_config(struct iwn_softc *sc)
6825 {
6826 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6827 		/*
6828 		 * I don't believe this to be correct but this is what the
6829 		 * vendor driver is doing. Probably the bits should not be
6830 		 * shifted in IWN_RFCFG_*.
6831 		 */
6832 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6833 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6834 		    IWN_RFCFG_STEP(sc->rfcfg) |
6835 		    IWN_RFCFG_DASH(sc->rfcfg));
6836 	}
6837 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6838 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6839 	return 0;
6840 }
6841 
6842 int
6843 iwn5000_nic_config(struct iwn_softc *sc)
6844 {
6845 	uint32_t tmp;
6846 	int error;
6847 
6848 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6849 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6850 		    IWN_RFCFG_TYPE(sc->rfcfg) |
6851 		    IWN_RFCFG_STEP(sc->rfcfg) |
6852 		    IWN_RFCFG_DASH(sc->rfcfg));
6853 	}
6854 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6855 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6856 
6857 	if ((error = iwn_nic_lock(sc)) != 0)
6858 		return error;
6859 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6860 
6861 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6862 		/*
6863 		 * Select first Switching Voltage Regulator (1.32V) to
6864 		 * solve a stability issue related to noisy DC2DC line
6865 		 * in the silicon of 1000 Series.
6866 		 */
6867 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6868 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6869 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6870 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6871 	}
6872 	iwn_nic_unlock(sc);
6873 
6874 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6875 		/* Use internal power amplifier only. */
6876 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6877 	}
6878 	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6879 	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6880 		/* Indicate that ROM calibration version is >=6. */
6881 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6882 	}
6883 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6884 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6885 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
6886 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
6887 	    sc->hw_type == IWN_HW_REV_TYPE_135 ||
6888 	    sc->hw_type == IWN_HW_REV_TYPE_105)
6889 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT);
6890 	return 0;
6891 }
6892 
6893 /*
6894  * Take NIC ownership over Intel Active Management Technology (AMT).
6895  */
6896 int
6897 iwn_hw_prepare(struct iwn_softc *sc)
6898 {
6899 	int ntries;
6900 
6901 	/* Check if hardware is ready. */
6902 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6903 	for (ntries = 0; ntries < 5; ntries++) {
6904 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6905 		    IWN_HW_IF_CONFIG_NIC_READY)
6906 			return 0;
6907 		DELAY(10);
6908 	}
6909 
6910 	/* Hardware not ready, force into ready state. */
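	/*
	 * Ask the ME/AMT side to release the device and poll for at most
	 * 15000 * 10us = 150ms for the prepare handshake to complete.
	 */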
6911 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6912 	for (ntries = 0; ntries < 15000; ntries++) {
6913 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6914 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
6915 			break;
6916 		DELAY(10);
6917 	}
6918 	if (ntries == 15000)
6919 		return ETIMEDOUT;
6920 
6921 	/* Hardware should be ready now. */
6922 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6923 	for (ntries = 0; ntries < 5; ntries++) {
6924 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6925 		    IWN_HW_IF_CONFIG_NIC_READY)
6926 			return 0;
6927 		DELAY(10);
6928 	}
6929 	return ETIMEDOUT;
6930 }
6931 
6932 int
6933 iwn_hw_init(struct iwn_softc *sc)
6934 {
6935 	struct iwn_ops *ops = &sc->ops;
6936 	int error, chnl, qid;
6937 
6938 	/* Clear pending interrupts. */
6939 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
6940 
6941 	if ((error = iwn_apm_init(sc)) != 0) {
6942 		printf("%s: could not power on adapter\n",
6943 		    sc->sc_dev.dv_xname);
6944 		return error;
6945 	}
6946 
6947 	/* Select VMAIN power source. */
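	/* VMAIN corresponds to the power-source field being cleared. */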
6948 	if ((error = iwn_nic_lock(sc)) != 0)
6949 		return error;
6950 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6951 	iwn_nic_unlock(sc);
6952 
6953 	/* Perform adapter-specific initialization. */
6954 	if ((error = ops->nic_config(sc)) != 0)
6955 		return error;
6956 
6957 	/* Initialize RX ring. */
6958 	if ((error = iwn_nic_lock(sc)) != 0)
6959 		return error;
6960 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6961 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6962 	/* Set physical address of RX ring (256-byte aligned). */
6963 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6964 	/* Set physical address of RX status (16-byte aligned). */
6965 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6966 	/* Enable RX. */
6967 	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6968 	    IWN_FH_RX_CONFIG_ENA           |
6969 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
6970 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
6971 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
6972 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0x11) | /* about 1/2 msec */
6973 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6974 	iwn_nic_unlock(sc);
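	/*
	 * Post the initial RX write pointer; "& ~7" rounds it down to a
	 * multiple of 8, which the RX engine appears to expect.
	 */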
6975 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6976 
6977 	if ((error = iwn_nic_lock(sc)) != 0)
6978 		return error;
6979 
6980 	/* Initialize TX scheduler. */
6981 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6982 
6983 	/* Set physical address of "keep warm" page (16-byte aligned). */
6984 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6985 
6986 	/* Initialize TX rings. */
6987 	for (qid = 0; qid < sc->ntxqs; qid++) {
6988 		struct iwn_tx_ring *txq = &sc->txq[qid];
6989 
6990 		/* Set physical address of TX ring (256-byte aligned). */
6991 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6992 		    txq->desc_dma.paddr >> 8);
6993 	}
6994 	iwn_nic_unlock(sc);
6995 
6996 	/* Enable DMA channels. */
6997 	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6998 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6999 		    IWN_FH_TX_CONFIG_DMA_ENA |
7000 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
7001 	}
7002 
7003 	/* Clear "radio off" and "commands blocked" bits. */
7004 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7005 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
7006 
7007 	/* Clear pending interrupts. */
7008 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7009 	/* Enable interrupt coalescing. */
7010 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
7011 	/* Enable interrupts. */
7012 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7013 
7014 	/* _Really_ make sure "radio off" bit is cleared! */
7015 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7016 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7017 
7018 	/* Enable shadow registers. */
7019 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
7020 		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
7021 
7022 	if ((error = ops->load_firmware(sc)) != 0) {
7023 		printf("%s: could not load firmware\n", sc->sc_dev.dv_xname);
7024 		return error;
7025 	}
7026 	/* Wait at most one second for firmware alive notification. */
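	/*
	 * The sleep below is presumably ended by a wakeup(sc) from the
	 * interrupt path once the "alive" notification arrives; PCATCH
	 * lets a signal abort the wait.
	 */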
7027 	if ((error = tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(1))) != 0) {
7028 		printf("%s: timeout waiting for adapter to initialize\n",
7029 		    sc->sc_dev.dv_xname);
7030 		return error;
7031 	}
7032 	/* Do post-firmware initialization. */
7033 	return ops->post_alive(sc);
7034 }
7035 
7036 void
7037 iwn_hw_stop(struct iwn_softc *sc)
7038 {
7039 	int chnl, qid, ntries;
7040 
7041 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
7042 
7043 	/* Disable interrupts. */
7044 	IWN_WRITE(sc, IWN_INT_MASK, 0);
7045 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7046 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
7047 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7048 
7049 	/* Make sure we no longer hold the NIC lock. */
7050 	iwn_nic_unlock(sc);
7051 
7052 	/* Stop TX scheduler. */
7053 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7054 
7055 	/* Stop all DMA channels. */
7056 	if (iwn_nic_lock(sc) == 0) {
7057 		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7058 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
7059 			for (ntries = 0; ntries < 200; ntries++) {
7060 				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
7061 				    IWN_FH_TX_STATUS_IDLE(chnl))
7062 					break;
7063 				DELAY(10);
7064 			}
7065 		}
7066 		iwn_nic_unlock(sc);
7067 	}
7068 
7069 	/* Stop RX ring. */
7070 	iwn_reset_rx_ring(sc, &sc->rxq);
7071 
7072 	/* Reset all TX rings. */
7073 	for (qid = 0; qid < sc->ntxqs; qid++)
7074 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
7075 
7076 	if (iwn_nic_lock(sc) == 0) {
7077 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
7078 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7079 		iwn_nic_unlock(sc);
7080 	}
7081 	DELAY(5);
7082 	/* Power OFF adapter. */
7083 	iwn_apm_stop(sc);
7084 }
7085 
7086 int
7087 iwn_init(struct ifnet *ifp)
7088 {
7089 	struct iwn_softc *sc = ifp->if_softc;
7090 	struct ieee80211com *ic = &sc->sc_ic;
7091 	int error;
7092 
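	/*
	 * Forget the BSS node address and any Tx aggregation state left
	 * over from a previous run before bringing the hardware up.
	 */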
7093 	memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr));
7094 	sc->agg_queue_mask = 0;
7095 	memset(sc->sc_tx_ba, 0, sizeof(sc->sc_tx_ba));
7096 
7097 	if ((error = iwn_hw_prepare(sc)) != 0) {
7098 		printf("%s: hardware not ready\n", sc->sc_dev.dv_xname);
7099 		goto fail;
7100 	}
7101 
7102 	/* Initialize interrupt mask to default value. */
7103 	sc->int_mask = IWN_INT_MASK_DEF;
7104 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7105 
7106 	/* Check that the radio is not disabled by hardware switch. */
7107 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7108 		printf("%s: radio is disabled by hardware switch\n",
7109 		    sc->sc_dev.dv_xname);
7110 		error = EPERM;	/* :-) */
7111 		/* Re-enable interrupts. */
7112 		IWN_WRITE(sc, IWN_INT, 0xffffffff);
7113 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7114 		return error;
7115 	}
7116 
7117 	/* Read firmware images from the filesystem. */
7118 	if ((error = iwn_read_firmware(sc)) != 0) {
7119 		printf("%s: could not read firmware\n", sc->sc_dev.dv_xname);
7120 		goto fail;
7121 	}
7122 
7123 	/* Initialize hardware and upload firmware. */
7124 	error = iwn_hw_init(sc);
7125 	free(sc->fw.data, M_DEVBUF, sc->fw.size);
7126 	if (error != 0) {
7127 		printf("%s: could not initialize hardware\n",
7128 		    sc->sc_dev.dv_xname);
7129 		goto fail;
7130 	}
7131 
7132 	/* Configure adapter now that it is ready. */
7133 	if ((error = iwn_config(sc)) != 0) {
7134 		printf("%s: could not configure device\n",
7135 		    sc->sc_dev.dv_xname);
7136 		goto fail;
7137 	}
7138 
7139 	ifq_clr_oactive(&ifp->if_snd);
7140 	ifp->if_flags |= IFF_RUNNING;
7141 
7142 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7143 		ieee80211_begin_scan(ifp);
7144 	else
7145 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
7146 
7147 	return 0;
7148 
7149 fail:	iwn_stop(ifp);
7150 	return error;
7151 }
7152 
7153 void
7154 iwn_stop(struct ifnet *ifp)
7155 {
7156 	struct iwn_softc *sc = ifp->if_softc;
7157 	struct ieee80211com *ic = &sc->sc_ic;
7158 
7159 	timeout_del(&sc->calib_to);
7160 	ifp->if_timer = sc->sc_tx_timer = 0;
7161 	ifp->if_flags &= ~IFF_RUNNING;
7162 	ifq_clr_oactive(&ifp->if_snd);
7163 
7164 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
7165 
7166 	/* Power OFF hardware. */
7167 	iwn_hw_stop(sc);
7168 }
7169