xref: /freebsd/sys/dev/wpi/if_wpi.c (revision 6419bb52)
1 /*-
2  * Copyright (c) 2006,2007
3  *	Damien Bergamini <damien.bergamini@free.fr>
4  *	Benjamin Close <Benjamin.Close@clearchain.com>
5  * Copyright (c) 2015 Andriy Voskoboinyk <avos@FreeBSD.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/cdefs.h>
21 __FBSDID("$FreeBSD$");
22 
23 /*
24  * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
25  *
26  * Unlike many other adapters, the 3945ABG does not implement most of its
27  * functionality in fixed hardware.  Instead, at run time the device is
28  * brought into a known state and told to load the boot firmware.  The boot
29  * firmware then loads the "init" and "main" binary firmware images into
30  * SRAM on the card via DMA.  Once the firmware is running, the driver and
31  * the hardware communicate with it through circular DMA rings in SRAM.
32  *
33  * There are six memory rings: one command ring, one RX data ring and four
34  * TX data rings.  The four TX data rings allow for QoS prioritization.
35  *
36  * The RX data ring consists of 32 DMA buffers.  Two registers indicate how
37  * far into the ring the driver and the firmware have progressed.  The
38  * driver sets the initial read index (reg1) and the initial write index
39  * (reg2); the firmware advances the read index (reg1) when it receives a
40  * packet and raises an interrupt.  The driver then processes the buffers
41  * starting at reg1, allocating a fresh buffer for each processed slot, and
42  * tells the firmware which buffers have been consumed by updating reg2.
43  *
44  * The TX rings work similarly, except that the firmware stops processing
45  * buffers once a queue is full and resumes only after confirmation of a
46  * successful transmission (tx_done) has been received.
47  *
48  * The command ring operates in the same manner as the TX queues.
49  *
50  * All communication directly with the card (e.g. EEPROM access) is classed
51  * as Stage 1 communication.
52  *
53  * All communication with the card that goes through the firmware is
54  * classed as Stage 2.  The firmware consists of two parts: a bootstrap
55  * firmware and a runtime firmware.  Both are loaded from host memory to
56  * the card via DMA and then told to execute.  From this point on the
57  * majority of communication between the driver and the card goes via the
58  * firmware.
59  */
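
/*
 * Illustrative sketch of the RX hand-off described above (not driver code:
 * the index variables and helpers are placeholders; the real logic lives
 * in wpi_notif_intr()/wpi_rx_done() and wpi_update_rx_ring() below):
 *
 *	while (driver_idx != firmware_idx) {
 *		process(rx_buf[driver_idx]);
 *		rx_buf[driver_idx] = allocate_fresh_buffer();
 *		driver_idx = (driver_idx + 1) % WPI_RX_RING_COUNT;
 *	}
 *	WPI_WRITE(sc, WPI_FH_RX_WPTR, driver_idx & ~7);
 */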
60 
61 #include "opt_wlan.h"
62 #include "opt_wpi.h"
63 
64 #include <sys/param.h>
65 #include <sys/sysctl.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/kernel.h>
69 #include <sys/socket.h>
70 #include <sys/systm.h>
71 #include <sys/malloc.h>
72 #include <sys/queue.h>
73 #include <sys/taskqueue.h>
74 #include <sys/module.h>
75 #include <sys/bus.h>
76 #include <sys/endian.h>
77 #include <sys/linker.h>
78 #include <sys/firmware.h>
79 
80 #include <machine/bus.h>
81 #include <machine/resource.h>
82 #include <sys/rman.h>
83 
84 #include <dev/pci/pcireg.h>
85 #include <dev/pci/pcivar.h>
86 
87 #include <net/bpf.h>
88 #include <net/if.h>
89 #include <net/if_var.h>
90 #include <net/if_arp.h>
91 #include <net/ethernet.h>
92 #include <net/if_dl.h>
93 #include <net/if_media.h>
94 #include <net/if_types.h>
95 
96 #include <netinet/in.h>
97 #include <netinet/in_systm.h>
98 #include <netinet/in_var.h>
99 #include <netinet/if_ether.h>
100 #include <netinet/ip.h>
101 
102 #include <net80211/ieee80211_var.h>
103 #include <net80211/ieee80211_radiotap.h>
104 #include <net80211/ieee80211_regdomain.h>
105 #include <net80211/ieee80211_ratectl.h>
106 
107 #include <dev/wpi/if_wpireg.h>
108 #include <dev/wpi/if_wpivar.h>
109 #include <dev/wpi/if_wpi_debug.h>
110 
111 struct wpi_ident {
112 	uint16_t	vendor;
113 	uint16_t	device;
114 	uint16_t	subdevice;
115 	const char	*name;
116 };
117 
118 static const struct wpi_ident wpi_ident_table[] = {
119 	/* The below entries support ABG regardless of the subid */
120 	{ 0x8086, 0x4222,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
121 	{ 0x8086, 0x4227,    0x0, "Intel(R) PRO/Wireless 3945ABG" },
122 	/* The below entries only support BG */
123 	{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG"  },
124 	{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG"  },
125 	{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG"  },
126 	{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG"  },
127 	{ 0, 0, 0, NULL }
128 };
129 
130 static int	wpi_probe(device_t);
131 static int	wpi_attach(device_t);
132 static void	wpi_radiotap_attach(struct wpi_softc *);
133 static void	wpi_sysctlattach(struct wpi_softc *);
134 static void	wpi_init_beacon(struct wpi_vap *);
135 static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
136 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
137 		    const uint8_t [IEEE80211_ADDR_LEN],
138 		    const uint8_t [IEEE80211_ADDR_LEN]);
139 static void	wpi_vap_delete(struct ieee80211vap *);
140 static int	wpi_detach(device_t);
141 static int	wpi_shutdown(device_t);
142 static int	wpi_suspend(device_t);
143 static int	wpi_resume(device_t);
144 static int	wpi_nic_lock(struct wpi_softc *);
145 static int	wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
146 static void	wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
147 static int	wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
148 		    void **, bus_size_t, bus_size_t);
149 static void	wpi_dma_contig_free(struct wpi_dma_info *);
150 static int	wpi_alloc_shared(struct wpi_softc *);
151 static void	wpi_free_shared(struct wpi_softc *);
152 static int	wpi_alloc_fwmem(struct wpi_softc *);
153 static void	wpi_free_fwmem(struct wpi_softc *);
154 static int	wpi_alloc_rx_ring(struct wpi_softc *);
155 static void	wpi_update_rx_ring(struct wpi_softc *);
156 static void	wpi_update_rx_ring_ps(struct wpi_softc *);
157 static void	wpi_reset_rx_ring(struct wpi_softc *);
158 static void	wpi_free_rx_ring(struct wpi_softc *);
159 static int	wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
160 		    uint8_t);
161 static void	wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
162 static void	wpi_update_tx_ring_ps(struct wpi_softc *,
163 		    struct wpi_tx_ring *);
164 static void	wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
165 static void	wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
166 static int	wpi_read_eeprom(struct wpi_softc *,
167 		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
168 static uint32_t	wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
169 static void	wpi_read_eeprom_band(struct wpi_softc *, uint8_t, int, int *,
170 		    struct ieee80211_channel[]);
171 static int	wpi_read_eeprom_channels(struct wpi_softc *, uint8_t);
172 static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
173 		    struct ieee80211_channel *);
174 static void	wpi_getradiocaps(struct ieee80211com *, int, int *,
175 		    struct ieee80211_channel[]);
176 static int	wpi_setregdomain(struct ieee80211com *,
177 		    struct ieee80211_regdomain *, int,
178 		    struct ieee80211_channel[]);
179 static int	wpi_read_eeprom_group(struct wpi_softc *, uint8_t);
180 static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
181 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
182 static void	wpi_node_free(struct ieee80211_node *);
183 static void	wpi_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
184 		    const struct ieee80211_rx_stats *,
185 		    int, int);
186 static void	wpi_restore_node(void *, struct ieee80211_node *);
187 static void	wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *);
188 static int	wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
189 static void	wpi_calib_timeout(void *);
190 static void	wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
191 		    struct wpi_rx_data *);
192 static void	wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
193 		    struct wpi_rx_data *);
194 static void	wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
195 static void	wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
196 static void	wpi_notif_intr(struct wpi_softc *);
197 static void	wpi_wakeup_intr(struct wpi_softc *);
198 #ifdef WPI_DEBUG
199 static void	wpi_debug_registers(struct wpi_softc *);
200 #endif
201 static void	wpi_fatal_intr(struct wpi_softc *);
202 static void	wpi_intr(void *);
203 static void	wpi_free_txfrags(struct wpi_softc *, uint16_t);
204 static int	wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
205 static int	wpi_tx_data(struct wpi_softc *, struct mbuf *,
206 		    struct ieee80211_node *);
207 static int	wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
208 		    struct ieee80211_node *,
209 		    const struct ieee80211_bpf_params *);
210 static int	wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
211 		    const struct ieee80211_bpf_params *);
212 static int	wpi_transmit(struct ieee80211com *, struct mbuf *);
213 static void	wpi_watchdog_rfkill(void *);
214 static void	wpi_scan_timeout(void *);
215 static void	wpi_tx_timeout(void *);
216 static void	wpi_parent(struct ieee80211com *);
217 static int	wpi_cmd(struct wpi_softc *, uint8_t, const void *, uint16_t,
218 		    int);
219 static int	wpi_mrr_setup(struct wpi_softc *);
220 static int	wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
221 static int	wpi_add_broadcast_node(struct wpi_softc *, int);
222 static int	wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
223 static void	wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
224 static int	wpi_updateedca(struct ieee80211com *);
225 static void	wpi_set_promisc(struct wpi_softc *);
226 static void	wpi_update_promisc(struct ieee80211com *);
227 static void	wpi_update_mcast(struct ieee80211com *);
228 static void	wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
229 static int	wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
230 static void	wpi_power_calibration(struct wpi_softc *);
231 static int	wpi_set_txpower(struct wpi_softc *, int);
232 static int	wpi_get_power_index(struct wpi_softc *,
233 		    struct wpi_power_group *, uint8_t, int, int);
234 static int	wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
235 static int	wpi_send_btcoex(struct wpi_softc *);
236 static int	wpi_send_rxon(struct wpi_softc *, int, int);
237 static int	wpi_config(struct wpi_softc *);
238 static uint16_t	wpi_get_active_dwell_time(struct wpi_softc *,
239 		    struct ieee80211_channel *, uint8_t);
240 static uint16_t	wpi_limit_dwell(struct wpi_softc *, uint16_t);
241 static uint16_t	wpi_get_passive_dwell_time(struct wpi_softc *,
242 		    struct ieee80211_channel *);
243 static uint32_t	wpi_get_scan_pause_time(uint32_t, uint16_t);
244 static int	wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
245 static int	wpi_auth(struct wpi_softc *, struct ieee80211vap *);
246 static int	wpi_config_beacon(struct wpi_vap *);
247 static int	wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
248 static void	wpi_update_beacon(struct ieee80211vap *, int);
249 static void	wpi_newassoc(struct ieee80211_node *, int);
250 static int	wpi_run(struct wpi_softc *, struct ieee80211vap *);
251 static int	wpi_load_key(struct ieee80211_node *,
252 		    const struct ieee80211_key *);
253 static void	wpi_load_key_cb(void *, struct ieee80211_node *);
254 static int	wpi_set_global_keys(struct ieee80211_node *);
255 static int	wpi_del_key(struct ieee80211_node *,
256 		    const struct ieee80211_key *);
257 static void	wpi_del_key_cb(void *, struct ieee80211_node *);
258 static int	wpi_process_key(struct ieee80211vap *,
259 		    const struct ieee80211_key *, int);
260 static int	wpi_key_set(struct ieee80211vap *,
261 		    const struct ieee80211_key *);
262 static int	wpi_key_delete(struct ieee80211vap *,
263 		    const struct ieee80211_key *);
264 static int	wpi_post_alive(struct wpi_softc *);
265 static int	wpi_load_bootcode(struct wpi_softc *, const uint8_t *,
266 		    uint32_t);
267 static int	wpi_load_firmware(struct wpi_softc *);
268 static int	wpi_read_firmware(struct wpi_softc *);
269 static void	wpi_unload_firmware(struct wpi_softc *);
270 static int	wpi_clock_wait(struct wpi_softc *);
271 static int	wpi_apm_init(struct wpi_softc *);
272 static void	wpi_apm_stop_master(struct wpi_softc *);
273 static void	wpi_apm_stop(struct wpi_softc *);
274 static void	wpi_nic_config(struct wpi_softc *);
275 static int	wpi_hw_init(struct wpi_softc *);
276 static void	wpi_hw_stop(struct wpi_softc *);
277 static void	wpi_radio_on(void *, int);
278 static void	wpi_radio_off(void *, int);
279 static int	wpi_init(struct wpi_softc *);
280 static void	wpi_stop_locked(struct wpi_softc *);
281 static void	wpi_stop(struct wpi_softc *);
282 static void	wpi_scan_start(struct ieee80211com *);
283 static void	wpi_scan_end(struct ieee80211com *);
284 static void	wpi_set_channel(struct ieee80211com *);
285 static void	wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
286 static void	wpi_scan_mindwell(struct ieee80211_scan_state *);
287 
288 static device_method_t wpi_methods[] = {
289 	/* Device interface */
290 	DEVMETHOD(device_probe,		wpi_probe),
291 	DEVMETHOD(device_attach,	wpi_attach),
292 	DEVMETHOD(device_detach,	wpi_detach),
293 	DEVMETHOD(device_shutdown,	wpi_shutdown),
294 	DEVMETHOD(device_suspend,	wpi_suspend),
295 	DEVMETHOD(device_resume,	wpi_resume),
296 
297 	DEVMETHOD_END
298 };
299 
300 static driver_t wpi_driver = {
301 	"wpi",
302 	wpi_methods,
303 	sizeof (struct wpi_softc)
304 };
305 static devclass_t wpi_devclass;
306 
307 DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);
308 
309 MODULE_VERSION(wpi, 1);
310 
311 MODULE_DEPEND(wpi, pci,  1, 1, 1);
312 MODULE_DEPEND(wpi, wlan, 1, 1, 1);
313 MODULE_DEPEND(wpi, firmware, 1, 1, 1);
314 
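/*
 * Probe: match the PCI vendor/device ID against wpi_ident_table and set
 * the device description.
 */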
315 static int
316 wpi_probe(device_t dev)
317 {
318 	const struct wpi_ident *ident;
319 
320 	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
321 		if (pci_get_vendor(dev) == ident->vendor &&
322 		    pci_get_device(dev) == ident->device) {
323 			device_set_desc(dev, ident->name);
324 			return (BUS_PROBE_DEFAULT);
325 		}
326 	}
327 	return ENXIO;
328 }
329 
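/*
 * Attach: allocate PCI and interrupt resources, create the DMA rings,
 * read the EEPROM and register the device with net80211.
 */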
330 static int
331 wpi_attach(device_t dev)
332 {
333 	struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
334 	struct ieee80211com *ic;
335 	uint8_t i;
336 	int error, rid;
337 #ifdef WPI_DEBUG
338 	int supportsa = 1;
339 	const struct wpi_ident *ident;
340 #endif
341 
342 	sc->sc_dev = dev;
343 
344 #ifdef WPI_DEBUG
345 	error = resource_int_value(device_get_name(sc->sc_dev),
346 	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
347 	if (error != 0)
348 		sc->sc_debug = 0;
349 #else
350 	sc->sc_debug = 0;
351 #endif
352 
353 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
354 
355 	/*
356 	 * Get the offset of the PCI Express Capability Structure in PCI
357 	 * Configuration Space.
358 	 */
359 	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
360 	if (error != 0) {
361 		device_printf(dev, "PCIe capability structure not found!\n");
362 		return error;
363 	}
364 
365 	/*
366 	 * Some cards only support 802.11b/g, not 802.11a; check whether this
367 	 * is one such card.  A 0x0 in the subdevice table indicates that the
368 	 * subdevice field is ignored (the entry matches any subdevice).
369 	 */
370 #ifdef WPI_DEBUG
371 	for (ident = wpi_ident_table; ident->name != NULL; ident++) {
372 		if (ident->subdevice &&
373 		    pci_get_subdevice(dev) == ident->subdevice) {
374 		    supportsa = 0;
375 		    break;
376 		}
377 	}
378 #endif
379 
380 	/* Clear device-specific "PCI retry timeout" register (41h). */
381 	pci_write_config(dev, 0x41, 0, 1);
382 
383 	/* Enable bus-mastering. */
384 	pci_enable_busmaster(dev);
385 
386 	rid = PCIR_BAR(0);
387 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
388 	    RF_ACTIVE);
389 	if (sc->mem == NULL) {
390 		device_printf(dev, "can't map mem space\n");
391 		return ENOMEM;
392 	}
393 	sc->sc_st = rman_get_bustag(sc->mem);
394 	sc->sc_sh = rman_get_bushandle(sc->mem);
395 
396 	rid = 1;
397 	if (pci_alloc_msi(dev, &rid) == 0)
398 		rid = 1;
399 	else
400 		rid = 0;
401 	/* Install interrupt handler. */
402 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
403 	    (rid != 0 ? 0 : RF_SHAREABLE));
404 	if (sc->irq == NULL) {
405 		device_printf(dev, "can't map interrupt\n");
406 		error = ENOMEM;
407 		goto fail;
408 	}
409 
410 	WPI_LOCK_INIT(sc);
411 	WPI_TX_LOCK_INIT(sc);
412 	WPI_RXON_LOCK_INIT(sc);
413 	WPI_NT_LOCK_INIT(sc);
414 	WPI_TXQ_LOCK_INIT(sc);
415 	WPI_TXQ_STATE_LOCK_INIT(sc);
416 
417 	/* Allocate DMA memory for firmware transfers. */
418 	if ((error = wpi_alloc_fwmem(sc)) != 0) {
419 		device_printf(dev,
420 		    "could not allocate memory for firmware, error %d\n",
421 		    error);
422 		goto fail;
423 	}
424 
425 	/* Allocate shared page. */
426 	if ((error = wpi_alloc_shared(sc)) != 0) {
427 		device_printf(dev, "could not allocate shared page\n");
428 		goto fail;
429 	}
430 
431 	/* Allocate TX rings - 4 for QoS purposes, 1 for commands. */
432 	for (i = 0; i < WPI_DRV_NTXQUEUES; i++) {
433 		if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
434 			device_printf(dev,
435 			    "could not allocate TX ring %d, error %d\n", i,
436 			    error);
437 			goto fail;
438 		}
439 	}
440 
441 	/* Allocate RX ring. */
442 	if ((error = wpi_alloc_rx_ring(sc)) != 0) {
443 		device_printf(dev, "could not allocate RX ring, error %d\n",
444 		    error);
445 		goto fail;
446 	}
447 
448 	/* Clear pending interrupts. */
449 	WPI_WRITE(sc, WPI_INT, 0xffffffff);
450 
451 	ic = &sc->sc_ic;
452 	ic->ic_softc = sc;
453 	ic->ic_name = device_get_nameunit(dev);
454 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
455 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
456 
457 	/* Set device capabilities. */
458 	ic->ic_caps =
459 		  IEEE80211_C_STA		/* station mode supported */
460 		| IEEE80211_C_IBSS		/* IBSS mode supported */
461 		| IEEE80211_C_HOSTAP		/* Host access point mode */
462 		| IEEE80211_C_MONITOR		/* monitor mode supported */
463 		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
464 		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
465 		| IEEE80211_C_TXFRAG		/* handle tx frags */
466 		| IEEE80211_C_TXPMGT		/* tx power management */
467 		| IEEE80211_C_SHSLOT		/* short slot time supported */
468 		| IEEE80211_C_WPA		/* 802.11i */
469 		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
470 		| IEEE80211_C_WME		/* 802.11e */
471 		| IEEE80211_C_PMGT		/* Station-side power mgmt */
472 		;
473 
474 	ic->ic_cryptocaps =
475 		  IEEE80211_CRYPTO_AES_CCM;
476 
477 	/*
478 	 * Read the EEPROM and set up the channels for net80211.  We don't
479 	 * set the rates; net80211 does this for us.
480 	 */
481 	if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) {
482 		device_printf(dev, "could not read EEPROM, error %d\n",
483 		    error);
484 		goto fail;
485 	}
486 
487 #ifdef WPI_DEBUG
488 	if (bootverbose) {
489 		device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n",
490 		    sc->domain);
491 		device_printf(sc->sc_dev, "Hardware Type: %c\n",
492 		    sc->type > 1 ? 'B': '?');
493 		device_printf(sc->sc_dev, "Hardware Revision: %c\n",
494 		    ((sc->rev & 0xf0) == 0xd0) ? 'D': '?');
495 		device_printf(sc->sc_dev, "SKU %s support 802.11a\n",
496 		    supportsa ? "does" : "does not");
497 
498 		/* XXX hw_config uses the PCIDEV for the Hardware rev. Must
499 		   check what sc->rev really represents - benjsc 20070615 */
500 	}
501 #endif
502 
503 	ieee80211_ifattach(ic);
504 	ic->ic_vap_create = wpi_vap_create;
505 	ic->ic_vap_delete = wpi_vap_delete;
506 	ic->ic_parent = wpi_parent;
507 	ic->ic_raw_xmit = wpi_raw_xmit;
508 	ic->ic_transmit = wpi_transmit;
509 	ic->ic_node_alloc = wpi_node_alloc;
510 	sc->sc_node_free = ic->ic_node_free;
511 	ic->ic_node_free = wpi_node_free;
512 	ic->ic_wme.wme_update = wpi_updateedca;
513 	ic->ic_update_promisc = wpi_update_promisc;
514 	ic->ic_update_mcast = wpi_update_mcast;
515 	ic->ic_newassoc = wpi_newassoc;
516 	ic->ic_scan_start = wpi_scan_start;
517 	ic->ic_scan_end = wpi_scan_end;
518 	ic->ic_set_channel = wpi_set_channel;
519 	ic->ic_scan_curchan = wpi_scan_curchan;
520 	ic->ic_scan_mindwell = wpi_scan_mindwell;
521 	ic->ic_getradiocaps = wpi_getradiocaps;
522 	ic->ic_setregdomain = wpi_setregdomain;
523 
524 	sc->sc_update_rx_ring = wpi_update_rx_ring;
525 	sc->sc_update_tx_ring = wpi_update_tx_ring;
526 
527 	wpi_radiotap_attach(sc);
528 
529 	/* Setup Tx status flags (constant). */
530 	sc->sc_txs.flags = IEEE80211_RATECTL_STATUS_PKTLEN |
531 	    IEEE80211_RATECTL_STATUS_SHORT_RETRY |
532 	    IEEE80211_RATECTL_STATUS_LONG_RETRY;
533 
534 	callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0);
535 	callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0);
536 	callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0);
537 	callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0);
538 	TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc);
539 	TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc);
540 
541 	wpi_sysctlattach(sc);
542 
543 	/*
544 	 * Hook our interrupt after all initialization is complete.
545 	 */
546 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
547 	    NULL, wpi_intr, sc, &sc->sc_ih);
548 	if (error != 0) {
549 		device_printf(dev, "can't establish interrupt, error %d\n",
550 		    error);
551 		goto fail;
552 	}
553 
554 	if (bootverbose)
555 		ieee80211_announce(ic);
556 
557 #ifdef WPI_DEBUG
558 	if (sc->sc_debug & WPI_DEBUG_HW)
559 		ieee80211_announce_channels(ic);
560 #endif
561 
562 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
563 	return 0;
564 
565 fail:	wpi_detach(dev);
566 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
567 	return error;
568 }
569 
570 /*
571  * Attach the interface to 802.11 radiotap.
572  */
573 static void
574 wpi_radiotap_attach(struct wpi_softc *sc)
575 {
576 	struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap;
577 	struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap;
578 
579 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
580 	ieee80211_radiotap_attach(&sc->sc_ic,
581 	    &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT,
582 	    &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT);
583 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
584 }
585 
586 static void
587 wpi_sysctlattach(struct wpi_softc *sc)
588 {
589 #ifdef WPI_DEBUG
590 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
591 	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
592 
593 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
594 	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
595 		"control debugging printfs");
596 #endif
597 }
598 
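/*
 * Pre-fill the beacon command template used in IBSS/hostap mode; the
 * beacon frame itself is attached later (see wpi_config_beacon()).
 */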
599 static void
600 wpi_init_beacon(struct wpi_vap *wvp)
601 {
602 	struct wpi_buf *bcn = &wvp->wv_bcbuf;
603 	struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
604 
605 	cmd->id = WPI_ID_BROADCAST;
606 	cmd->ofdm_mask = 0xff;
607 	cmd->cck_mask = 0x0f;
608 	cmd->lifetime = htole32(WPI_LIFETIME_INFINITE);
609 
610 	/*
611 	 * XXX WPI_TX_AUTO_SEQ seems to be ignored - workaround this issue
612 	 * XXX by using WPI_TX_NEED_ACK instead (with some side effects).
613 	 */
614 	cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP);
615 
616 	bcn->code = WPI_CMD_SET_BEACON;
617 	bcn->ac = WPI_CMD_QUEUE_NUM;
618 	bcn->size = sizeof(struct wpi_cmd_beacon);
619 }
620 
621 static struct ieee80211vap *
622 wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
623     enum ieee80211_opmode opmode, int flags,
624     const uint8_t bssid[IEEE80211_ADDR_LEN],
625     const uint8_t mac[IEEE80211_ADDR_LEN])
626 {
627 	struct wpi_vap *wvp;
628 	struct ieee80211vap *vap;
629 
630 	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
631 		return NULL;
632 
633 	wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
634 	vap = &wvp->wv_vap;
635 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
636 
637 	if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
638 		WPI_VAP_LOCK_INIT(wvp);
639 		wpi_init_beacon(wvp);
640 	}
641 
642 	/* Override with driver methods. */
643 	vap->iv_key_set = wpi_key_set;
644 	vap->iv_key_delete = wpi_key_delete;
645 	if (opmode == IEEE80211_M_IBSS) {
646 		wvp->wv_recv_mgmt = vap->iv_recv_mgmt;
647 		vap->iv_recv_mgmt = wpi_ibss_recv_mgmt;
648 	}
649 	wvp->wv_newstate = vap->iv_newstate;
650 	vap->iv_newstate = wpi_newstate;
651 	vap->iv_update_beacon = wpi_update_beacon;
652 	vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1;
653 
654 	ieee80211_ratectl_init(vap);
655 	/* Complete setup. */
656 	ieee80211_vap_attach(vap, ieee80211_media_change,
657 	    ieee80211_media_status, mac);
658 	ic->ic_opmode = opmode;
659 	return vap;
660 }
661 
662 static void
663 wpi_vap_delete(struct ieee80211vap *vap)
664 {
665 	struct wpi_vap *wvp = WPI_VAP(vap);
666 	struct wpi_buf *bcn = &wvp->wv_bcbuf;
667 	enum ieee80211_opmode opmode = vap->iv_opmode;
668 
669 	ieee80211_ratectl_deinit(vap);
670 	ieee80211_vap_detach(vap);
671 
672 	if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
673 		if (bcn->m != NULL)
674 			m_freem(bcn->m);
675 
676 		WPI_VAP_LOCK_DESTROY(wvp);
677 	}
678 
679 	free(wvp, M_80211_VAP);
680 }
681 
682 static int
683 wpi_detach(device_t dev)
684 {
685 	struct wpi_softc *sc = device_get_softc(dev);
686 	struct ieee80211com *ic = &sc->sc_ic;
687 	uint8_t qid;
688 
689 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
690 
691 	if (ic->ic_vap_create == wpi_vap_create) {
692 		ieee80211_draintask(ic, &sc->sc_radioon_task);
693 		ieee80211_draintask(ic, &sc->sc_radiooff_task);
694 
695 		wpi_stop(sc);
696 
697 		callout_drain(&sc->watchdog_rfkill);
698 		callout_drain(&sc->tx_timeout);
699 		callout_drain(&sc->scan_timeout);
700 		callout_drain(&sc->calib_to);
701 		ieee80211_ifdetach(ic);
702 	}
703 
704 	/* Uninstall interrupt handler. */
705 	if (sc->irq != NULL) {
706 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
707 		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
708 		    sc->irq);
709 		pci_release_msi(dev);
710 	}
711 
712 	if (sc->txq[0].data_dmat) {
713 		/* Free DMA resources. */
714 		for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++)
715 			wpi_free_tx_ring(sc, &sc->txq[qid]);
716 
717 		wpi_free_rx_ring(sc);
718 		wpi_free_shared(sc);
719 	}
720 
721 	if (sc->fw_dma.tag)
722 		wpi_free_fwmem(sc);
723 
724 	if (sc->mem != NULL)
725 		bus_release_resource(dev, SYS_RES_MEMORY,
726 		    rman_get_rid(sc->mem), sc->mem);
727 
728 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
729 	WPI_TXQ_STATE_LOCK_DESTROY(sc);
730 	WPI_TXQ_LOCK_DESTROY(sc);
731 	WPI_NT_LOCK_DESTROY(sc);
732 	WPI_RXON_LOCK_DESTROY(sc);
733 	WPI_TX_LOCK_DESTROY(sc);
734 	WPI_LOCK_DESTROY(sc);
735 	return 0;
736 }
737 
738 static int
739 wpi_shutdown(device_t dev)
740 {
741 	struct wpi_softc *sc = device_get_softc(dev);
742 
743 	wpi_stop(sc);
744 	return 0;
745 }
746 
747 static int
748 wpi_suspend(device_t dev)
749 {
750 	struct wpi_softc *sc = device_get_softc(dev);
751 	struct ieee80211com *ic = &sc->sc_ic;
752 
753 	ieee80211_suspend_all(ic);
754 	return 0;
755 }
756 
757 static int
758 wpi_resume(device_t dev)
759 {
760 	struct wpi_softc *sc = device_get_softc(dev);
761 	struct ieee80211com *ic = &sc->sc_ic;
762 
763 	/* Clear device-specific "PCI retry timeout" register (41h). */
764 	pci_write_config(dev, 0x41, 0, 1);
765 
766 	ieee80211_resume_all(ic);
767 	return 0;
768 }
769 
770 /*
771  * Grab exclusive access to NIC memory.
772  */
773 static int
774 wpi_nic_lock(struct wpi_softc *sc)
775 {
776 	int ntries;
777 
778 	/* Request exclusive access to NIC. */
779 	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
780 
781 	/* Spin until we actually get the lock. */
782 	for (ntries = 0; ntries < 1000; ntries++) {
783 		if ((WPI_READ(sc, WPI_GP_CNTRL) &
784 		    (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) ==
785 		    WPI_GP_CNTRL_MAC_ACCESS_ENA)
786 			return 0;
787 		DELAY(10);
788 	}
789 
790 	device_printf(sc->sc_dev, "could not lock memory\n");
791 
792 	return ETIMEDOUT;
793 }
794 
795 /*
796  * Release lock on NIC memory.
797  */
798 static __inline void
799 wpi_nic_unlock(struct wpi_softc *sc)
800 {
801 	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
802 }
803 
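/*
 * Peripheral ("prph") registers are accessed indirectly: the target address
 * is written to WPI_PRPH_RADDR/WPI_PRPH_WADDR and the data is transferred
 * through WPI_PRPH_RDATA/WPI_PRPH_WDATA.  Callers are expected to hold the
 * NIC lock (wpi_nic_lock()) around these accesses.
 */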
804 static __inline uint32_t
805 wpi_prph_read(struct wpi_softc *sc, uint32_t addr)
806 {
807 	WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr);
808 	WPI_BARRIER_READ_WRITE(sc);
809 	return WPI_READ(sc, WPI_PRPH_RDATA);
810 }
811 
812 static __inline void
813 wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data)
814 {
815 	WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr);
816 	WPI_BARRIER_WRITE(sc);
817 	WPI_WRITE(sc, WPI_PRPH_WDATA, data);
818 }
819 
820 static __inline void
821 wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
822 {
823 	wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask);
824 }
825 
826 static __inline void
827 wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
828 {
829 	wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask);
830 }
831 
832 static __inline void
833 wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr,
834     const uint32_t *data, uint32_t count)
835 {
836 	for (; count != 0; count--, data++, addr += 4)
837 		wpi_prph_write(sc, addr, *data);
838 }
839 
840 static __inline uint32_t
841 wpi_mem_read(struct wpi_softc *sc, uint32_t addr)
842 {
843 	WPI_WRITE(sc, WPI_MEM_RADDR, addr);
844 	WPI_BARRIER_READ_WRITE(sc);
845 	return WPI_READ(sc, WPI_MEM_RDATA);
846 }
847 
848 static __inline void
849 wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data,
850     int count)
851 {
852 	for (; count > 0; count--, addr += 4)
853 		*data++ = wpi_mem_read(sc, addr);
854 }
855 
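/*
 * Read `count' bytes from the EEPROM.  The EEPROM is read one 16-bit word
 * at a time through the WPI_EEPROM register; the data becomes available in
 * the upper half of the register once WPI_EEPROM_READ_VALID is set.
 */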
856 static int
857 wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count)
858 {
859 	uint8_t *out = data;
860 	uint32_t val;
861 	int error, ntries;
862 
863 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
864 
865 	if ((error = wpi_nic_lock(sc)) != 0)
866 		return error;
867 
868 	for (; count > 0; count -= 2, addr++) {
869 		WPI_WRITE(sc, WPI_EEPROM, addr << 2);
870 		for (ntries = 0; ntries < 10; ntries++) {
871 			val = WPI_READ(sc, WPI_EEPROM);
872 			if (val & WPI_EEPROM_READ_VALID)
873 				break;
874 			DELAY(5);
875 		}
876 		if (ntries == 10) {
877 			device_printf(sc->sc_dev,
878 			    "timeout reading ROM at 0x%x\n", addr);
879 			return ETIMEDOUT;
880 		}
881 		*out++ = val >> 16;
882 		if (count > 1)
883 			*out++ = val >> 24;
884 	}
885 
886 	wpi_nic_unlock(sc);
887 
888 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
889 
890 	return 0;
891 }
892 
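/*
 * bus_dmamap_load() callback: record the bus address of the single
 * DMA segment.
 */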
893 static void
894 wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
895 {
896 	if (error != 0)
897 		return;
898 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
899 	*(bus_addr_t *)arg = segs[0].ds_addr;
900 }
901 
902 /*
903  * Allocates a contiguous block of dma memory of the requested size and
904  * alignment.
905  */
906 static int
907 wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma,
908     void **kvap, bus_size_t size, bus_size_t alignment)
909 {
910 	int error;
911 
912 	dma->tag = NULL;
913 	dma->size = size;
914 
915 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
916 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
917 	    1, size, 0, NULL, NULL, &dma->tag);
918 	if (error != 0)
919 		goto fail;
920 
921 	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
922 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
923 	if (error != 0)
924 		goto fail;
925 
926 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
927 	    wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
928 	if (error != 0)
929 		goto fail;
930 
931 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
932 
933 	if (kvap != NULL)
934 		*kvap = dma->vaddr;
935 
936 	return 0;
937 
938 fail:	wpi_dma_contig_free(dma);
939 	return error;
940 }
941 
942 static void
943 wpi_dma_contig_free(struct wpi_dma_info *dma)
944 {
945 	if (dma->vaddr != NULL) {
946 		bus_dmamap_sync(dma->tag, dma->map,
947 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
948 		bus_dmamap_unload(dma->tag, dma->map);
949 		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
950 		dma->vaddr = NULL;
951 	}
952 	if (dma->tag != NULL) {
953 		bus_dma_tag_destroy(dma->tag);
954 		dma->tag = NULL;
955 	}
956 }
957 
958 /*
959  * Allocate a shared page between host and NIC.
960  */
961 static int
962 wpi_alloc_shared(struct wpi_softc *sc)
963 {
964 	/* Shared buffer must be aligned on a 4KB boundary. */
965 	return wpi_dma_contig_alloc(sc, &sc->shared_dma,
966 	    (void **)&sc->shared, sizeof (struct wpi_shared), 4096);
967 }
968 
969 static void
970 wpi_free_shared(struct wpi_softc *sc)
971 {
972 	wpi_dma_contig_free(&sc->shared_dma);
973 }
974 
975 /*
976  * Allocate DMA-safe memory for firmware transfer.
977  */
978 static int
979 wpi_alloc_fwmem(struct wpi_softc *sc)
980 {
981 	/* Must be aligned on a 16-byte boundary. */
982 	return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
983 	    WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16);
984 }
985 
986 static void
987 wpi_free_fwmem(struct wpi_softc *sc)
988 {
989 	wpi_dma_contig_free(&sc->fw_dma);
990 }
991 
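/*
 * Allocate the RX ring: an array of WPI_RX_RING_COUNT descriptors (one
 * 32-bit bus address per slot) plus one jumbo mbuf per slot for the
 * firmware to DMA received frames into.
 */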
992 static int
993 wpi_alloc_rx_ring(struct wpi_softc *sc)
994 {
995 	struct wpi_rx_ring *ring = &sc->rxq;
996 	bus_size_t size;
997 	int i, error;
998 
999 	ring->cur = 0;
1000 	ring->update = 0;
1001 
1002 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1003 
1004 	/* Allocate RX descriptors (16KB aligned.) */
1005 	size = WPI_RX_RING_COUNT * sizeof (uint32_t);
1006 	error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
1007 	    (void **)&ring->desc, size, WPI_RING_DMA_ALIGN);
1008 	if (error != 0) {
1009 		device_printf(sc->sc_dev,
1010 		    "%s: could not allocate RX ring DMA memory, error %d\n",
1011 		    __func__, error);
1012 		goto fail;
1013 	}
1014 
1015 	/* Create RX buffer DMA tag. */
1016 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1017 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1018 	    MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dmat);
1019 	if (error != 0) {
1020 		device_printf(sc->sc_dev,
1021 		    "%s: could not create RX buf DMA tag, error %d\n",
1022 		    __func__, error);
1023 		goto fail;
1024 	}
1025 
1026 	/*
1027 	 * Allocate and map RX buffers.
1028 	 */
1029 	for (i = 0; i < WPI_RX_RING_COUNT; i++) {
1030 		struct wpi_rx_data *data = &ring->data[i];
1031 		bus_addr_t paddr;
1032 
1033 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1034 		if (error != 0) {
1035 			device_printf(sc->sc_dev,
1036 			    "%s: could not create RX buf DMA map, error %d\n",
1037 			    __func__, error);
1038 			goto fail;
1039 		}
1040 
1041 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1042 		if (data->m == NULL) {
1043 			device_printf(sc->sc_dev,
1044 			    "%s: could not allocate RX mbuf\n", __func__);
1045 			error = ENOBUFS;
1046 			goto fail;
1047 		}
1048 
1049 		error = bus_dmamap_load(ring->data_dmat, data->map,
1050 		    mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
1051 		    &paddr, BUS_DMA_NOWAIT);
1052 		if (error != 0 && error != EFBIG) {
1053 			device_printf(sc->sc_dev,
1054 			    "%s: can't map mbuf (error %d)\n", __func__,
1055 			    error);
1056 			goto fail;
1057 		}
1058 
1059 		/* Set physical address of RX buffer. */
1060 		ring->desc[i] = htole32(paddr);
1061 	}
1062 
1063 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1064 	    BUS_DMASYNC_PREWRITE);
1065 
1066 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1067 
1068 	return 0;
1069 
1070 fail:	wpi_free_rx_ring(sc);
1071 
1072 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1073 
1074 	return error;
1075 }
1076 
1077 static void
1078 wpi_update_rx_ring(struct wpi_softc *sc)
1079 {
1080 	WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);
1081 }
1082 
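/*
 * Power-save aware variant of wpi_update_rx_ring(): if the NIC is asleep,
 * defer the write pointer update until the INT_WAKEUP interrupt; otherwise
 * request MAC access, update the ring and drop the request again.
 */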
1083 static void
1084 wpi_update_rx_ring_ps(struct wpi_softc *sc)
1085 {
1086 	struct wpi_rx_ring *ring = &sc->rxq;
1087 
1088 	if (ring->update != 0) {
1089 		/* Wait for INT_WAKEUP event. */
1090 		return;
1091 	}
1092 
1093 	WPI_TXQ_LOCK(sc);
1094 	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1095 	if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
1096 		DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n",
1097 		    __func__);
1098 		ring->update = 1;
1099 	} else {
1100 		wpi_update_rx_ring(sc);
1101 		WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1102 	}
1103 	WPI_TXQ_UNLOCK(sc);
1104 }
1105 
1106 static void
1107 wpi_reset_rx_ring(struct wpi_softc *sc)
1108 {
1109 	struct wpi_rx_ring *ring = &sc->rxq;
1110 	int ntries;
1111 
1112 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1113 
1114 	if (wpi_nic_lock(sc) == 0) {
1115 		WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0);
1116 		for (ntries = 0; ntries < 1000; ntries++) {
1117 			if (WPI_READ(sc, WPI_FH_RX_STATUS) &
1118 			    WPI_FH_RX_STATUS_IDLE)
1119 				break;
1120 			DELAY(10);
1121 		}
1122 		wpi_nic_unlock(sc);
1123 	}
1124 
1125 	ring->cur = 0;
1126 	ring->update = 0;
1127 }
1128 
1129 static void
1130 wpi_free_rx_ring(struct wpi_softc *sc)
1131 {
1132 	struct wpi_rx_ring *ring = &sc->rxq;
1133 	int i;
1134 
1135 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1136 
1137 	wpi_dma_contig_free(&ring->desc_dma);
1138 
1139 	for (i = 0; i < WPI_RX_RING_COUNT; i++) {
1140 		struct wpi_rx_data *data = &ring->data[i];
1141 
1142 		if (data->m != NULL) {
1143 			bus_dmamap_sync(ring->data_dmat, data->map,
1144 			    BUS_DMASYNC_POSTREAD);
1145 			bus_dmamap_unload(ring->data_dmat, data->map);
1146 			m_freem(data->m);
1147 			data->m = NULL;
1148 		}
1149 		if (data->map != NULL)
1150 			bus_dmamap_destroy(ring->data_dmat, data->map);
1151 	}
1152 	if (ring->data_dmat != NULL) {
1153 		bus_dma_tag_destroy(ring->data_dmat);
1154 		ring->data_dmat = NULL;
1155 	}
1156 }
1157 
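/*
 * Allocate a TX ring: the TX descriptors, a per-slot command block and a
 * DMA map for each frame mbuf.  The ring's physical address is published
 * to the firmware through the shared page (sc->shared->txbase[]).
 */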
1158 static int
1159 wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, uint8_t qid)
1160 {
1161 	bus_addr_t paddr;
1162 	bus_size_t size;
1163 	int i, error;
1164 
1165 	ring->qid = qid;
1166 	ring->queued = 0;
1167 	ring->cur = 0;
1168 	ring->pending = 0;
1169 	ring->update = 0;
1170 
1171 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1172 
1173 	/* Allocate TX descriptors (16KB aligned.) */
1174 	size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc);
1175 	error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1176 	    size, WPI_RING_DMA_ALIGN);
1177 	if (error != 0) {
1178 		device_printf(sc->sc_dev,
1179 		    "%s: could not allocate TX ring DMA memory, error %d\n",
1180 		    __func__, error);
1181 		goto fail;
1182 	}
1183 
1184 	/* Update shared area with ring physical address. */
1185 	sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
1186 	bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
1187 	    BUS_DMASYNC_PREWRITE);
1188 
1189 	size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd);
1190 	error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1191 	    size, 4);
1192 	if (error != 0) {
1193 		device_printf(sc->sc_dev,
1194 		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1195 		    __func__, error);
1196 		goto fail;
1197 	}
1198 
1199 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1200 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1201 	    WPI_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
1202 	if (error != 0) {
1203 		device_printf(sc->sc_dev,
1204 		    "%s: could not create TX buf DMA tag, error %d\n",
1205 		    __func__, error);
1206 		goto fail;
1207 	}
1208 
1209 	paddr = ring->cmd_dma.paddr;
1210 	for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1211 		struct wpi_tx_data *data = &ring->data[i];
1212 
1213 		data->cmd_paddr = paddr;
1214 		paddr += sizeof (struct wpi_tx_cmd);
1215 
1216 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1217 		if (error != 0) {
1218 			device_printf(sc->sc_dev,
1219 			    "%s: could not create TX buf DMA map, error %d\n",
1220 			    __func__, error);
1221 			goto fail;
1222 		}
1223 	}
1224 
1225 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1226 
1227 	return 0;
1228 
1229 fail:	wpi_free_tx_ring(sc, ring);
1230 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1231 	return error;
1232 }
1233 
1234 static void
1235 wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1236 {
1237 	WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
1238 }
1239 
1240 static void
1241 wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1242 {
1243 
1244 	if (ring->update != 0) {
1245 		/* Wait for INT_WAKEUP event. */
1246 		return;
1247 	}
1248 
1249 	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1250 	if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
1251 		DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n",
1252 		    __func__, ring->qid);
1253 		ring->update = 1;
1254 	} else {
1255 		wpi_update_tx_ring(sc, ring);
1256 		WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
1257 	}
1258 }
1259 
1260 static void
1261 wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1262 {
1263 	int i;
1264 
1265 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1266 
1267 	for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1268 		struct wpi_tx_data *data = &ring->data[i];
1269 
1270 		if (data->m != NULL) {
1271 			bus_dmamap_sync(ring->data_dmat, data->map,
1272 			    BUS_DMASYNC_POSTWRITE);
1273 			bus_dmamap_unload(ring->data_dmat, data->map);
1274 			m_freem(data->m);
1275 			data->m = NULL;
1276 		}
1277 		if (data->ni != NULL) {
1278 			ieee80211_free_node(data->ni);
1279 			data->ni = NULL;
1280 		}
1281 	}
1282 	/* Clear TX descriptors. */
1283 	memset(ring->desc, 0, ring->desc_dma.size);
1284 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1285 	    BUS_DMASYNC_PREWRITE);
1286 	ring->queued = 0;
1287 	ring->cur = 0;
1288 	ring->pending = 0;
1289 	ring->update = 0;
1290 }
1291 
1292 static void
1293 wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
1294 {
1295 	int i;
1296 
1297 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
1298 
1299 	wpi_dma_contig_free(&ring->desc_dma);
1300 	wpi_dma_contig_free(&ring->cmd_dma);
1301 
1302 	for (i = 0; i < WPI_TX_RING_COUNT; i++) {
1303 		struct wpi_tx_data *data = &ring->data[i];
1304 
1305 		if (data->m != NULL) {
1306 			bus_dmamap_sync(ring->data_dmat, data->map,
1307 			    BUS_DMASYNC_POSTWRITE);
1308 			bus_dmamap_unload(ring->data_dmat, data->map);
1309 			m_freem(data->m);
1310 		}
1311 		if (data->map != NULL)
1312 			bus_dmamap_destroy(ring->data_dmat, data->map);
1313 	}
1314 	if (ring->data_dmat != NULL) {
1315 		bus_dma_tag_destroy(ring->data_dmat);
1316 		ring->data_dmat = NULL;
1317 	}
1318 }
1319 
1320 /*
1321  * Extract various information from EEPROM.
1322  */
1323 static int
1324 wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1325 {
1326 #define WPI_CHK(res) do {		\
1327 	if ((error = res) != 0)		\
1328 		goto fail;		\
1329 } while (0)
1330 	uint8_t i;
1331 	int error;
1332 
1333 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1334 
1335 	/* Adapter has to be powered on for EEPROM access to work. */
1336 	if ((error = wpi_apm_init(sc)) != 0) {
1337 		device_printf(sc->sc_dev,
1338 		    "%s: could not power ON adapter, error %d\n", __func__,
1339 		    error);
1340 		return error;
1341 	}
1342 
1343 	if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) {
1344 		device_printf(sc->sc_dev, "bad EEPROM signature\n");
1345 		error = EIO;
1346 		goto fail;
1347 	}
1348 	/* Clear HW ownership of EEPROM. */
1349 	WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER);
1350 
1351 	/* Read the hardware capabilities, revision and SKU type. */
1352 	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap,
1353 	    sizeof(sc->cap)));
1354 	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,
1355 	    sizeof(sc->rev)));
1356 	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type,
1357 	    sizeof(sc->type)));
1358 
1359 	sc->rev = le16toh(sc->rev);
1360 	DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap,
1361 	    sc->rev, sc->type);
1362 
1363 	/* Read the regulatory domain (4 ASCII characters.) */
1364 	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain,
1365 	    sizeof(sc->domain)));
1366 
1367 	/* Read MAC address. */
1368 	WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr,
1369 	    IEEE80211_ADDR_LEN));
1370 
1371 	/* Read the list of authorized channels. */
1372 	for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++)
1373 		WPI_CHK(wpi_read_eeprom_channels(sc, i));
1374 
1375 	/* Read the list of TX power groups. */
1376 	for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++)
1377 		WPI_CHK(wpi_read_eeprom_group(sc, i));
1378 
1379 fail:	wpi_apm_stop(sc);	/* Power OFF adapter. */
1380 
1381 	DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
1382 	    __func__);
1383 
1384 	return error;
1385 #undef WPI_CHK
1386 }
1387 
1388 /*
1389  * Translate EEPROM flags to net80211.
1390  */
1391 static uint32_t
1392 wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel)
1393 {
1394 	uint32_t nflags;
1395 
1396 	nflags = 0;
1397 	if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0)
1398 		nflags |= IEEE80211_CHAN_PASSIVE;
1399 	if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0)
1400 		nflags |= IEEE80211_CHAN_NOADHOC;
1401 	if (channel->flags & WPI_EEPROM_CHAN_RADAR) {
1402 		nflags |= IEEE80211_CHAN_DFS;
1403 		/* XXX apparently IBSS may still be marked */
1404 		nflags |= IEEE80211_CHAN_NOADHOC;
1405 	}
1406 
1407 	/* XXX HOSTAP uses WPI_MODE_IBSS */
1408 	if (nflags & IEEE80211_CHAN_NOADHOC)
1409 		nflags |= IEEE80211_CHAN_NOHOSTAP;
1410 
1411 	return nflags;
1412 }
1413 
1414 static void
1415 wpi_read_eeprom_band(struct wpi_softc *sc, uint8_t n, int maxchans,
1416     int *nchans, struct ieee80211_channel chans[])
1417 {
1418 	struct wpi_eeprom_chan *channels = sc->eeprom_channels[n];
1419 	const struct wpi_chan_band *band = &wpi_bands[n];
1420 	uint32_t nflags;
1421 	uint8_t bands[IEEE80211_MODE_BYTES];
1422 	uint8_t chan, i;
1423 	int error;
1424 
1425 	memset(bands, 0, sizeof(bands));
1426 
1427 	if (n == 0) {
1428 		setbit(bands, IEEE80211_MODE_11B);
1429 		setbit(bands, IEEE80211_MODE_11G);
1430 	} else
1431 		setbit(bands, IEEE80211_MODE_11A);
1432 
1433 	for (i = 0; i < band->nchan; i++) {
1434 		if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
1435 			DPRINTF(sc, WPI_DEBUG_EEPROM,
1436 			    "Channel Not Valid: %d, band %d\n",
1437 			     band->chan[i],n);
1438 			continue;
1439 		}
1440 
1441 		chan = band->chan[i];
1442 		nflags = wpi_eeprom_channel_flags(&channels[i]);
1443 		error = ieee80211_add_channel(chans, maxchans, nchans,
1444 		    chan, 0, channels[i].maxpwr, nflags, bands);
1445 		if (error != 0)
1446 			break;
1447 
1448 		/* Save maximum allowed TX power for this channel. */
1449 		sc->maxpwr[chan] = channels[i].maxpwr;
1450 
1451 		DPRINTF(sc, WPI_DEBUG_EEPROM,
1452 		    "adding chan %d flags=0x%x maxpwr=%d, offset %d\n",
1453 		    chan, channels[i].flags, sc->maxpwr[chan], *nchans);
1454 	}
1455 }
1456 
1457 /**
1458  * Read the eeprom to find out what channels are valid for the given
1459  * band and update net80211 with what we find.
1460  */
1461 static int
1462 wpi_read_eeprom_channels(struct wpi_softc *sc, uint8_t n)
1463 {
1464 	struct ieee80211com *ic = &sc->sc_ic;
1465 	const struct wpi_chan_band *band = &wpi_bands[n];
1466 	int error;
1467 
1468 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1469 
1470 	error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n],
1471 	    band->nchan * sizeof (struct wpi_eeprom_chan));
1472 	if (error != 0) {
1473 		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1474 		return error;
1475 	}
1476 
1477 	wpi_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans,
1478 	    ic->ic_channels);
1479 
1480 	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1481 
1482 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1483 
1484 	return 0;
1485 }
1486 
1487 static struct wpi_eeprom_chan *
1488 wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c)
1489 {
1490 	int i, j;
1491 
1492 	for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++)
1493 		for (i = 0; i < wpi_bands[j].nchan; i++)
1494 			if (wpi_bands[j].chan[i] == c->ic_ieee &&
1495 			    ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1)
1496 				return &sc->eeprom_channels[j][i];
1497 
1498 	return NULL;
1499 }
1500 
1501 static void
1502 wpi_getradiocaps(struct ieee80211com *ic,
1503     int maxchans, int *nchans, struct ieee80211_channel chans[])
1504 {
1505 	struct wpi_softc *sc = ic->ic_softc;
1506 	int i;
1507 
1508 	/* Parse the list of authorized channels. */
1509 	for (i = 0; i < WPI_CHAN_BANDS_COUNT && *nchans < maxchans; i++)
1510 		wpi_read_eeprom_band(sc, i, maxchans, nchans, chans);
1511 }
1512 
1513 /*
1514  * Enforce flags read from EEPROM.
1515  */
1516 static int
1517 wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1518     int nchan, struct ieee80211_channel chans[])
1519 {
1520 	struct wpi_softc *sc = ic->ic_softc;
1521 	int i;
1522 
1523 	for (i = 0; i < nchan; i++) {
1524 		struct ieee80211_channel *c = &chans[i];
1525 		struct wpi_eeprom_chan *channel;
1526 
1527 		channel = wpi_find_eeprom_channel(sc, c);
1528 		if (channel == NULL) {
1529 			ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
1530 			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
1531 			return EINVAL;
1532 		}
1533 		c->ic_flags |= wpi_eeprom_channel_flags(channel);
1534 	}
1535 
1536 	return 0;
1537 }
1538 
1539 static int
1540 wpi_read_eeprom_group(struct wpi_softc *sc, uint8_t n)
1541 {
1542 	struct wpi_power_group *group = &sc->groups[n];
1543 	struct wpi_eeprom_group rgroup;
1544 	int i, error;
1545 
1546 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1547 
1548 	if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32,
1549 	    &rgroup, sizeof rgroup)) != 0) {
1550 		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1551 		return error;
1552 	}
1553 
1554 	/* Save TX power group information. */
1555 	group->chan   = rgroup.chan;
1556 	group->maxpwr = rgroup.maxpwr;
1557 	/* Retrieve temperature at which the samples were taken. */
1558 	group->temp   = (int16_t)le16toh(rgroup.temp);
1559 
1560 	DPRINTF(sc, WPI_DEBUG_EEPROM,
1561 	    "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan,
1562 	    group->maxpwr, group->temp);
1563 
1564 	for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
1565 		group->samples[i].index = rgroup.samples[i].index;
1566 		group->samples[i].power = rgroup.samples[i].power;
1567 
1568 		DPRINTF(sc, WPI_DEBUG_EEPROM,
1569 		    "\tsample %d: index=%d power=%d\n", i,
1570 		    group->samples[i].index, group->samples[i].power);
1571 	}
1572 
1573 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1574 
1575 	return 0;
1576 }
1577 
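/*
 * Firmware node table helpers.  sc->nodesmsk is a bitmask of station IDs
 * currently in use: IBSS/hostap peers are assigned IDs in the range
 * [WPI_ID_IBSS_MIN, WPI_ID_IBSS_MAX], while station mode always uses
 * WPI_ID_BSS for the access point.
 */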
1578 static __inline uint8_t
1579 wpi_add_node_entry_adhoc(struct wpi_softc *sc)
1580 {
1581 	uint8_t newid = WPI_ID_IBSS_MIN;
1582 
1583 	for (; newid <= WPI_ID_IBSS_MAX; newid++) {
1584 		if ((sc->nodesmsk & (1 << newid)) == 0) {
1585 			sc->nodesmsk |= 1 << newid;
1586 			return newid;
1587 		}
1588 	}
1589 
1590 	return WPI_ID_UNDEFINED;
1591 }
1592 
1593 static __inline uint8_t
1594 wpi_add_node_entry_sta(struct wpi_softc *sc)
1595 {
1596 	sc->nodesmsk |= 1 << WPI_ID_BSS;
1597 
1598 	return WPI_ID_BSS;
1599 }
1600 
1601 static __inline int
1602 wpi_check_node_entry(struct wpi_softc *sc, uint8_t id)
1603 {
1604 	if (id == WPI_ID_UNDEFINED)
1605 		return 0;
1606 
1607 	return (sc->nodesmsk >> id) & 1;
1608 }
1609 
1610 static __inline void
1611 wpi_clear_node_table(struct wpi_softc *sc)
1612 {
1613 	sc->nodesmsk = 0;
1614 }
1615 
1616 static __inline void
1617 wpi_del_node_entry(struct wpi_softc *sc, uint8_t id)
1618 {
1619 	sc->nodesmsk &= ~(1 << id);
1620 }
1621 
1622 static struct ieee80211_node *
1623 wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1624 {
1625 	struct wpi_node *wn;
1626 
1627 	wn = malloc(sizeof (struct wpi_node), M_80211_NODE,
1628 	    M_NOWAIT | M_ZERO);
1629 
1630 	if (wn == NULL)
1631 		return NULL;
1632 
1633 	wn->id = WPI_ID_UNDEFINED;
1634 
1635 	return &wn->ni;
1636 }
1637 
1638 static void
1639 wpi_node_free(struct ieee80211_node *ni)
1640 {
1641 	struct wpi_softc *sc = ni->ni_ic->ic_softc;
1642 	struct wpi_node *wn = WPI_NODE(ni);
1643 
1644 	if (wn->id != WPI_ID_UNDEFINED) {
1645 		WPI_NT_LOCK(sc);
1646 		if (wpi_check_node_entry(sc, wn->id)) {
1647 			wpi_del_node_entry(sc, wn->id);
1648 			wpi_del_node(sc, ni);
1649 		}
1650 		WPI_NT_UNLOCK(sc);
1651 	}
1652 
1653 	sc->sc_node_free(ni);
1654 }
1655 
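/*
 * Return non-zero when the RXON BSS filter is enabled, i.e. when the
 * firmware has been configured with the BSS we are joined to.
 */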
1656 static __inline int
1657 wpi_check_bss_filter(struct wpi_softc *sc)
1658 {
1659 	return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0;
1660 }
1661 
1662 static void
1663 wpi_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
1664     const struct ieee80211_rx_stats *rxs,
1665     int rssi, int nf)
1666 {
1667 	struct ieee80211vap *vap = ni->ni_vap;
1668 	struct wpi_softc *sc = vap->iv_ic->ic_softc;
1669 	struct wpi_vap *wvp = WPI_VAP(vap);
1670 	uint64_t ni_tstamp, rx_tstamp;
1671 
1672 	wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
1673 
1674 	if (vap->iv_state == IEEE80211_S_RUN &&
1675 	    (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
1676 	    subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
1677 		ni_tstamp = le64toh(ni->ni_tstamp.tsf);
1678 		rx_tstamp = le64toh(sc->rx_tstamp);
1679 
1680 		if (ni_tstamp >= rx_tstamp) {
1681 			DPRINTF(sc, WPI_DEBUG_STATE,
1682 			    "ibss merge, tsf %ju tstamp %ju\n",
1683 			    (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
1684 			(void) ieee80211_ibss_merge(ni);
1685 		}
1686 	}
1687 }
1688 
1689 static void
1690 wpi_restore_node(void *arg, struct ieee80211_node *ni)
1691 {
1692 	struct wpi_softc *sc = arg;
1693 	struct wpi_node *wn = WPI_NODE(ni);
1694 	int error;
1695 
1696 	WPI_NT_LOCK(sc);
1697 	if (wn->id != WPI_ID_UNDEFINED) {
1698 		wn->id = WPI_ID_UNDEFINED;
1699 		if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
1700 			device_printf(sc->sc_dev,
1701 			    "%s: could not add IBSS node, error %d\n",
1702 			    __func__, error);
1703 		}
1704 	}
1705 	WPI_NT_UNLOCK(sc);
1706 }
1707 
1708 static void
1709 wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp)
1710 {
1711 	struct ieee80211com *ic = &sc->sc_ic;
1712 
1713 	/* Set group keys once. */
1714 	WPI_NT_LOCK(sc);
1715 	wvp->wv_gtk = 0;
1716 	WPI_NT_UNLOCK(sc);
1717 
1718 	ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc);
1719 	ieee80211_crypto_reload_keys(ic);
1720 }
1721 
1722 /**
1723  * Called by net80211 whenever there is a change in the 802.11 state machine.
1724  */
1725 static int
1726 wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1727 {
1728 	struct wpi_vap *wvp = WPI_VAP(vap);
1729 	struct ieee80211com *ic = vap->iv_ic;
1730 	struct wpi_softc *sc = ic->ic_softc;
1731 	int error = 0;
1732 
1733 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
1734 
1735 	WPI_TXQ_LOCK(sc);
1736 	if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) {
1737 		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1738 		WPI_TXQ_UNLOCK(sc);
1739 
1740 		return ENXIO;
1741 	}
1742 	WPI_TXQ_UNLOCK(sc);
1743 
1744 	DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1745 		ieee80211_state_name[vap->iv_state],
1746 		ieee80211_state_name[nstate]);
1747 
1748 	if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
1749 		if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
1750 			device_printf(sc->sc_dev,
1751 			    "%s: could not set power saving level\n",
1752 			    __func__);
1753 			return error;
1754 		}
1755 
1756 		wpi_set_led(sc, WPI_LED_LINK, 1, 0);
1757 	}
1758 
1759 	switch (nstate) {
1760 	case IEEE80211_S_SCAN:
1761 		WPI_RXON_LOCK(sc);
1762 		if (wpi_check_bss_filter(sc) != 0) {
1763 			sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
1764 			if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
1765 				device_printf(sc->sc_dev,
1766 				    "%s: could not send RXON\n", __func__);
1767 			}
1768 		}
1769 		WPI_RXON_UNLOCK(sc);
1770 		break;
1771 
1772 	case IEEE80211_S_ASSOC:
1773 		if (vap->iv_state != IEEE80211_S_RUN)
1774 			break;
1775 		/* FALLTHROUGH */
1776 	case IEEE80211_S_AUTH:
1777 		/*
1778 		 * NB: do not optimize the AUTH -> AUTH state transition -
1779 		 * this will break powersave with non-QoS APs!
1780 		 */
1781 
1782 		/*
1783 		 * The node must be registered in the firmware before auth.
1784 		 * Also the associd must be cleared on RUN -> ASSOC
1785 		 * transitions.
1786 		 */
1787 		if ((error = wpi_auth(sc, vap)) != 0) {
1788 			device_printf(sc->sc_dev,
1789 			    "%s: could not move to AUTH state, error %d\n",
1790 			    __func__, error);
1791 		}
1792 		break;
1793 
1794 	case IEEE80211_S_RUN:
1795 		/*
1796 		 * RUN -> RUN transition:
1797 		 * STA mode: Just restart the timers.
1798 		 * IBSS mode: Process IBSS merge.
1799 		 */
1800 		if (vap->iv_state == IEEE80211_S_RUN) {
1801 			if (vap->iv_opmode != IEEE80211_M_IBSS) {
1802 				WPI_RXON_LOCK(sc);
1803 				wpi_calib_timeout(sc);
1804 				WPI_RXON_UNLOCK(sc);
1805 				break;
1806 			} else {
1807 				/*
1808 				 * Drop the BSS_FILTER bit
1809 				 * (there is no other way to change the bssid).
1810 				 */
1811 				WPI_RXON_LOCK(sc);
1812 				sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
1813 				if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
1814 					device_printf(sc->sc_dev,
1815 					    "%s: could not send RXON\n",
1816 					    __func__);
1817 				}
1818 				WPI_RXON_UNLOCK(sc);
1819 
1820 				/* Restore everything that was lost. */
1821 				wpi_restore_node_table(sc, wvp);
1822 
1823 				/* XXX set conditionally? */
1824 				wpi_updateedca(ic);
1825 			}
1826 		}
1827 
1828 		/*
1829 		 * !RUN -> RUN requires setting the association id
1830 		 * which is done with a firmware cmd.  We also defer
1831 		 * starting the timers until that work is done.
1832 		 */
1833 		if ((error = wpi_run(sc, vap)) != 0) {
1834 			device_printf(sc->sc_dev,
1835 			    "%s: could not move to RUN state\n", __func__);
1836 		}
1837 		break;
1838 
1839 	default:
1840 		break;
1841 	}
1842 	if (error != 0) {
1843 		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
1844 		return error;
1845 	}
1846 
1847 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
1848 
1849 	return wvp->wv_newstate(vap, nstate, arg);
1850 }
1851 
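/*
 * Periodic (60 second) callout: run power calibration while the BSS
 * filter is enabled and reschedule itself.
 */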
1852 static void
1853 wpi_calib_timeout(void *arg)
1854 {
1855 	struct wpi_softc *sc = arg;
1856 
1857 	if (wpi_check_bss_filter(sc) == 0)
1858 		return;
1859 
1860 	wpi_power_calibration(sc);
1861 
1862 	callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
1863 }
1864 
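/*
 * Map an 802.11 rate (in 500 kbps units) to the PLCP value used by the
 * firmware; OFDM rates first, then CCK.
 */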
1865 static __inline uint8_t
1866 rate2plcp(const uint8_t rate)
1867 {
1868 	switch (rate) {
1869 	case 12:	return 0xd;
1870 	case 18:	return 0xf;
1871 	case 24:	return 0x5;
1872 	case 36:	return 0x7;
1873 	case 48:	return 0x9;
1874 	case 72:	return 0xb;
1875 	case 96:	return 0x1;
1876 	case 108:	return 0x3;
1877 	case 2:		return 10;
1878 	case 4:		return 20;
1879 	case 11:	return 55;
1880 	case 22:	return 110;
1881 	default:	return 0;
1882 	}
1883 }
1884 
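/* Inverse of rate2plcp(): map a PLCP value back to an 802.11 rate. */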
1885 static __inline uint8_t
1886 plcp2rate(const uint8_t plcp)
1887 {
1888 	switch (plcp) {
1889 	case 0xd:	return 12;
1890 	case 0xf:	return 18;
1891 	case 0x5:	return 24;
1892 	case 0x7:	return 36;
1893 	case 0x9:	return 48;
1894 	case 0xb:	return 72;
1895 	case 0x1:	return 96;
1896 	case 0x3:	return 108;
1897 	case 10:	return 2;
1898 	case 20:	return 4;
1899 	case 55:	return 11;
1900 	case 110:	return 22;
1901 	default:	return 0;
1902 	}
1903 }
1904 
1905 /* Quickly determine if a given rate is CCK or OFDM. */
1906 #define WPI_RATE_IS_OFDM(rate)	((rate) >= 12 && (rate) != 22)
1907 
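/*
 * Process an RX_DONE notification: validate the frame, restock the RX
 * ring with a fresh mbuf and hand the received frame to net80211.
 */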
1908 static void
1909 wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc,
1910     struct wpi_rx_data *data)
1911 {
1912 	struct epoch_tracker et;
1913 	struct ieee80211com *ic = &sc->sc_ic;
1914 	struct wpi_rx_ring *ring = &sc->rxq;
1915 	struct wpi_rx_stat *stat;
1916 	struct wpi_rx_head *head;
1917 	struct wpi_rx_tail *tail;
1918 	struct ieee80211_frame *wh;
1919 	struct ieee80211_node *ni;
1920 	struct mbuf *m, *m1;
1921 	bus_addr_t paddr;
1922 	uint32_t flags;
1923 	uint16_t len;
1924 	int error;
1925 
1926 	stat = (struct wpi_rx_stat *)(desc + 1);
1927 
1928 	if (__predict_false(stat->len > WPI_STAT_MAXLEN)) {
1929 		device_printf(sc->sc_dev, "invalid RX statistic header\n");
1930 		goto fail1;
1931 	}
1932 
1933 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
1934 	head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len);
1935 	len = le16toh(head->len);
1936 	tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len);
1937 	flags = le32toh(tail->flags);
1938 
1939 	DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d"
1940 	    " rate %x chan %d tstamp %ju\n", __func__, ring->cur,
1941 	    le32toh(desc->len), len, (int8_t)stat->rssi,
1942 	    head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp));
1943 
1944 	/* Discard frames with a bad FCS early. */
1945 	if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
1946 		DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n",
1947 		    __func__, flags);
1948 		goto fail1;
1949 	}
1950 	/* Discard frames that are too short. */
1951 	if (len < sizeof (struct ieee80211_frame_ack)) {
1952 		DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n",
1953 		    __func__, len);
1954 		goto fail1;
1955 	}
1956 
1957 	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1958 	if (__predict_false(m1 == NULL)) {
1959 		DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n",
1960 		    __func__);
1961 		goto fail1;
1962 	}
1963 	bus_dmamap_unload(ring->data_dmat, data->map);
1964 
1965 	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
1966 	    MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1967 	if (__predict_false(error != 0 && error != EFBIG)) {
1968 		device_printf(sc->sc_dev,
1969 		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1970 		m_freem(m1);
1971 
1972 		/* Try to reload the old mbuf. */
1973 		error = bus_dmamap_load(ring->data_dmat, data->map,
1974 		    mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
1975 		    &paddr, BUS_DMA_NOWAIT);
1976 		if (error != 0 && error != EFBIG) {
1977 			panic("%s: could not load old RX mbuf", __func__);
1978 		}
1979 		/* Physical address may have changed. */
1980 		ring->desc[ring->cur] = htole32(paddr);
1981 		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
1982 		    BUS_DMASYNC_PREWRITE);
1983 		goto fail1;
1984 	}
1985 
1986 	m = data->m;
1987 	data->m = m1;
1988 	/* Update RX descriptor. */
1989 	ring->desc[ring->cur] = htole32(paddr);
1990 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1991 	    BUS_DMASYNC_PREWRITE);
1992 
1993 	/* Finalize mbuf. */
1994 	m->m_data = (caddr_t)(head + 1);
1995 	m->m_pkthdr.len = m->m_len = len;
1996 
1997 	/* Grab a reference to the source node. */
1998 	wh = mtod(m, struct ieee80211_frame *);
1999 
2000 	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
2001 	    (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) {
2002 		/* Check whether decryption was successful or not. */
2003 		if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) {
2004 			DPRINTF(sc, WPI_DEBUG_RECV,
2005 			    "CCMP decryption failed 0x%x\n", flags);
2006 			goto fail2;
2007 		}
2008 		m->m_flags |= M_WEP;
2009 	}
2010 
2011 	if (len >= sizeof(struct ieee80211_frame_min))
2012 		ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2013 	else
2014 		ni = NULL;
2015 
2016 	sc->rx_tstamp = tail->tstamp;
2017 
2018 	if (ieee80211_radiotap_active(ic)) {
2019 		struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap;
2020 
2021 		tap->wr_flags = 0;
2022 		if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE))
2023 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2024 		tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET);
2025 		tap->wr_dbm_antnoise = WPI_RSSI_OFFSET;
2026 		tap->wr_tsft = tail->tstamp;
2027 		tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf;
2028 		tap->wr_rate = plcp2rate(head->plcp);
2029 	}
2030 
2031 	WPI_UNLOCK(sc);
2032 	NET_EPOCH_ENTER(et);
2033 
2034 	/* Send the frame to the 802.11 layer. */
2035 	if (ni != NULL) {
2036 		(void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET);
2037 		/* Node is no longer needed. */
2038 		ieee80211_free_node(ni);
2039 	} else
2040 		(void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET);
2041 
2042 	NET_EPOCH_EXIT(et);
2043 	WPI_LOCK(sc);
2044 
2045 	return;
2046 
2047 fail2:	m_freem(m);
2048 
2049 fail1:	counter_u64_add(ic->ic_ierrors, 1);
2050 }
2051 
2052 static void
2053 wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc,
2054     struct wpi_rx_data *data)
2055 {
2056 	/* Ignore */
2057 }
2058 
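/*
 * Process a TX_DONE notification: unmap the frame, feed the transmit
 * status to the rate control module and complete the mbuf/node.
 */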
2059 static void
2060 wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
2061 {
2062 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
2063 	struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
2064 	struct wpi_tx_data *data = &ring->data[desc->idx];
2065 	struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1);
2066 	struct mbuf *m;
2067 	struct ieee80211_node *ni;
2068 	uint32_t status = le32toh(stat->status);
2069 
2070 	KASSERT(data->ni != NULL, ("no node"));
2071 	KASSERT(data->m != NULL, ("no mbuf"));
2072 
2073 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
2074 
2075 	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: "
2076 	    "qid %d idx %d retries %d btkillcnt %d rate %x duration %d "
2077 	    "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt,
2078 	    stat->btkillcnt, stat->rate, le32toh(stat->duration), status);
2079 
2080 	/* Unmap and free mbuf. */
2081 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2082 	bus_dmamap_unload(ring->data_dmat, data->map);
2083 	m = data->m, data->m = NULL;
2084 	ni = data->ni, data->ni = NULL;
2085 
2086 	/* Restore frame header. */
2087 	KASSERT(M_LEADINGSPACE(m) >= data->hdrlen, ("no frame header!"));
2088 	M_PREPEND(m, data->hdrlen, M_NOWAIT);
2089 	KASSERT(m != NULL, ("%s: m is NULL\n", __func__));
2090 
2091 	/*
2092 	 * Update rate control statistics for the node.
2093 	 */
2094 	txs->pktlen = m->m_pkthdr.len;
2095 	txs->short_retries = stat->rtsfailcnt;
2096 	txs->long_retries = stat->ackfailcnt / WPI_NTRIES_DEFAULT;
2097 	if (!(status & WPI_TX_STATUS_FAIL))
2098 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
2099 	else {
2100 		switch (status & 0xff) {
2101 		case WPI_TX_STATUS_FAIL_SHORT_LIMIT:
2102 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
2103 			break;
2104 		case WPI_TX_STATUS_FAIL_LONG_LIMIT:
2105 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
2106 			break;
2107 		case WPI_TX_STATUS_FAIL_LIFE_EXPIRE:
2108 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
2109 			break;
2110 		default:
2111 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
2112 			break;
2113 		}
2114 	}
2115 
2116 	ieee80211_ratectl_tx_complete(ni, txs);
2117 	ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0);
2118 
2119 	WPI_TXQ_STATE_LOCK(sc);
2120 	if (--ring->queued > 0)
2121 		callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);
2122 	else
2123 		callout_stop(&sc->tx_timeout);
2124 	WPI_TXQ_STATE_UNLOCK(sc);
2125 
2126 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
2127 }
2128 
2129 /*
2130  * Process a "command done" firmware notification.  This is where we wake up
2131  * processes waiting for a synchronous command completion.
2132  */
2133 static void
2134 wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
2135 {
2136 	struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
2137 	struct wpi_tx_data *data;
2138 	struct wpi_tx_cmd *cmd;
2139 
2140 	DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x "
2141 				   "type %s len %d\n", desc->qid, desc->idx,
2142 				   desc->flags, wpi_cmd_str(desc->type),
2143 				   le32toh(desc->len));
2144 
2145 	if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM)
2146 		return;	/* Not a command ack. */
2147 
2148 	KASSERT(ring->queued == 0, ("ring->queued must be 0"));
2149 
2150 	data = &ring->data[desc->idx];
2151 	cmd = &ring->cmd[desc->idx];
2152 
2153 	/* If the command was mapped in an mbuf, free it. */
2154 	if (data->m != NULL) {
2155 		bus_dmamap_sync(ring->data_dmat, data->map,
2156 		    BUS_DMASYNC_POSTWRITE);
2157 		bus_dmamap_unload(ring->data_dmat, data->map);
2158 		m_freem(data->m);
2159 		data->m = NULL;
2160 	}
2161 
2162 	wakeup(cmd);
2163 
2164 	if (desc->type == WPI_CMD_SET_POWER_MODE) {
2165 		struct wpi_pmgt_cmd *pcmd = (struct wpi_pmgt_cmd *)cmd->data;
2166 
2167 		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
2168 		    BUS_DMASYNC_POSTREAD);
2169 
2170 		WPI_TXQ_LOCK(sc);
2171 		if (le16toh(pcmd->flags) & WPI_PS_ALLOW_SLEEP) {
2172 			sc->sc_update_rx_ring = wpi_update_rx_ring_ps;
2173 			sc->sc_update_tx_ring = wpi_update_tx_ring_ps;
2174 		} else {
2175 			sc->sc_update_rx_ring = wpi_update_rx_ring;
2176 			sc->sc_update_tx_ring = wpi_update_tx_ring;
2177 		}
2178 		WPI_TXQ_UNLOCK(sc);
2179 	}
2180 }
2181 
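/*
 * Walk the RX ring and dispatch all pending firmware notifications
 * (received frames, TX completions, statistics, state changes, ...).
 */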
2182 static void
2183 wpi_notif_intr(struct wpi_softc *sc)
2184 {
2185 	struct ieee80211com *ic = &sc->sc_ic;
2186 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2187 	uint32_t hw;
2188 
2189 	bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
2190 	    BUS_DMASYNC_POSTREAD);
2191 
2192 	hw = le32toh(sc->shared->next) & 0xfff;
2193 	hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;
2194 
2195 	while (sc->rxq.cur != hw) {
2196 		sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
2197 
2198 		struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2199 		struct wpi_rx_desc *desc;
2200 
2201 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2202 		    BUS_DMASYNC_POSTREAD);
2203 		desc = mtod(data->m, struct wpi_rx_desc *);
2204 
2205 		DPRINTF(sc, WPI_DEBUG_NOTIFY,
2206 		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
2207 		    __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags,
2208 		    desc->type, wpi_cmd_str(desc->type), le32toh(desc->len));
2209 
2210 		if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) {
2211 			/* Reply to a command. */
2212 			wpi_cmd_done(sc, desc);
2213 		}
2214 
2215 		switch (desc->type) {
2216 		case WPI_RX_DONE:
2217 			/* An 802.11 frame has been received. */
2218 			wpi_rx_done(sc, desc, data);
2219 
2220 			if (__predict_false(sc->sc_running == 0)) {
2221 				/* wpi_stop() was called. */
2222 				return;
2223 			}
2224 
2225 			break;
2226 
2227 		case WPI_TX_DONE:
2228 			/* An 802.11 frame has been transmitted. */
2229 			wpi_tx_done(sc, desc);
2230 			break;
2231 
2232 		case WPI_RX_STATISTICS:
2233 		case WPI_BEACON_STATISTICS:
2234 			wpi_rx_statistics(sc, desc, data);
2235 			break;
2236 
2237 		case WPI_BEACON_MISSED:
2238 		{
2239 			struct wpi_beacon_missed *miss =
2240 			    (struct wpi_beacon_missed *)(desc + 1);
2241 			uint32_t expected, misses, received, threshold;
2242 
2243 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2244 			    BUS_DMASYNC_POSTREAD);
2245 
2246 			misses = le32toh(miss->consecutive);
2247 			expected = le32toh(miss->expected);
2248 			received = le32toh(miss->received);
2249 			threshold = MAX(2, vap->iv_bmissthreshold);
2250 
2251 			DPRINTF(sc, WPI_DEBUG_BMISS,
2252 			    "%s: beacons missed %u(%u) (received %u/%u)\n",
2253 			    __func__, misses, le32toh(miss->total), received,
2254 			    expected);
2255 
2256 			if (misses >= threshold ||
2257 			    (received == 0 && expected >= threshold)) {
2258 				WPI_RXON_LOCK(sc);
2259 				if (callout_pending(&sc->scan_timeout)) {
2260 					wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL,
2261 					    0, 1);
2262 				}
2263 				WPI_RXON_UNLOCK(sc);
2264 				if (vap->iv_state == IEEE80211_S_RUN &&
2265 				    (ic->ic_flags & IEEE80211_F_SCAN) == 0)
2266 					ieee80211_beacon_miss(ic);
2267 			}
2268 
2269 			break;
2270 		}
2271 #ifdef WPI_DEBUG
2272 		case WPI_BEACON_SENT:
2273 		{
2274 			struct wpi_tx_stat *stat =
2275 			    (struct wpi_tx_stat *)(desc + 1);
2276 			uint64_t *tsf = (uint64_t *)(stat + 1);
2277 			uint32_t *mode = (uint32_t *)(tsf + 1);
2278 
2279 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2280 			    BUS_DMASYNC_POSTREAD);
2281 
2282 			DPRINTF(sc, WPI_DEBUG_BEACON,
2283 			    "beacon sent: rts %u, ack %u, btkill %u, rate %u, "
2284 			    "duration %u, status %x, tsf %ju, mode %x\n",
2285 			    stat->rtsfailcnt, stat->ackfailcnt,
2286 			    stat->btkillcnt, stat->rate, le32toh(stat->duration),
2287 			    le32toh(stat->status), le64toh(*tsf),
2288 			    le32toh(*mode));
2289 
2290 			break;
2291 		}
2292 #endif
2293 		case WPI_UC_READY:
2294 		{
2295 			struct wpi_ucode_info *uc =
2296 			    (struct wpi_ucode_info *)(desc + 1);
2297 
2298 			/* The microcontroller is ready. */
2299 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2300 			    BUS_DMASYNC_POSTREAD);
2301 			DPRINTF(sc, WPI_DEBUG_RESET,
2302 			    "microcode alive notification version=%d.%d "
2303 			    "subtype=%x alive=%x\n", uc->major, uc->minor,
2304 			    uc->subtype, le32toh(uc->valid));
2305 
2306 			if (le32toh(uc->valid) != 1) {
2307 				device_printf(sc->sc_dev,
2308 				    "microcontroller initialization failed\n");
2309 				wpi_stop_locked(sc);
2310 				return;
2311 			}
2312 			/* Save the address of the error log in SRAM. */
2313 			sc->errptr = le32toh(uc->errptr);
2314 			break;
2315 		}
2316 		case WPI_STATE_CHANGED:
2317 		{
2318 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2319 			    BUS_DMASYNC_POSTREAD);
2320 
2321 			uint32_t *status = (uint32_t *)(desc + 1);
2322 
2323 			DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n",
2324 			    le32toh(*status));
2325 
2326 			if (le32toh(*status) & 1) {
2327 				WPI_NT_LOCK(sc);
2328 				wpi_clear_node_table(sc);
2329 				WPI_NT_UNLOCK(sc);
2330 				ieee80211_runtask(ic,
2331 				    &sc->sc_radiooff_task);
2332 				return;
2333 			}
2334 			break;
2335 		}
2336 #ifdef WPI_DEBUG
2337 		case WPI_START_SCAN:
2338 		{
2339 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2340 			    BUS_DMASYNC_POSTREAD);
2341 
2342 			struct wpi_start_scan *scan =
2343 			    (struct wpi_start_scan *)(desc + 1);
2344 			DPRINTF(sc, WPI_DEBUG_SCAN,
2345 			    "%s: scanning channel %d status %x\n",
2346 			    __func__, scan->chan, le32toh(scan->status));
2347 
2348 			break;
2349 		}
2350 #endif
2351 		case WPI_STOP_SCAN:
2352 		{
2353 			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2354 			    BUS_DMASYNC_POSTREAD);
2355 
2356 			struct wpi_stop_scan *scan =
2357 			    (struct wpi_stop_scan *)(desc + 1);
2358 
2359 			DPRINTF(sc, WPI_DEBUG_SCAN,
2360 			    "scan finished nchan=%d status=%d chan=%d\n",
2361 			    scan->nchan, scan->status, scan->chan);
2362 
2363 			WPI_RXON_LOCK(sc);
2364 			callout_stop(&sc->scan_timeout);
2365 			WPI_RXON_UNLOCK(sc);
2366 			if (scan->status == WPI_SCAN_ABORTED)
2367 				ieee80211_cancel_scan(vap);
2368 			else
2369 				ieee80211_scan_next(vap);
2370 			break;
2371 		}
2372 		}
2373 
2374 		if (sc->rxq.cur % 8 == 0) {
2375 			/* Tell the firmware what we have processed. */
2376 			sc->sc_update_rx_ring(sc);
2377 		}
2378 	}
2379 }
2380 
2381 /*
2382  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2383  * from power-down sleep mode.
2384  */
2385 static void
2386 wpi_wakeup_intr(struct wpi_softc *sc)
2387 {
2388 	int qid;
2389 
2390 	DPRINTF(sc, WPI_DEBUG_PWRSAVE,
2391 	    "%s: ucode wakeup from power-down sleep\n", __func__);
2392 
2393 	/* Wakeup RX and TX rings. */
2394 	if (sc->rxq.update) {
2395 		sc->rxq.update = 0;
2396 		wpi_update_rx_ring(sc);
2397 	}
2398 	WPI_TXQ_LOCK(sc);
2399 	for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) {
2400 		struct wpi_tx_ring *ring = &sc->txq[qid];
2401 
2402 		if (ring->update) {
2403 			ring->update = 0;
2404 			wpi_update_tx_ring(sc, ring);
2405 		}
2406 	}
2407 	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
2408 	WPI_TXQ_UNLOCK(sc);
2409 }
2410 
2411 /*
2412  * Dump selected CSR and internal (PRPH) registers for debugging.
2413  */
2414 #ifdef WPI_DEBUG
2415 static void
2416 wpi_debug_registers(struct wpi_softc *sc)
2417 {
2418 	size_t i;
2419 	static const uint32_t csr_tbl[] = {
2420 		WPI_HW_IF_CONFIG,
2421 		WPI_INT,
2422 		WPI_INT_MASK,
2423 		WPI_FH_INT,
2424 		WPI_GPIO_IN,
2425 		WPI_RESET,
2426 		WPI_GP_CNTRL,
2427 		WPI_EEPROM,
2428 		WPI_EEPROM_GP,
2429 		WPI_GIO,
2430 		WPI_UCODE_GP1,
2431 		WPI_UCODE_GP2,
2432 		WPI_GIO_CHICKEN,
2433 		WPI_ANA_PLL,
2434 		WPI_DBG_HPET_MEM,
2435 	};
2436 	static const uint32_t prph_tbl[] = {
2437 		WPI_APMG_CLK_CTRL,
2438 		WPI_APMG_PS,
2439 		WPI_APMG_PCI_STT,
2440 		WPI_APMG_RFKILL,
2441 	};
2442 
2443 	DPRINTF(sc, WPI_DEBUG_REGISTER, "%s", "\n");
2444 
2445 	for (i = 0; i < nitems(csr_tbl); i++) {
2446 		DPRINTF(sc, WPI_DEBUG_REGISTER, "  %-18s: 0x%08x ",
2447 		    wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i]));
2448 
2449 		if ((i + 1) % 2 == 0)
2450 			DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2451 	}
2452 	DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n");
2453 
2454 	if (wpi_nic_lock(sc) == 0) {
2455 		for (i = 0; i < nitems(prph_tbl); i++) {
2456 			DPRINTF(sc, WPI_DEBUG_REGISTER, "  %-18s: 0x%08x ",
2457 			    wpi_get_prph_string(prph_tbl[i]),
2458 			    wpi_prph_read(sc, prph_tbl[i]));
2459 
2460 			if ((i + 1) % 2 == 0)
2461 				DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2462 		}
2463 		DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
2464 		wpi_nic_unlock(sc);
2465 	} else {
2466 		DPRINTF(sc, WPI_DEBUG_REGISTER,
2467 		    "Cannot access internal registers.\n");
2468 	}
2469 }
2470 #endif
2471 
2472 /*
2473  * Dump the error log of the firmware when a firmware panic occurs.  Although
2474  * we can't debug the firmware because it is neither open source nor free, it
2475  * can help us identify certain classes of problems.
2476  */
2477 static void
2478 wpi_fatal_intr(struct wpi_softc *sc)
2479 {
2480 	struct wpi_fw_dump dump;
2481 	uint32_t i, offset, count;
2482 
2483 	/* Check that the error log address is valid. */
2484 	if (sc->errptr < WPI_FW_DATA_BASE ||
2485 	    sc->errptr + sizeof (dump) >
2486 	    WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) {
2487 		printf("%s: bad firmware error log address 0x%08x\n", __func__,
2488 		    sc->errptr);
2489 		return;
2490 	}
2491 	if (wpi_nic_lock(sc) != 0) {
2492 		printf("%s: could not read firmware error log\n", __func__);
2493 		return;
2494 	}
2495 	/* Read number of entries in the log. */
2496 	count = wpi_mem_read(sc, sc->errptr);
2497 	if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) {
2498 		printf("%s: invalid count field (count = %u)\n", __func__,
2499 		    count);
2500 		wpi_nic_unlock(sc);
2501 		return;
2502 	}
2503 	/* Skip "count" field. */
2504 	offset = sc->errptr + sizeof (uint32_t);
2505 	printf("firmware error log (count = %u):\n", count);
2506 	for (i = 0; i < count; i++) {
2507 		wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump,
2508 		    sizeof (dump) / sizeof (uint32_t));
2509 
2510 		printf("  error type = \"%s\" (0x%08X)\n",
2511 		    (dump.desc < nitems(wpi_fw_errmsg)) ?
2512 		        wpi_fw_errmsg[dump.desc] : "UNKNOWN",
2513 		    dump.desc);
2514 		printf("  error data      = 0x%08X\n",
2515 		    dump.data);
2516 		printf("  branch link     = 0x%08X%08X\n",
2517 		    dump.blink[0], dump.blink[1]);
2518 		printf("  interrupt link  = 0x%08X%08X\n",
2519 		    dump.ilink[0], dump.ilink[1]);
2520 		printf("  time            = %u\n", dump.time);
2521 
2522 		offset += sizeof (dump);
2523 	}
2524 	wpi_nic_unlock(sc);
2525 	/* Dump driver status (TX and RX rings) while we're here. */
2526 	printf("driver status:\n");
2527 	WPI_TXQ_LOCK(sc);
2528 	for (i = 0; i < WPI_DRV_NTXQUEUES; i++) {
2529 		struct wpi_tx_ring *ring = &sc->txq[i];
2530 		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2531 		    i, ring->qid, ring->cur, ring->queued);
2532 	}
2533 	WPI_TXQ_UNLOCK(sc);
2534 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
2535 }
2536 
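/*
 * Interrupt handler: acknowledge and dispatch pending interrupts and
 * restart the device on a fatal firmware error.
 */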
2537 static void
2538 wpi_intr(void *arg)
2539 {
2540 	struct wpi_softc *sc = arg;
2541 	uint32_t r1, r2;
2542 
2543 	WPI_LOCK(sc);
2544 
2545 	/* Disable interrupts. */
2546 	WPI_WRITE(sc, WPI_INT_MASK, 0);
2547 
2548 	r1 = WPI_READ(sc, WPI_INT);
2549 
2550 	if (__predict_false(r1 == 0xffffffff ||
2551 			   (r1 & 0xfffffff0) == 0xa5a5a5a0))
2552 		goto end;	/* Hardware gone! */
2553 
2554 	r2 = WPI_READ(sc, WPI_FH_INT);
2555 
2556 	DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__,
2557 	    r1, r2);
2558 
2559 	if (r1 == 0 && r2 == 0)
2560 		goto done;	/* Interrupt not for us. */
2561 
2562 	/* Acknowledge interrupts. */
2563 	WPI_WRITE(sc, WPI_INT, r1);
2564 	WPI_WRITE(sc, WPI_FH_INT, r2);
2565 
2566 	if (__predict_false(r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR))) {
2567 		struct ieee80211com *ic = &sc->sc_ic;
2568 
2569 		device_printf(sc->sc_dev, "fatal firmware error\n");
2570 #ifdef WPI_DEBUG
2571 		wpi_debug_registers(sc);
2572 #endif
2573 		wpi_fatal_intr(sc);
2574 		DPRINTF(sc, WPI_DEBUG_HW,
2575 		    "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" :
2576 		    "(Hardware Error)");
2577 		ieee80211_restart_all(ic);
2578 		goto end;
2579 	}
2580 
2581 	if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) ||
2582 	    (r2 & WPI_FH_INT_RX))
2583 		wpi_notif_intr(sc);
2584 
2585 	if (r1 & WPI_INT_ALIVE)
2586 		wakeup(sc);	/* Firmware is alive. */
2587 
2588 	if (r1 & WPI_INT_WAKEUP)
2589 		wpi_wakeup_intr(sc);
2590 
2591 done:
2592 	/* Re-enable interrupts. */
2593 	if (__predict_true(sc->sc_running))
2594 		WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
2595 
2596 end:	WPI_UNLOCK(sc);
2597 }
2598 
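/*
 * Unload and free all staged (pending, not yet kicked) entries of the
 * given TX ring and drop the node references taken for them.
 */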
2599 static void
2600 wpi_free_txfrags(struct wpi_softc *sc, uint16_t ac)
2601 {
2602 	struct wpi_tx_ring *ring;
2603 	struct wpi_tx_data *data;
2604 	uint8_t cur;
2605 
2606 	WPI_TXQ_LOCK(sc);
2607 	ring = &sc->txq[ac];
2608 
2609 	while (ring->pending != 0) {
2610 		ring->pending--;
2611 		cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT;
2612 		data = &ring->data[cur];
2613 
2614 		bus_dmamap_sync(ring->data_dmat, data->map,
2615 		    BUS_DMASYNC_POSTWRITE);
2616 		bus_dmamap_unload(ring->data_dmat, data->map);
2617 		m_freem(data->m);
2618 		data->m = NULL;
2619 
2620 		ieee80211_node_decref(data->ni);
2621 		data->ni = NULL;
2622 	}
2623 
2624 	WPI_TXQ_UNLOCK(sc);
2625 }
2626 
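/*
 * Queue a TX command and its frame on the ring selected by buf->ac.
 * Non-final fragments are only staged; the ring is kicked once the last
 * fragment of the chain has been queued.
 */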
2627 static int
2628 wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf)
2629 {
2630 	struct ieee80211_frame *wh;
2631 	struct wpi_tx_cmd *cmd;
2632 	struct wpi_tx_data *data;
2633 	struct wpi_tx_desc *desc;
2634 	struct wpi_tx_ring *ring;
2635 	struct mbuf *m1;
2636 	bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER];
2637 	uint8_t cur, pad;
2638 	uint16_t hdrlen;
2639 	int error, i, nsegs, totlen, frag;
2640 
2641 	WPI_TXQ_LOCK(sc);
2642 
2643 	KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow"));
2644 
2645 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
2646 
2647 	if (__predict_false(sc->sc_running == 0)) {
2648 		/* wpi_stop() was called */
2649 		error = ENETDOWN;
2650 		goto end;
2651 	}
2652 
2653 	wh = mtod(buf->m, struct ieee80211_frame *);
2654 	hdrlen = ieee80211_anyhdrsize(wh);
2655 	totlen = buf->m->m_pkthdr.len;
2656 	frag = ((buf->m->m_flags & (M_FRAG | M_LASTFRAG)) == M_FRAG);
2657 
2658 	if (__predict_false(totlen < sizeof(struct ieee80211_frame_min))) {
2659 		error = EINVAL;
2660 		goto end;
2661 	}
2662 
2663 	if (hdrlen & 3) {
2664 		/* First segment length must be a multiple of 4. */
2665 		pad = 4 - (hdrlen & 3);
2666 	} else
2667 		pad = 0;
2668 
2669 	ring = &sc->txq[buf->ac];
2670 	cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT;
2671 	desc = &ring->desc[cur];
2672 	data = &ring->data[cur];
2673 
2674 	/* Prepare TX firmware command. */
2675 	cmd = &ring->cmd[cur];
2676 	cmd->code = buf->code;
2677 	cmd->flags = 0;
2678 	cmd->qid = ring->qid;
2679 	cmd->idx = cur;
2680 
2681 	memcpy(cmd->data, buf->data, buf->size);
2682 
2683 	/* Save and trim IEEE802.11 header. */
2684 	memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen);
2685 	m_adj(buf->m, hdrlen);
2686 
2687 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m,
2688 	    segs, &nsegs, BUS_DMA_NOWAIT);
2689 	if (error != 0 && error != EFBIG) {
2690 		device_printf(sc->sc_dev,
2691 		    "%s: can't map mbuf (error %d)\n", __func__, error);
2692 		goto end;
2693 	}
2694 	if (error != 0) {
2695 		/* Too many DMA segments, linearize mbuf. */
2696 		m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1);
2697 		if (m1 == NULL) {
2698 			device_printf(sc->sc_dev,
2699 			    "%s: could not defrag mbuf\n", __func__);
2700 			error = ENOBUFS;
2701 			goto end;
2702 		}
2703 		buf->m = m1;
2704 
2705 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
2706 		    buf->m, segs, &nsegs, BUS_DMA_NOWAIT);
2707 		if (__predict_false(error != 0)) {
2708 			/* XXX fix this (applicable to the iwn(4) too) */
2709 			/*
2710 			 * NB: Do not return error;
2711 			 * original mbuf does not exist anymore.
2712 			 */
2713 			device_printf(sc->sc_dev,
2714 			    "%s: can't map mbuf (error %d)\n", __func__,
2715 			    error);
2716 			if (ring->qid < WPI_CMD_QUEUE_NUM) {
2717 				if_inc_counter(buf->ni->ni_vap->iv_ifp,
2718 				    IFCOUNTER_OERRORS, 1);
2719 				if (!frag)
2720 					ieee80211_free_node(buf->ni);
2721 			}
2722 			m_freem(buf->m);
2723 			error = 0;
2724 			goto end;
2725 		}
2726 	}
2727 
2728 	KASSERT(nsegs < WPI_MAX_SCATTER,
2729 	    ("too many DMA segments, nsegs (%d) should be less than %d",
2730 	     nsegs, WPI_MAX_SCATTER));
2731 
2732 	data->m = buf->m;
2733 	data->ni = buf->ni;
2734 	data->hdrlen = hdrlen;
2735 
2736 	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
2737 	    __func__, ring->qid, cur, totlen, nsegs);
2738 
2739 	/* Fill TX descriptor. */
2740 	desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs);
2741 	/* First DMA segment is used by the TX command. */
2742 	desc->segs[0].addr = htole32(data->cmd_paddr);
2743 	desc->segs[0].len  = htole32(4 + buf->size + hdrlen + pad);
2744 	/* Other DMA segments are for data payload. */
2745 	seg = &segs[0];
2746 	for (i = 1; i <= nsegs; i++) {
2747 		desc->segs[i].addr = htole32(seg->ds_addr);
2748 		desc->segs[i].len  = htole32(seg->ds_len);
2749 		seg++;
2750 	}
2751 
2752 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
2753 	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
2754 	    BUS_DMASYNC_PREWRITE);
2755 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2756 	    BUS_DMASYNC_PREWRITE);
2757 
2758 	ring->pending += 1;
2759 
2760 	if (!frag) {
2761 		if (ring->qid < WPI_CMD_QUEUE_NUM) {
2762 			WPI_TXQ_STATE_LOCK(sc);
2763 			ring->queued += ring->pending;
2764 			callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout,
2765 			    sc);
2766 			WPI_TXQ_STATE_UNLOCK(sc);
2767 		}
2768 
2769 		/* Kick TX ring. */
2770 		ring->cur = (ring->cur + ring->pending) % WPI_TX_RING_COUNT;
2771 		ring->pending = 0;
2772 		sc->sc_update_tx_ring(sc, ring);
2773 	} else
2774 		ieee80211_node_incref(data->ni);
2775 
2776 end:	DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
2777 	    __func__);
2778 
2779 	WPI_TXQ_UNLOCK(sc);
2780 
2781 	return (error);
2782 }
2783 
2784 /*
2785  * Construct the data packet for a transmit buffer.
2786  */
2787 static int
2788 wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
2789 {
2790 	const struct ieee80211_txparam *tp = ni->ni_txparms;
2791 	struct ieee80211vap *vap = ni->ni_vap;
2792 	struct ieee80211com *ic = ni->ni_ic;
2793 	struct wpi_node *wn = WPI_NODE(ni);
2794 	struct ieee80211_frame *wh;
2795 	struct ieee80211_key *k = NULL;
2796 	struct wpi_buf tx_data;
2797 	struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
2798 	uint32_t flags;
2799 	uint16_t ac, qos;
2800 	uint8_t tid, type, rate;
2801 	int swcrypt, ismcast, totlen;
2802 
2803 	wh = mtod(m, struct ieee80211_frame *);
2804 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2805 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2806 	swcrypt = 1;
2807 
2808 	/* Select EDCA Access Category and TX ring for this frame. */
2809 	if (IEEE80211_QOS_HAS_SEQ(wh)) {
2810 		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
2811 		tid = qos & IEEE80211_QOS_TID;
2812 	} else {
2813 		qos = 0;
2814 		tid = 0;
2815 	}
2816 	ac = M_WME_GETAC(m);
2817 
2818 	/* Choose a TX rate index. */
2819 	if (type == IEEE80211_FC0_TYPE_MGT ||
2820 	    type == IEEE80211_FC0_TYPE_CTL ||
2821 	    (m->m_flags & M_EAPOL) != 0)
2822 		rate = tp->mgmtrate;
2823 	else if (ismcast)
2824 		rate = tp->mcastrate;
2825 	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2826 		rate = tp->ucastrate;
2827 	else {
2828 		/* XXX pass pktlen */
2829 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
2830 		rate = ni->ni_txrate;
2831 	}
2832 
2833 	/* Encrypt the frame if need be. */
2834 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2835 		/* Retrieve key for TX. */
2836 		k = ieee80211_crypto_encap(ni, m);
2837 		if (k == NULL)
2838 			return (ENOBUFS);
2839 
2840 		swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
2841 
2842 		/* 802.11 header may have moved. */
2843 		wh = mtod(m, struct ieee80211_frame *);
2844 	}
2845 	totlen = m->m_pkthdr.len;
2846 
2847 	if (ieee80211_radiotap_active_vap(vap)) {
2848 		struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
2849 
2850 		tap->wt_flags = 0;
2851 		tap->wt_rate = rate;
2852 		if (k != NULL)
2853 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2854 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2855 			tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2856 
2857 		ieee80211_radiotap_tx(vap, m);
2858 	}
2859 
2860 	flags = 0;
2861 	if (!ismcast) {
2862 		/* Unicast frame, check if an ACK is expected. */
2863 		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
2864 		    IEEE80211_QOS_ACKPOLICY_NOACK)
2865 			flags |= WPI_TX_NEED_ACK;
2866 	}
2867 
2868 	if (!IEEE80211_QOS_HAS_SEQ(wh))
2869 		flags |= WPI_TX_AUTO_SEQ;
2870 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2871 		flags |= WPI_TX_MORE_FRAG;
2872 
2873 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2874 	if (!ismcast) {
2875 		/* NB: Group frames are sent using CCK in 802.11b/g. */
2876 		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2877 			flags |= WPI_TX_NEED_RTS;
2878 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2879 		    WPI_RATE_IS_OFDM(rate)) {
2880 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2881 				flags |= WPI_TX_NEED_CTS;
2882 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2883 				flags |= WPI_TX_NEED_RTS;
2884 		}
2885 
2886 		if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
2887 			flags |= WPI_TX_FULL_TXOP;
2888 	}
2889 
2890 	memset(tx, 0, sizeof (struct wpi_cmd_data));
2891 	if (type == IEEE80211_FC0_TYPE_MGT) {
2892 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2893 
2894 		/* Tell HW to set timestamp in probe responses. */
2895 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2896 			flags |= WPI_TX_INSERT_TSTAMP;
2897 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2898 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2899 			tx->timeout = htole16(3);
2900 		else
2901 			tx->timeout = htole16(2);
2902 	}
2903 
2904 	if (ismcast || type != IEEE80211_FC0_TYPE_DATA)
2905 		tx->id = WPI_ID_BROADCAST;
2906 	else {
2907 		if (wn->id == WPI_ID_UNDEFINED) {
2908 			device_printf(sc->sc_dev,
2909 			    "%s: undefined node id\n", __func__);
2910 			return (EINVAL);
2911 		}
2912 
2913 		tx->id = wn->id;
2914 	}
2915 
2916 	if (!swcrypt) {
2917 		switch (k->wk_cipher->ic_cipher) {
2918 		case IEEE80211_CIPHER_AES_CCM:
2919 			tx->security = WPI_CIPHER_CCMP;
2920 			break;
2921 
2922 		default:
2923 			break;
2924 		}
2925 
2926 		memcpy(tx->key, k->wk_key, k->wk_keylen);
2927 	}
2928 
2929 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
2930 		struct mbuf *next = m->m_nextpkt;
2931 
2932 		tx->lnext = htole16(next->m_pkthdr.len);
2933 		tx->fnext = htole32(tx->security |
2934 				    (flags & WPI_TX_NEED_ACK) |
2935 				    WPI_NEXT_STA_ID(tx->id));
2936 	}
2937 
2938 	tx->len = htole16(totlen);
2939 	tx->flags = htole32(flags);
2940 	tx->plcp = rate2plcp(rate);
2941 	tx->tid = tid;
2942 	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
2943 	tx->ofdm_mask = 0xff;
2944 	tx->cck_mask = 0x0f;
2945 	tx->rts_ntries = 7;
2946 	tx->data_ntries = tp->maxretry;
2947 
2948 	tx_data.ni = ni;
2949 	tx_data.m = m;
2950 	tx_data.size = sizeof(struct wpi_cmd_data);
2951 	tx_data.code = WPI_CMD_TX_DATA;
2952 	tx_data.ac = ac;
2953 
2954 	return wpi_cmd2(sc, &tx_data);
2955 }
2956 
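/*
 * Construct the data packet for a transmit buffer using caller-supplied
 * BPF parameters (raw transmission path).
 */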
2957 static int
2958 wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m,
2959     struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
2960 {
2961 	struct ieee80211vap *vap = ni->ni_vap;
2962 	struct ieee80211_key *k = NULL;
2963 	struct ieee80211_frame *wh;
2964 	struct wpi_buf tx_data;
2965 	struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
2966 	uint32_t flags;
2967 	uint8_t ac, type, rate;
2968 	int swcrypt, totlen;
2969 
2970 	wh = mtod(m, struct ieee80211_frame *);
2971 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2972 	swcrypt = 1;
2973 
2974 	ac = params->ibp_pri & 3;
2975 
2976 	/* Choose a TX rate index. */
2977 	rate = params->ibp_rate0;
2978 
2979 	flags = 0;
2980 	if (!IEEE80211_QOS_HAS_SEQ(wh))
2981 		flags |= WPI_TX_AUTO_SEQ;
2982 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
2983 		flags |= WPI_TX_NEED_ACK;
2984 	if (params->ibp_flags & IEEE80211_BPF_RTS)
2985 		flags |= WPI_TX_NEED_RTS;
2986 	if (params->ibp_flags & IEEE80211_BPF_CTS)
2987 		flags |= WPI_TX_NEED_CTS;
2988 	if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
2989 		flags |= WPI_TX_FULL_TXOP;
2990 
2991 	/* Encrypt the frame if need be. */
2992 	if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
2993 		/* Retrieve key for TX. */
2994 		k = ieee80211_crypto_encap(ni, m);
2995 		if (k == NULL)
2996 			return (ENOBUFS);
2997 
2998 		swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
2999 
3000 		/* 802.11 header may have moved. */
3001 		wh = mtod(m, struct ieee80211_frame *);
3002 	}
3003 	totlen = m->m_pkthdr.len;
3004 
3005 	if (ieee80211_radiotap_active_vap(vap)) {
3006 		struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
3007 
3008 		tap->wt_flags = 0;
3009 		tap->wt_rate = rate;
3010 		if (params->ibp_flags & IEEE80211_BPF_CRYPTO)
3011 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3012 
3013 		ieee80211_radiotap_tx(vap, m);
3014 	}
3015 
3016 	memset(tx, 0, sizeof (struct wpi_cmd_data));
3017 	if (type == IEEE80211_FC0_TYPE_MGT) {
3018 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3019 
3020 		/* Tell HW to set timestamp in probe responses. */
3021 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3022 			flags |= WPI_TX_INSERT_TSTAMP;
3023 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3024 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3025 			tx->timeout = htole16(3);
3026 		else
3027 			tx->timeout = htole16(2);
3028 	}
3029 
3030 	if (!swcrypt) {
3031 		switch (k->wk_cipher->ic_cipher) {
3032 		case IEEE80211_CIPHER_AES_CCM:
3033 			tx->security = WPI_CIPHER_CCMP;
3034 			break;
3035 
3036 		default:
3037 			break;
3038 		}
3039 
3040 		memcpy(tx->key, k->wk_key, k->wk_keylen);
3041 	}
3042 
3043 	tx->len = htole16(totlen);
3044 	tx->flags = htole32(flags);
3045 	tx->plcp = rate2plcp(rate);
3046 	tx->id = WPI_ID_BROADCAST;
3047 	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
3048 	tx->rts_ntries = params->ibp_try1;
3049 	tx->data_ntries = params->ibp_try0;
3050 
3051 	tx_data.ni = ni;
3052 	tx_data.m = m;
3053 	tx_data.size = sizeof(struct wpi_cmd_data);
3054 	tx_data.code = WPI_CMD_TX_DATA;
3055 	tx_data.ac = ac;
3056 
3057 	return wpi_cmd2(sc, &tx_data);
3058 }
3059 
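/*
 * Return how many frames may still be queued on this AC's TX ring before
 * the high watermark is reached.
 */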
3060 static __inline int
3061 wpi_tx_ring_free_space(struct wpi_softc *sc, uint16_t ac)
3062 {
3063 	struct wpi_tx_ring *ring = &sc->txq[ac];
3064 	int retval;
3065 
3066 	WPI_TXQ_STATE_LOCK(sc);
3067 	retval = WPI_TX_RING_HIMARK - ring->queued;
3068 	WPI_TXQ_STATE_UNLOCK(sc);
3069 
3070 	return retval;
3071 }
3072 
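/*
 * Raw transmit entry point: send a single frame either through the
 * legacy path or with explicit per-frame parameters.
 */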
3073 static int
3074 wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3075     const struct ieee80211_bpf_params *params)
3076 {
3077 	struct ieee80211com *ic = ni->ni_ic;
3078 	struct wpi_softc *sc = ic->ic_softc;
3079 	uint16_t ac;
3080 	int error = 0;
3081 
3082 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3083 
3084 	ac = M_WME_GETAC(m);
3085 
3086 	WPI_TX_LOCK(sc);
3087 
3088 	/* NB: no fragments here */
3089 	if (sc->sc_running == 0 || wpi_tx_ring_free_space(sc, ac) < 1) {
3090 		error = sc->sc_running ? ENOBUFS : ENETDOWN;
3091 		goto unlock;
3092 	}
3093 
3094 	if (params == NULL) {
3095 		/*
3096 		 * Legacy path; interpret frame contents to decide
3097 		 * precisely how to send the frame.
3098 		 */
3099 		error = wpi_tx_data(sc, m, ni);
3100 	} else {
3101 		/*
3102 		 * Caller supplied explicit parameters to use in
3103 		 * sending the frame.
3104 		 */
3105 		error = wpi_tx_data_raw(sc, m, ni, params);
3106 	}
3107 
3108 unlock:	WPI_TX_UNLOCK(sc);
3109 
3110 	if (error != 0) {
3111 		m_freem(m);
3112 		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
3113 
3114 		return error;
3115 	}
3116 
3117 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3118 
3119 	return 0;
3120 }
3121 
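/*
 * Transmit entry point: queue an mbuf (possibly a fragment chain) once
 * enough TX ring space is available for all fragments.
 */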
3122 static int
3123 wpi_transmit(struct ieee80211com *ic, struct mbuf *m)
3124 {
3125 	struct wpi_softc *sc = ic->ic_softc;
3126 	struct ieee80211_node *ni;
3127 	struct mbuf *mnext;
3128 	uint16_t ac;
3129 	int error, nmbufs;
3130 
3131 	WPI_TX_LOCK(sc);
3132 	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__);
3133 
3134 	/* Check if interface is up & running. */
3135 	if (__predict_false(sc->sc_running == 0)) {
3136 		error = ENXIO;
3137 		goto unlock;
3138 	}
3139 
3140 	nmbufs = 1;
3141 	for (mnext = m->m_nextpkt; mnext != NULL; mnext = mnext->m_nextpkt)
3142 		nmbufs++;
3143 
3144 	/* Check for available space. */
3145 	ac = M_WME_GETAC(m);
3146 	if (wpi_tx_ring_free_space(sc, ac) < nmbufs) {
3147 		error = ENOBUFS;
3148 		goto unlock;
3149 	}
3150 
3151 	error = 0;
3152 	ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3153 	do {
3154 		mnext = m->m_nextpkt;
3155 		if (wpi_tx_data(sc, m, ni) != 0) {
3156 			if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS,
3157 			    nmbufs);
3158 			wpi_free_txfrags(sc, ac);
3159 			ieee80211_free_mbuf(m);
3160 			ieee80211_free_node(ni);
3161 			break;
3162 		}
3163 	} while ((m = mnext) != NULL);
3164 
3165 	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__);
3166 
3167 unlock:	WPI_TX_UNLOCK(sc);
3168 
3169 	return (error);
3170 }
3171 
3172 static void
3173 wpi_watchdog_rfkill(void *arg)
3174 {
3175 	struct wpi_softc *sc = arg;
3176 	struct ieee80211com *ic = &sc->sc_ic;
3177 
3178 	DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n");
3179 
3180 	/* No need to lock firmware memory. */
3181 	if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) {
3182 		/* Radio kill switch is still off. */
3183 		callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
3184 		    sc);
3185 	} else
3186 		ieee80211_runtask(ic, &sc->sc_radioon_task);
3187 }
3188 
3189 static void
3190 wpi_scan_timeout(void *arg)
3191 {
3192 	struct wpi_softc *sc = arg;
3193 	struct ieee80211com *ic = &sc->sc_ic;
3194 
3195 	ic_printf(ic, "scan timeout\n");
3196 	ieee80211_restart_all(ic);
3197 }
3198 
3199 static void
3200 wpi_tx_timeout(void *arg)
3201 {
3202 	struct wpi_softc *sc = arg;
3203 	struct ieee80211com *ic = &sc->sc_ic;
3204 
3205 	ic_printf(ic, "device timeout\n");
3206 	ieee80211_restart_all(ic);
3207 }
3208 
3209 static void
3210 wpi_parent(struct ieee80211com *ic)
3211 {
3212 	struct wpi_softc *sc = ic->ic_softc;
3213 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3214 
3215 	if (ic->ic_nrunning > 0) {
3216 		if (wpi_init(sc) == 0) {
3217 			ieee80211_notify_radio(ic, 1);
3218 			ieee80211_start_all(ic);
3219 		} else {
3220 			ieee80211_notify_radio(ic, 0);
3221 			ieee80211_stop(vap);
3222 		}
3223 	} else {
3224 		ieee80211_notify_radio(ic, 0);
3225 		wpi_stop(sc);
3226 	}
3227 }
3228 
3229 /*
3230  * Send a command to the firmware.
3231  */
3232 static int
3233 wpi_cmd(struct wpi_softc *sc, uint8_t code, const void *buf, uint16_t size,
3234     int async)
3235 {
3236 	struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
3237 	struct wpi_tx_desc *desc;
3238 	struct wpi_tx_data *data;
3239 	struct wpi_tx_cmd *cmd;
3240 	struct mbuf *m;
3241 	bus_addr_t paddr;
3242 	uint16_t totlen;
3243 	int error;
3244 
3245 	WPI_TXQ_LOCK(sc);
3246 
3247 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3248 
3249 	if (__predict_false(sc->sc_running == 0)) {
3250 		/* wpi_stop() was called */
3251 		if (code == WPI_CMD_SCAN)
3252 			error = ENETDOWN;
3253 		else
3254 			error = 0;
3255 
3256 		goto fail;
3257 	}
3258 
3259 	if (async == 0)
3260 		WPI_LOCK_ASSERT(sc);
3261 
3262 	DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %u async %d\n",
3263 	    __func__, wpi_cmd_str(code), size, async);
3264 
3265 	desc = &ring->desc[ring->cur];
3266 	data = &ring->data[ring->cur];
3267 	totlen = 4 + size;
3268 
3269 	if (size > sizeof cmd->data) {
3270 		/* Command is too large to fit in a descriptor. */
3271 		if (totlen > MCLBYTES) {
3272 			error = EINVAL;
3273 			goto fail;
3274 		}
3275 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3276 		if (m == NULL) {
3277 			error = ENOMEM;
3278 			goto fail;
3279 		}
3280 		cmd = mtod(m, struct wpi_tx_cmd *);
3281 		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3282 		    totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3283 		if (error != 0) {
3284 			m_freem(m);
3285 			goto fail;
3286 		}
3287 		data->m = m;
3288 	} else {
3289 		cmd = &ring->cmd[ring->cur];
3290 		paddr = data->cmd_paddr;
3291 	}
3292 
3293 	cmd->code = code;
3294 	cmd->flags = 0;
3295 	cmd->qid = ring->qid;
3296 	cmd->idx = ring->cur;
3297 	memcpy(cmd->data, buf, size);
3298 
3299 	desc->nsegs = 1 + (WPI_PAD32(size) << 4);
3300 	desc->segs[0].addr = htole32(paddr);
3301 	desc->segs[0].len  = htole32(totlen);
3302 
3303 	if (size > sizeof cmd->data) {
3304 		bus_dmamap_sync(ring->data_dmat, data->map,
3305 		    BUS_DMASYNC_PREWRITE);
3306 	} else {
3307 		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3308 		    BUS_DMASYNC_PREWRITE);
3309 	}
3310 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3311 	    BUS_DMASYNC_PREWRITE);
3312 
3313 	/* Kick command ring. */
3314 	ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
3315 	sc->sc_update_tx_ring(sc, ring);
3316 
3317 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3318 
3319 	WPI_TXQ_UNLOCK(sc);
3320 
3321 	return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
3322 
3323 fail:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
3324 
3325 	WPI_TXQ_UNLOCK(sc);
3326 
3327 	return error;
3328 }
3329 
3330 /*
3331  * Configure HW multi-rate retries.
3332  */
3333 static int
3334 wpi_mrr_setup(struct wpi_softc *sc)
3335 {
3336 	struct ieee80211com *ic = &sc->sc_ic;
3337 	struct wpi_mrr_setup mrr;
3338 	uint8_t i;
3339 	int error;
3340 
3341 	/* CCK rates (not used with 802.11a). */
3342 	for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) {
3343 		mrr.rates[i].flags = 0;
3344 		mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
3345 		/* Fall back to the next lower CCK rate (if any). */
3346 		mrr.rates[i].next =
3347 		    (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1;
3348 		/* Try twice at this rate before falling back to "next". */
3349 		mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
3350 	}
3351 	/* OFDM rates (not used with 802.11b). */
3352 	for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) {
3353 		mrr.rates[i].flags = 0;
3354 		mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
3355 		/* Fall back to the next lower rate (if any). */
3356 		/* We allow falling back from OFDM/6 to CCK/2 in 11b/g mode. */
3357 		mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ?
3358 		    ((ic->ic_curmode == IEEE80211_MODE_11A) ?
3359 			WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) :
3360 		    i - 1;
3361 		/* Try twice at this rate before falling back to "next". */
3362 		mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
3363 	}
3364 	/* Setup MRR for control frames. */
3365 	mrr.which = htole32(WPI_MRR_CTL);
3366 	error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
3367 	if (error != 0) {
3368 		device_printf(sc->sc_dev,
3369 		    "could not setup MRR for control frames\n");
3370 		return error;
3371 	}
3372 	/* Setup MRR for data frames. */
3373 	mrr.which = htole32(WPI_MRR_DATA);
3374 	error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
3375 	if (error != 0) {
3376 		device_printf(sc->sc_dev,
3377 		    "could not setup MRR for data frames\n");
3378 		return error;
3379 	}
3380 	return 0;
3381 }
3382 
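/*
 * Register a node in the firmware table and, if group keys are
 * configured, install them for the new entry.
 */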
3383 static int
3384 wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3385 {
3386 	struct ieee80211com *ic = ni->ni_ic;
3387 	struct wpi_vap *wvp = WPI_VAP(ni->ni_vap);
3388 	struct wpi_node *wn = WPI_NODE(ni);
3389 	struct wpi_node_info node;
3390 	int error;
3391 
3392 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3393 
3394 	if (wn->id == WPI_ID_UNDEFINED)
3395 		return EINVAL;
3396 
3397 	memset(&node, 0, sizeof node);
3398 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
3399 	node.id = wn->id;
3400 	node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3401 	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
3402 	node.action = htole32(WPI_ACTION_SET_RATE);
3403 	node.antenna = WPI_ANTENNA_BOTH;
3404 
3405 	DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__,
3406 	    wn->id, ether_sprintf(ni->ni_macaddr));
3407 
3408 	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
3409 	if (error != 0) {
3410 		device_printf(sc->sc_dev,
3411 		    "%s: wpi_cmd() call failed with error code %d\n", __func__,
3412 		    error);
3413 		return error;
3414 	}
3415 
3416 	if (wvp->wv_gtk != 0) {
3417 		error = wpi_set_global_keys(ni);
3418 		if (error != 0) {
3419 			device_printf(sc->sc_dev,
3420 			    "%s: error while setting global keys\n", __func__);
3421 			return ENXIO;
3422 		}
3423 	}
3424 
3425 	return 0;
3426 }
3427 
3428 /*
3429  * The broadcast node is used to send group-addressed and management frames.
3430  */
3431 static int
3432 wpi_add_broadcast_node(struct wpi_softc *sc, int async)
3433 {
3434 	struct ieee80211com *ic = &sc->sc_ic;
3435 	struct wpi_node_info node;
3436 
3437 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3438 
3439 	memset(&node, 0, sizeof node);
3440 	IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
3441 	node.id = WPI_ID_BROADCAST;
3442 	node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3443 	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
3444 	node.action = htole32(WPI_ACTION_SET_RATE);
3445 	node.antenna = WPI_ANTENNA_BOTH;
3446 
3447 	DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__);
3448 
3449 	return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async);
3450 }
3451 
3452 static int
3453 wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3454 {
3455 	struct wpi_node *wn = WPI_NODE(ni);
3456 	int error;
3457 
3458 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3459 
3460 	wn->id = wpi_add_node_entry_sta(sc);
3461 
3462 	if ((error = wpi_add_node(sc, ni)) != 0) {
3463 		wpi_del_node_entry(sc, wn->id);
3464 		wn->id = WPI_ID_UNDEFINED;
3465 		return error;
3466 	}
3467 
3468 	return 0;
3469 }
3470 
3471 static int
3472 wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3473 {
3474 	struct wpi_node *wn = WPI_NODE(ni);
3475 	int error;
3476 
3477 	KASSERT(wn->id == WPI_ID_UNDEFINED,
3478 	    ("the node %d was added before", wn->id));
3479 
3480 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3481 
3482 	if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) {
3483 		device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__);
3484 		return ENOMEM;
3485 	}
3486 
3487 	if ((error = wpi_add_node(sc, ni)) != 0) {
3488 		wpi_del_node_entry(sc, wn->id);
3489 		wn->id = WPI_ID_UNDEFINED;
3490 		return error;
3491 	}
3492 
3493 	return 0;
3494 }
3495 
3496 static void
3497 wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni)
3498 {
3499 	struct wpi_node *wn = WPI_NODE(ni);
3500 	struct wpi_cmd_del_node node;
3501 	int error;
3502 
3503 	KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed"));
3504 
3505 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3506 
3507 	memset(&node, 0, sizeof node);
3508 	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
3509 	node.count = 1;
3510 
3511 	DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__,
3512 	    wn->id, ether_sprintf(ni->ni_macaddr));
3513 
3514 	error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1);
3515 	if (error != 0) {
3516 		device_printf(sc->sc_dev,
3517 		    "%s: could not delete node %u, error %d\n", __func__,
3518 		    wn->id, error);
3519 	}
3520 }
3521 
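/* Push the current WME/EDCA parameters to the firmware. */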
3522 static int
3523 wpi_updateedca(struct ieee80211com *ic)
3524 {
3525 #define WPI_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
3526 	struct wpi_softc *sc = ic->ic_softc;
3527 	struct chanAccParams chp;
3528 	struct wpi_edca_params cmd;
3529 	int aci, error;
3530 
3531 	ieee80211_wme_ic_getparams(ic, &chp);
3532 
3533 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3534 
3535 	memset(&cmd, 0, sizeof cmd);
3536 	cmd.flags = htole32(WPI_EDCA_UPDATE);
3537 	for (aci = 0; aci < WME_NUM_AC; aci++) {
3538 		const struct wmeParams *ac = &chp.cap_wmeParams[aci];
3539 		cmd.ac[aci].aifsn = ac->wmep_aifsn;
3540 		cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin));
3541 		cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax));
3542 		cmd.ac[aci].txoplimit =
3543 		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
3544 
3545 		DPRINTF(sc, WPI_DEBUG_EDCA,
3546 		    "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
3547 		    "txoplimit=%d\n", aci, cmd.ac[aci].aifsn,
3548 		    cmd.ac[aci].cwmin, cmd.ac[aci].cwmax,
3549 		    cmd.ac[aci].txoplimit);
3550 	}
3551 	error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
3552 
3553 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
3554 
3555 	return error;
3556 #undef WPI_EXP2
3557 }
3558 
3559 static void
3560 wpi_set_promisc(struct wpi_softc *sc)
3561 {
3562 	struct ieee80211com *ic = &sc->sc_ic;
3563 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3564 	uint32_t promisc_filter;
3565 
3566 	promisc_filter = WPI_FILTER_CTL;
3567 	if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP)
3568 		promisc_filter |= WPI_FILTER_PROMISC;
3569 
3570 	if (ic->ic_promisc > 0)
3571 		sc->rxon.filter |= htole32(promisc_filter);
3572 	else
3573 		sc->rxon.filter &= ~htole32(promisc_filter);
3574 }
3575 
3576 static void
3577 wpi_update_promisc(struct ieee80211com *ic)
3578 {
3579 	struct wpi_softc *sc = ic->ic_softc;
3580 
3581 	WPI_LOCK(sc);
3582 	if (sc->sc_running == 0) {
3583 		WPI_UNLOCK(sc);
3584 		return;
3585 	}
3586 	WPI_UNLOCK(sc);
3587 
3588 	WPI_RXON_LOCK(sc);
3589 	wpi_set_promisc(sc);
3590 
3591 	if (wpi_send_rxon(sc, 1, 1) != 0) {
3592 		device_printf(sc->sc_dev, "%s: could not send RXON\n",
3593 		    __func__);
3594 	}
3595 	WPI_RXON_UNLOCK(sc);
3596 }
3597 
3598 static void
3599 wpi_update_mcast(struct ieee80211com *ic)
3600 {
3601 	/* Ignore */
3602 }
3603 
3604 static void
3605 wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3606 {
3607 	struct wpi_cmd_led led;
3608 
3609 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3610 
3611 	led.which = which;
3612 	led.unit = htole32(100000);	/* on/off in unit of 100ms */
3613 	led.off = off;
3614 	led.on = on;
3615 	(void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1);
3616 }
3617 
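/*
 * Send the timing command: peer TSF, beacon interval and the remaining
 * time until the next beacon.
 */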
3618 static int
3619 wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni)
3620 {
3621 	struct wpi_cmd_timing cmd;
3622 	uint64_t val, mod;
3623 
3624 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3625 
3626 	memset(&cmd, 0, sizeof cmd);
3627 	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3628 	cmd.bintval = htole16(ni->ni_intval);
3629 	cmd.lintval = htole16(10);
3630 
3631 	/* Compute remaining time until next beacon. */
3632 	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3633 	mod = le64toh(cmd.tstamp) % val;
3634 	cmd.binitval = htole32((uint32_t)(val - mod));
3635 
3636 	DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3637 	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3638 
3639 	return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1);
3640 }
3641 
3642 /*
3643  * This function is called periodically (every 60 seconds) to adjust output
3644  * power in response to temperature changes.
3645  */
3646 static void
3647 wpi_power_calibration(struct wpi_softc *sc)
3648 {
3649 	int temp;
3650 
3651 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
3652 
3653 	/* Update sensor data. */
3654 	temp = (int)WPI_READ(sc, WPI_UCODE_GP2);
3655 	DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp);
3656 
3657 	/* Sanity-check read value. */
3658 	if (temp < -260 || temp > 25) {
3659 		/* This can't be correct, ignore. */
3660 		DPRINTF(sc, WPI_DEBUG_TEMP,
3661 		    "out-of-range temperature reported: %d\n", temp);
3662 		return;
3663 	}
3664 
3665 	DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp);
3666 
3667 	/* Adjust Tx power if need be. */
3668 	if (abs(temp - sc->temp) <= 6)
3669 		return;
3670 
3671 	sc->temp = temp;
3672 
3673 	if (wpi_set_txpower(sc, 1) != 0) {
3674 		/* just warn, too bad for the automatic calibration... */
3675 		device_printf(sc->sc_dev,"could not adjust Tx power\n");
3676 	}
3677 }
3678 
3679 /*
3680  * Set TX power for current channel.
3681  */
3682 static int
3683 wpi_set_txpower(struct wpi_softc *sc, int async)
3684 {
3685 	struct wpi_power_group *group;
3686 	struct wpi_cmd_txpower cmd;
3687 	uint8_t chan;
3688 	int idx, is_chan_5ghz, i;
3689 
3690 	/* Retrieve current channel from last RXON. */
3691 	chan = sc->rxon.chan;
3692 	is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0;
3693 
3694 	/* Find the TX power group to which this channel belongs. */
3695 	if (is_chan_5ghz) {
3696 		for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
3697 			if (chan <= group->chan)
3698 				break;
3699 	} else
3700 		group = &sc->groups[0];
3701 
3702 	memset(&cmd, 0, sizeof cmd);
3703 	cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ;
3704 	cmd.chan = htole16(chan);
3705 
3706 	/* Set TX power for all OFDM and CCK rates. */
3707 	for (i = 0; i <= WPI_RIDX_MAX ; i++) {
3708 		/* Retrieve TX power for this channel/rate. */
3709 		idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i);
3710 
3711 		cmd.rates[i].plcp = wpi_ridx_to_plcp[i];
3712 
3713 		if (is_chan_5ghz) {
3714 			cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx];
3715 			cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx];
3716 		} else {
3717 			cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx];
3718 			cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx];
3719 		}
3720 		DPRINTF(sc, WPI_DEBUG_TEMP,
3721 		    "chan %d/ridx %d: power index %d\n", chan, i, idx);
3722 	}
3723 
3724 	return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async);
3725 }
3726 
3727 /*
3728  * Determine Tx power index for a given channel/rate combination.
3729  * This takes into account the regulatory information from EEPROM and the
3730  * current temperature.
3731  */
3732 static int
3733 wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
3734     uint8_t chan, int is_chan_5ghz, int ridx)
3735 {
3736 /* Fixed-point arithmetic division using an n-bit fractional part. */
3737 #define fdivround(a, b, n)	\
3738 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3739 
3740 /* Linear interpolation. */
3741 #define interpolate(x, x1, y1, x2, y2, n)	\
3742 	((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
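/*
 * Worked example: with gain-table samples (x1, y1) = (0, 0) and
 * (x2, y2) = (10, 40), interpolate(5, 0, 0, 10, 40, 19) expands to
 * 0 + fdivround(5 * 40, 10, 19) == 20, i.e. halfway between the two
 * sample indices.
 */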
3743 
3744 	struct wpi_power_sample *sample;
3745 	int pwr, idx;
3746 
3747 	/* Default TX power is group maximum TX power minus 3dB. */
3748 	pwr = group->maxpwr / 2;
3749 
3750 	/* Decrease TX power for highest OFDM rates to reduce distortion. */
3751 	switch (ridx) {
3752 	case WPI_RIDX_OFDM36:
3753 		pwr -= is_chan_5ghz ?  5 : 0;
3754 		break;
3755 	case WPI_RIDX_OFDM48:
3756 		pwr -= is_chan_5ghz ? 10 : 7;
3757 		break;
3758 	case WPI_RIDX_OFDM54:
3759 		pwr -= is_chan_5ghz ? 12 : 9;
3760 		break;
3761 	}
3762 
3763 	/* Never exceed the channel maximum allowed TX power. */
3764 	pwr = min(pwr, sc->maxpwr[chan]);
3765 
3766 	/* Retrieve TX power index into gain tables from samples. */
3767 	for (sample = group->samples; sample < &group->samples[3]; sample++)
3768 		if (pwr > sample[1].power)
3769 			break;
3770 	/* Fixed-point linear interpolation using a 19-bit fractional part. */
3771 	idx = interpolate(pwr, sample[0].power, sample[0].index,
3772 	    sample[1].power, sample[1].index, 19);
3773 
3774 	/*-
3775 	 * Adjust power index based on current temperature:
3776 	 * - if cooler than factory-calibrated: decrease output power
3777 	 * - if warmer than factory-calibrated: increase output power
3778 	 */
3779 	idx -= (sc->temp - group->temp) * 11 / 100;
3780 
3781 	/* Decrease TX power for CCK rates (-5dB). */
3782 	if (ridx >= WPI_RIDX_CCK1)
3783 		idx += 10;
3784 
3785 	/* Make sure idx stays in a valid range. */
3786 	if (idx < 0)
3787 		return 0;
3788 	if (idx > WPI_MAX_PWR_INDEX)
3789 		return WPI_MAX_PWR_INDEX;
3790 	return idx;
3791 
3792 #undef interpolate
3793 #undef fdivround
3794 }
3795 
3796 /*
3797  * Set STA mode power saving level (between 0 and 5).
3798  * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
3799  */
3800 static int
3801 wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async)
3802 {
3803 	struct wpi_pmgt_cmd cmd;
3804 	const struct wpi_pmgt *pmgt;
3805 	uint32_t max, reg;
3806 	uint8_t skip_dtim;
3807 	int i;
3808 
3809 	DPRINTF(sc, WPI_DEBUG_PWRSAVE,
3810 	    "%s: dtim=%d, level=%d, async=%d\n",
3811 	    __func__, dtim, level, async);
3812 
3813 	/* Select which PS parameters to use. */
3814 	if (dtim <= 10)
3815 		pmgt = &wpi_pmgt[0][level];
3816 	else
3817 		pmgt = &wpi_pmgt[1][level];
3818 
3819 	memset(&cmd, 0, sizeof cmd);
3820 	if (level != 0)	/* not CAM */
3821 		cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP);
3822 	/* Retrieve PCIe Active State Power Management (ASPM). */
3823 	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1);
3824 	if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S))	/* L0s Entry disabled. */
3825 		cmd.flags |= htole16(WPI_PS_PCI_PMGT);
3826 
3827 	cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU);
3828 	cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU);
3829 
3830 	if (dtim == 0) {
3831 		dtim = 1;
3832 		skip_dtim = 0;
3833 	} else
3834 		skip_dtim = pmgt->skip_dtim;
3835 
3836 	if (skip_dtim != 0) {
3837 		cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM);
3838 		max = pmgt->intval[4];
3839 		if (max == (uint32_t)-1)
3840 			max = dtim * (skip_dtim + 1);
3841 		else if (max > dtim)
3842 			max = rounddown(max, dtim);
3843 	} else
3844 		max = dtim;
3845 
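	/* Clamp each firmware sleep interval to the computed maximum. */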
3846 	for (i = 0; i < 5; i++)
3847 		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
3848 
3849 	return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
3850 }
3851 
3852 static int
3853 wpi_send_btcoex(struct wpi_softc *sc)
3854 {
3855 	struct wpi_bluetooth cmd;
3856 
3857 	memset(&cmd, 0, sizeof cmd);
3858 	cmd.flags = WPI_BT_COEX_MODE_4WIRE;
3859 	cmd.lead_time = WPI_BT_LEAD_TIME_DEF;
3860 	cmd.max_kill = WPI_BT_MAX_KILL_DEF;
3861 	DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
3862 	    __func__);
3863 	return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
3864 }
3865 
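/*
 * Push the cached RXON configuration to the firmware.  When the caller
 * requests an association update and the BSS filter is already enabled,
 * only the lightweight RXON_ASSOC command is sent; otherwise a full RXON
 * is issued, the driver's node table is cleared and the broadcast node is
 * re-added.  TX power is recomputed afterwards in both cases.
 */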
3866 static int
3867 wpi_send_rxon(struct wpi_softc *sc, int assoc, int async)
3868 {
3869 	int error;
3870 
3871 	if (async)
3872 		WPI_RXON_LOCK_ASSERT(sc);
3873 
3874 	if (assoc && wpi_check_bss_filter(sc) != 0) {
3875 		struct wpi_assoc rxon_assoc;
3876 
3877 		rxon_assoc.flags = sc->rxon.flags;
3878 		rxon_assoc.filter = sc->rxon.filter;
3879 		rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
3880 		rxon_assoc.cck_mask = sc->rxon.cck_mask;
3881 		rxon_assoc.reserved = 0;
3882 
3883 		error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc,
3884 		    sizeof (struct wpi_assoc), async);
3885 		if (error != 0) {
3886 			device_printf(sc->sc_dev,
3887 			    "RXON_ASSOC command failed, error %d\n", error);
3888 			return error;
3889 		}
3890 	} else {
3891 		if (async) {
3892 			WPI_NT_LOCK(sc);
3893 			error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
3894 			    sizeof (struct wpi_rxon), async);
3895 			if (error == 0)
3896 				wpi_clear_node_table(sc);
3897 			WPI_NT_UNLOCK(sc);
3898 		} else {
3899 			error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
3900 			    sizeof (struct wpi_rxon), async);
3901 			if (error == 0)
3902 				wpi_clear_node_table(sc);
3903 		}
3904 
3905 		if (error != 0) {
3906 			device_printf(sc->sc_dev,
3907 			    "RXON command failed, error %d\n", error);
3908 			return error;
3909 		}
3910 
3911 		/* Add broadcast node. */
3912 		error = wpi_add_broadcast_node(sc, async);
3913 		if (error != 0) {
3914 			device_printf(sc->sc_dev,
3915 			    "could not add broadcast node, error %d\n", error);
3916 			return error;
3917 		}
3918 	}
3919 
3920 	/* Configuration has changed, set Tx power accordingly. */
3921 	if ((error = wpi_set_txpower(sc, async)) != 0) {
3922 		device_printf(sc->sc_dev,
3923 		    "%s: could not set TX power, error %d\n", __func__, error);
3924 		return error;
3925 	}
3926 
3927 	return 0;
3928 }
3929 
3930 /**
3931  * Configure the card to listen on a particular channel; this transitions the
3932  * card into a state where it can receive frames from remote devices.
3933  */
3934 static int
3935 wpi_config(struct wpi_softc *sc)
3936 {
3937 	struct ieee80211com *ic = &sc->sc_ic;
3938 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3939 	struct ieee80211_channel *c = ic->ic_curchan;
3940 	int error;
3941 
3942 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
3943 
3944 	/* Set power saving level to CAM during initialization. */
3945 	if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
3946 		device_printf(sc->sc_dev,
3947 		    "%s: could not set power saving level\n", __func__);
3948 		return error;
3949 	}
3950 
3951 	/* Configure bluetooth coexistence. */
3952 	if ((error = wpi_send_btcoex(sc)) != 0) {
3953 		device_printf(sc->sc_dev,
3954 		    "could not configure bluetooth coexistence\n");
3955 		return error;
3956 	}
3957 
3958 	/* Configure adapter. */
3959 	memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
3960 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
3961 
3962 	/* Set default channel. */
3963 	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
3964 	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
3965 	if (IEEE80211_IS_CHAN_2GHZ(c))
3966 		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
3967 
3968 	sc->rxon.filter = WPI_FILTER_MULTICAST;
3969 	switch (ic->ic_opmode) {
3970 	case IEEE80211_M_STA:
3971 		sc->rxon.mode = WPI_MODE_STA;
3972 		break;
3973 	case IEEE80211_M_IBSS:
3974 		sc->rxon.mode = WPI_MODE_IBSS;
3975 		sc->rxon.filter |= WPI_FILTER_BEACON;
3976 		break;
3977 	case IEEE80211_M_HOSTAP:
3978 		/* XXX workaround for beaconing */
3979 		sc->rxon.mode = WPI_MODE_IBSS;
3980 		sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
3981 		break;
3982 	case IEEE80211_M_AHDEMO:
3983 		sc->rxon.mode = WPI_MODE_HOSTAP;
3984 		break;
3985 	case IEEE80211_M_MONITOR:
3986 		sc->rxon.mode = WPI_MODE_MONITOR;
3987 		break;
3988 	default:
3989 		device_printf(sc->sc_dev, "unknown opmode %d\n",
3990 		    ic->ic_opmode);
3991 		return EINVAL;
3992 	}
3993 	sc->rxon.filter = htole32(sc->rxon.filter);
3994 	wpi_set_promisc(sc);
3995 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
3996 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
3997 
3998 	if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
3999 		device_printf(sc->sc_dev, "%s: could not send RXON\n",
4000 		    __func__);
4001 		return error;
4002 	}
4003 
4004 	/* Set up rate scaling. */
4005 	if ((error = wpi_mrr_setup(sc)) != 0) {
4006 		device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
4007 		    error);
4008 		return error;
4009 	}
4010 
4011 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4012 
4013 	return 0;
4014 }
4015 
4016 static uint16_t
4017 wpi_get_active_dwell_time(struct wpi_softc *sc,
4018     struct ieee80211_channel *c, uint8_t n_probes)
4019 {
4020 	/* No channel? Default to 2GHz settings. */
4021 	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
4022 		return (WPI_ACTIVE_DWELL_TIME_2GHZ +
4023 		    WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
4024 	}
4025 
4026 	/* 5GHz dwell time. */
4027 	return (WPI_ACTIVE_DWELL_TIME_5GHZ +
4028 	    WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
4029 }
4030 
4031 /*
4032  * Limit the total dwell time.
4033  *
4034  * Returns the dwell time in milliseconds.
4035  */
4036 static uint16_t
4037 wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time)
4038 {
4039 	struct ieee80211com *ic = &sc->sc_ic;
4040 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4041 	uint16_t bintval = 0;
4042 
4043 	/* bintval is in TU (1.024mS) */
4044 	if (vap != NULL)
4045 		bintval = vap->iv_bss->ni_intval;
4046 
4047 	/*
4048 	 * If it is non-zero, clamp the dwell time so that it fits within the
4049 	 * beacon interval, less twice the channel tune time.
4050 	 *
4051 	 * XXX The math should take into account that bintval is in units of
4052 	 * 1.024 ms, not 1 ms.
4053 	 */
4054 	if (bintval > 0) {
4055 		DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__,
4056 		    bintval);
4057 		return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2));
4058 	}
4059 
4060 	/* No association context? Default. */
4061 	return dwell_time;
4062 }
4063 
4064 static uint16_t
4065 wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c)
4066 {
4067 	uint16_t passive;
4068 
4069 	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c))
4070 		passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ;
4071 	else
4072 		passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ;
4073 
4074 	/* Clamp to the beacon interval if we're associated. */
4075 	return (wpi_limit_dwell(sc, passive));
4076 }
4077 
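/*
 * Split a pause time given in TU into a whole number of beacon intervals
 * plus a remainder converted to microseconds (clamped to WPI_PAUSE_MAX_TIME),
 * as expected by the scan command's pause_svc field.
 */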
4078 static uint32_t
4079 wpi_get_scan_pause_time(uint32_t time, uint16_t bintval)
4080 {
4081 	uint32_t mod = (time % bintval) * IEEE80211_DUR_TU;
4082 	uint32_t nbeacons = time / bintval;
4083 
4084 	if (mod > WPI_PAUSE_MAX_TIME)
4085 		mod = WPI_PAUSE_MAX_TIME;
4086 
4087 	return WPI_PAUSE_SCAN(nbeacons, mod);
4088 }
4089 
4090 /*
4091  * Send a scan request to the firmware.
4092  */
4093 static int
4094 wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c)
4095 {
4096 	struct ieee80211com *ic = &sc->sc_ic;
4097 	struct ieee80211_scan_state *ss = ic->ic_scan;
4098 	struct ieee80211vap *vap = ss->ss_vap;
4099 	struct wpi_scan_hdr *hdr;
4100 	struct wpi_cmd_data *tx;
4101 	struct wpi_scan_essid *essids;
4102 	struct wpi_scan_chan *chan;
4103 	struct ieee80211_frame *wh;
4104 	struct ieee80211_rateset *rs;
4105 	uint16_t bintval, buflen, dwell_active, dwell_passive;
4106 	uint8_t *buf, *frm, i, nssid;
4107 	int bgscan, error;
4108 
4109 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4110 
4111 	/*
4112 	 * We are absolutely not allowed to send a scan command when another
4113 	 * scan command is pending.
4114 	 */
4115 	if (callout_pending(&sc->scan_timeout)) {
4116 		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
4117 		    __func__);
4118 		error = EAGAIN;
4119 		goto fail;
4120 	}
4121 
4122 	bgscan = wpi_check_bss_filter(sc);
4123 	bintval = vap->iv_bss->ni_intval;
4124 	if (bgscan != 0 &&
4125 	    bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) {
4126 		error = EOPNOTSUPP;
4127 		goto fail;
4128 	}
4129 
4130 	buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4131 	if (buf == NULL) {
4132 		device_printf(sc->sc_dev,
4133 		    "%s: could not allocate buffer for scan command\n",
4134 		    __func__);
4135 		error = ENOMEM;
4136 		goto fail;
4137 	}
4138 	hdr = (struct wpi_scan_hdr *)buf;
4139 
4140 	/*
4141 	 * Move to the next channel if no packets are received within 10 msecs
4142 	 * after sending the probe request.
4143 	 */
4144 	hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT);
4145 	hdr->quiet_threshold = htole16(1);
4146 
4147 	if (bgscan != 0) {
4148 		/*
4149 		 * Max needs to be greater than active and passive and quiet!
4150 		 * It's also in microseconds!
4151 		 */
4152 		hdr->max_svc = htole32(250 * IEEE80211_DUR_TU);
4153 		hdr->pause_svc = htole32(wpi_get_scan_pause_time(100,
4154 		    bintval));
4155 	}
4156 
4157 	hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON);
4158 
4159 	tx = (struct wpi_cmd_data *)(hdr + 1);
4160 	tx->flags = htole32(WPI_TX_AUTO_SEQ);
4161 	tx->id = WPI_ID_BROADCAST;
4162 	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
4163 
4164 	if (IEEE80211_IS_CHAN_5GHZ(c)) {
4165 		/* Send probe requests at 6Mbps. */
4166 		tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6];
4167 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4168 	} else {
4169 		hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO);
4170 		/* Send probe requests at 1Mbps. */
4171 		tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4172 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4173 	}
4174 
4175 	essids = (struct wpi_scan_essid *)(tx + 1);
4176 	nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS);
4177 	for (i = 0; i < nssid; i++) {
4178 		essids[i].id = IEEE80211_ELEMID_SSID;
4179 		essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
4180 		memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len);
4181 #ifdef WPI_DEBUG
4182 		if (sc->sc_debug & WPI_DEBUG_SCAN) {
4183 			printf("Scanning Essid: ");
4184 			ieee80211_print_essid(essids[i].data, essids[i].len);
4185 			printf("\n");
4186 		}
4187 #endif
4188 	}
4189 
4190 	/*
4191 	 * Build a probe request frame.  Most of the following code is a
4192 	 * copy & paste of what is done in net80211.
4193 	 */
4194 	wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS);
4195 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4196 		IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4197 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4198 	IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
4199 	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
4200 	IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
4201 
4202 	frm = (uint8_t *)(wh + 1);
4203 	frm = ieee80211_add_ssid(frm, NULL, 0);
4204 	frm = ieee80211_add_rates(frm, rs);
4205 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4206 		frm = ieee80211_add_xrates(frm, rs);
4207 
4208 	/* Set length of probe request. */
4209 	tx->len = htole16(frm - (uint8_t *)wh);
4210 
4211 	/*
4212 	 * Construct information about the channel that we
4213 	 * want to scan. The firmware expects this to be directly
4214 	 * after the scan probe request.
4215 	 */
4216 	chan = (struct wpi_scan_chan *)frm;
4217 	chan->chan = ieee80211_chan2ieee(ic, c);
4218 	chan->flags = 0;
4219 	if (nssid) {
4220 		hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT;
4221 		chan->flags |= WPI_CHAN_NPBREQS(nssid);
4222 	} else
4223 		hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER;
4224 
4225 	if (!IEEE80211_IS_CHAN_PASSIVE(c))
4226 		chan->flags |= WPI_CHAN_ACTIVE;
4227 
4228 	/*
4229 	 * Calculate the active/passive dwell times.
4230 	 */
4231 	dwell_active = wpi_get_active_dwell_time(sc, c, nssid);
4232 	dwell_passive = wpi_get_passive_dwell_time(sc, c);
4233 
4234 	/* Make sure they're valid. */
4235 	if (dwell_active > dwell_passive)
4236 		dwell_active = dwell_passive;
4237 
4238 	chan->active = htole16(dwell_active);
4239 	chan->passive = htole16(dwell_passive);
4240 
4241 	chan->dsp_gain = 0x6e;  /* Default level */
4242 
4243 	if (IEEE80211_IS_CHAN_5GHZ(c))
4244 		chan->rf_gain = 0x3b;
4245 	else
4246 		chan->rf_gain = 0x28;
4247 
4248 	DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n",
4249 	    chan->chan, IEEE80211_IS_CHAN_PASSIVE(c));
4250 
4251 	hdr->nchan++;
4252 
4253 	if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) {
4254 		/* XXX Force probe request transmission. */
4255 		memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan));
4256 
4257 		chan++;
4258 
4259 		/* Reduce unnecessary delay. */
4260 		chan->flags = 0;
4261 		chan->passive = chan->active = hdr->quiet_time;
4262 
4263 		hdr->nchan++;
4264 	}
4265 
4266 	chan++;
4267 
4268 	buflen = (uint8_t *)chan - buf;
4269 	hdr->len = htole16(buflen);
4270 
4271 	DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n",
4272 	    hdr->nchan);
4273 	error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1);
4274 	free(buf, M_DEVBUF);
4275 
4276 	if (error != 0)
4277 		goto fail;
4278 
4279 	callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc);
4280 
4281 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4282 
4283 	return 0;
4284 
4285 fail:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
4286 
4287 	return error;
4288 }
4289 
4290 static int
4291 wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
4292 {
4293 	struct ieee80211com *ic = vap->iv_ic;
4294 	struct ieee80211_node *ni = vap->iv_bss;
4295 	struct ieee80211_channel *c = ni->ni_chan;
4296 	int error;
4297 
4298 	WPI_RXON_LOCK(sc);
4299 
4300 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4301 
4302 	/* Update adapter configuration. */
4303 	sc->rxon.associd = 0;
4304 	sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
4305 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4306 	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4307 	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4308 	if (IEEE80211_IS_CHAN_2GHZ(c))
4309 		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4310 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4311 		sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
4312 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4313 		sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
4314 	if (IEEE80211_IS_CHAN_A(c)) {
4315 		sc->rxon.cck_mask  = 0;
4316 		sc->rxon.ofdm_mask = 0x15;
4317 	} else if (IEEE80211_IS_CHAN_B(c)) {
4318 		sc->rxon.cck_mask  = 0x03;
4319 		sc->rxon.ofdm_mask = 0;
4320 	} else {
4321 		/* Assume 802.11b/g. */
4322 		sc->rxon.cck_mask  = 0x0f;
4323 		sc->rxon.ofdm_mask = 0x15;
4324 	}
4325 
4326 	DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
4327 	    sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
4328 	    sc->rxon.ofdm_mask);
4329 
4330 	if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4331 		device_printf(sc->sc_dev, "%s: could not send RXON\n",
4332 		    __func__);
4333 	}
4334 
4335 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4336 
4337 	WPI_RXON_UNLOCK(sc);
4338 
4339 	return error;
4340 }
4341 
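/*
 * Build the beacon command from the current beacon mbuf and submit it to the
 * firmware.  A duplicate of the mbuf is handed to wpi_cmd2() so that the
 * original stays attached to the VAP for later beacon updates.
 */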
4342 static int
4343 wpi_config_beacon(struct wpi_vap *wvp)
4344 {
4345 	struct ieee80211vap *vap = &wvp->wv_vap;
4346 	struct ieee80211com *ic = vap->iv_ic;
4347 	struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
4348 	struct wpi_buf *bcn = &wvp->wv_bcbuf;
4349 	struct wpi_softc *sc = ic->ic_softc;
4350 	struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
4351 	struct ieee80211_tim_ie *tie;
4352 	struct mbuf *m;
4353 	uint8_t *ptr;
4354 	int error;
4355 
4356 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4357 
4358 	WPI_VAP_LOCK_ASSERT(wvp);
4359 
4360 	cmd->len = htole16(bcn->m->m_pkthdr.len);
4361 	cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
4362 	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
4363 
4364 	/* XXX seems to be unused */
4365 	if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) {
4366 		tie = (struct ieee80211_tim_ie *) bo->bo_tim;
4367 		ptr = mtod(bcn->m, uint8_t *);
4368 
4369 		cmd->tim = htole16(bo->bo_tim - ptr);
4370 		cmd->timsz = tie->tim_len;
4371 	}
4372 
4373 	/* Necessary for recursion in ieee80211_beacon_update(). */
4374 	m = bcn->m;
4375 	bcn->m = m_dup(m, M_NOWAIT);
4376 	if (bcn->m == NULL) {
4377 		device_printf(sc->sc_dev,
4378 		    "%s: could not copy beacon frame\n", __func__);
4379 		error = ENOMEM;
4380 		goto end;
4381 	}
4382 
4383 	if ((error = wpi_cmd2(sc, bcn)) != 0) {
4384 		device_printf(sc->sc_dev,
4385 		    "%s: could not update beacon frame, error %d", __func__,
4386 		    error);
4387 		m_freem(bcn->m);
4388 	}
4389 
4390 	/* Restore mbuf. */
4391 end:	bcn->m = m;
4392 
4393 	return error;
4394 }
4395 
4396 static int
4397 wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
4398 {
4399 	struct ieee80211vap *vap = ni->ni_vap;
4400 	struct wpi_vap *wvp = WPI_VAP(vap);
4401 	struct wpi_buf *bcn = &wvp->wv_bcbuf;
4402 	struct mbuf *m;
4403 	int error;
4404 
4405 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4406 
4407 	if (ni->ni_chan == IEEE80211_CHAN_ANYC)
4408 		return EINVAL;
4409 
4410 	m = ieee80211_beacon_alloc(ni);
4411 	if (m == NULL) {
4412 		device_printf(sc->sc_dev,
4413 		    "%s: could not allocate beacon frame\n", __func__);
4414 		return ENOMEM;
4415 	}
4416 
4417 	WPI_VAP_LOCK(wvp);
4418 	if (bcn->m != NULL)
4419 		m_freem(bcn->m);
4420 
4421 	bcn->m = m;
4422 
4423 	error = wpi_config_beacon(wvp);
4424 	WPI_VAP_UNLOCK(wvp);
4425 
4426 	return error;
4427 }
4428 
4429 static void
4430 wpi_update_beacon(struct ieee80211vap *vap, int item)
4431 {
4432 	struct wpi_softc *sc = vap->iv_ic->ic_softc;
4433 	struct wpi_vap *wvp = WPI_VAP(vap);
4434 	struct wpi_buf *bcn = &wvp->wv_bcbuf;
4435 	struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
4436 	struct ieee80211_node *ni = vap->iv_bss;
4437 	int mcast = 0;
4438 
4439 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4440 
4441 	WPI_VAP_LOCK(wvp);
4442 	if (bcn->m == NULL) {
4443 		bcn->m = ieee80211_beacon_alloc(ni);
4444 		if (bcn->m == NULL) {
4445 			device_printf(sc->sc_dev,
4446 			    "%s: could not allocate beacon frame\n", __func__);
4447 
4448 			DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR,
4449 			    __func__);
4450 
4451 			WPI_VAP_UNLOCK(wvp);
4452 			return;
4453 		}
4454 	}
4455 	WPI_VAP_UNLOCK(wvp);
4456 
4457 	if (item == IEEE80211_BEACON_TIM)
4458 		mcast = 1;	/* TODO */
4459 
4460 	setbit(bo->bo_flags, item);
4461 	ieee80211_beacon_update(ni, bcn->m, mcast);
4462 
4463 	WPI_VAP_LOCK(wvp);
4464 	wpi_config_beacon(wvp);
4465 	WPI_VAP_UNLOCK(wvp);
4466 
4467 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4468 }
4469 
4470 static void
4471 wpi_newassoc(struct ieee80211_node *ni, int isnew)
4472 {
4473 	struct ieee80211vap *vap = ni->ni_vap;
4474 	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4475 	struct wpi_node *wn = WPI_NODE(ni);
4476 	int error;
4477 
4478 	WPI_NT_LOCK(sc);
4479 
4480 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4481 
4482 	if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) {
4483 		if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
4484 			device_printf(sc->sc_dev,
4485 			    "%s: could not add IBSS node, error %d\n",
4486 			    __func__, error);
4487 		}
4488 	}
4489 	WPI_NT_UNLOCK(sc);
4490 }
4491 
4492 static int
4493 wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap)
4494 {
4495 	struct ieee80211com *ic = vap->iv_ic;
4496 	struct ieee80211_node *ni = vap->iv_bss;
4497 	struct ieee80211_channel *c = ni->ni_chan;
4498 	int error;
4499 
4500 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
4501 
4502 	if (vap->iv_opmode == IEEE80211_M_MONITOR) {
4503 		/* Link LED blinks while monitoring. */
4504 		wpi_set_led(sc, WPI_LED_LINK, 5, 5);
4505 		return 0;
4506 	}
4507 
4508 	/* XXX kernel panic workaround */
4509 	if (c == IEEE80211_CHAN_ANYC) {
4510 		device_printf(sc->sc_dev, "%s: incomplete configuration\n",
4511 		    __func__);
4512 		return EINVAL;
4513 	}
4514 
4515 	if ((error = wpi_set_timing(sc, ni)) != 0) {
4516 		device_printf(sc->sc_dev,
4517 		    "%s: could not set timing, error %d\n", __func__, error);
4518 		return error;
4519 	}
4520 
4521 	/* Update adapter configuration. */
4522 	WPI_RXON_LOCK(sc);
4523 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4524 	sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni));
4525 	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
4526 	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
4527 	if (IEEE80211_IS_CHAN_2GHZ(c))
4528 		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
4529 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
4530 		sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
4531 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4532 		sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
4533 	if (IEEE80211_IS_CHAN_A(c)) {
4534 		sc->rxon.cck_mask  = 0;
4535 		sc->rxon.ofdm_mask = 0x15;
4536 	} else if (IEEE80211_IS_CHAN_B(c)) {
4537 		sc->rxon.cck_mask  = 0x03;
4538 		sc->rxon.ofdm_mask = 0;
4539 	} else {
4540 		/* Assume 802.11b/g. */
4541 		sc->rxon.cck_mask  = 0x0f;
4542 		sc->rxon.ofdm_mask = 0x15;
4543 	}
4544 	sc->rxon.filter |= htole32(WPI_FILTER_BSS);
4545 
4546 	DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n",
4547 	    sc->rxon.chan, sc->rxon.flags);
4548 
4549 	if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
4550 		device_printf(sc->sc_dev, "%s: could not send RXON\n",
4551 		    __func__);
4552 		return error;
4553 	}
4554 
4555 	/* Start periodic calibration timer. */
4556 	callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
4557 
4558 	WPI_RXON_UNLOCK(sc);
4559 
4560 	if (vap->iv_opmode == IEEE80211_M_IBSS ||
4561 	    vap->iv_opmode == IEEE80211_M_HOSTAP) {
4562 		if ((error = wpi_setup_beacon(sc, ni)) != 0) {
4563 			device_printf(sc->sc_dev,
4564 			    "%s: could not setup beacon, error %d\n", __func__,
4565 			    error);
4566 			return error;
4567 		}
4568 	}
4569 
4570 	if (vap->iv_opmode == IEEE80211_M_STA) {
4571 		/* Add BSS node. */
4572 		WPI_NT_LOCK(sc);
4573 		error = wpi_add_sta_node(sc, ni);
4574 		WPI_NT_UNLOCK(sc);
4575 		if (error != 0) {
4576 			device_printf(sc->sc_dev,
4577 			    "%s: could not add BSS node, error %d\n", __func__,
4578 			    error);
4579 			return error;
4580 		}
4581 	}
4582 
4583 	/* Link LED always on while associated. */
4584 	wpi_set_led(sc, WPI_LED_LINK, 0, 1);
4585 
4586 	/* Enable power-saving mode if requested by user. */
4587 	if ((vap->iv_flags & IEEE80211_F_PMGTON) &&
4588 	    vap->iv_opmode != IEEE80211_M_IBSS)
4589 		(void)wpi_set_pslevel(sc, 0, 3, 1);
4590 
4591 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
4592 
4593 	return 0;
4594 }
4595 
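/*
 * Install a CCMP key for the given node with an ADD_NODE update.  Keys that
 * live in the vap's global key slots are installed a second time with the
 * multicast flag set.  Returns 1 on success and 0 on failure.
 */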
4596 static int
4597 wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
4598 {
4599 	const struct ieee80211_cipher *cip = k->wk_cipher;
4600 	struct ieee80211vap *vap = ni->ni_vap;
4601 	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4602 	struct wpi_node *wn = WPI_NODE(ni);
4603 	struct wpi_node_info node;
4604 	uint16_t kflags;
4605 	int error;
4606 
4607 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4608 
4609 	if (wpi_check_node_entry(sc, wn->id) == 0) {
4610 		device_printf(sc->sc_dev, "%s: node does not exist\n",
4611 		    __func__);
4612 		return 0;
4613 	}
4614 
4615 	switch (cip->ic_cipher) {
4616 	case IEEE80211_CIPHER_AES_CCM:
4617 		kflags = WPI_KFLAG_CCMP;
4618 		break;
4619 
4620 	default:
4621 		device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__,
4622 		    cip->ic_cipher);
4623 		return 0;
4624 	}
4625 
4626 	kflags |= WPI_KFLAG_KID(k->wk_keyix);
4627 	if (k->wk_flags & IEEE80211_KEY_GROUP)
4628 		kflags |= WPI_KFLAG_MULTICAST;
4629 
4630 	memset(&node, 0, sizeof node);
4631 	node.id = wn->id;
4632 	node.control = WPI_NODE_UPDATE;
4633 	node.flags = WPI_FLAG_KEY_SET;
4634 	node.kflags = htole16(kflags);
4635 	memcpy(node.key, k->wk_key, k->wk_keylen);
4636 again:
4637 	DPRINTF(sc, WPI_DEBUG_KEY,
4638 	    "%s: setting %s key id %d for node %d (%s)\n", __func__,
4639 	    (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix,
4640 	    node.id, ether_sprintf(ni->ni_macaddr));
4641 
4642 	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
4643 	if (error != 0) {
4644 		device_printf(sc->sc_dev, "can't update node info, error %d\n",
4645 		    error);
4646 		return !error;
4647 	}
4648 
4649 	if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
4650 	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4651 		kflags |= WPI_KFLAG_MULTICAST;
4652 		node.kflags = htole16(kflags);
4653 
4654 		goto again;
4655 	}
4656 
4657 	return 1;
4658 }
4659 
4660 static void
4661 wpi_load_key_cb(void *arg, struct ieee80211_node *ni)
4662 {
4663 	const struct ieee80211_key *k = arg;
4664 	struct ieee80211vap *vap = ni->ni_vap;
4665 	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4666 	struct wpi_node *wn = WPI_NODE(ni);
4667 	int error;
4668 
4669 	if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
4670 		return;
4671 
4672 	WPI_NT_LOCK(sc);
4673 	error = wpi_load_key(ni, k);
4674 	WPI_NT_UNLOCK(sc);
4675 
4676 	if (error == 0) {
4677 		device_printf(sc->sc_dev, "%s: error while setting key\n",
4678 		    __func__);
4679 	}
4680 }
4681 
4682 static int
4683 wpi_set_global_keys(struct ieee80211_node *ni)
4684 {
4685 	struct ieee80211vap *vap = ni->ni_vap;
4686 	struct ieee80211_key *wk = &vap->iv_nw_keys[0];
4687 	int error = 1;
4688 
4689 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++)
4690 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4691 			error = wpi_load_key(ni, wk);
4692 
4693 	return !error;
4694 }
4695 
4696 static int
4697 wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
4698 {
4699 	struct ieee80211vap *vap = ni->ni_vap;
4700 	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4701 	struct wpi_node *wn = WPI_NODE(ni);
4702 	struct wpi_node_info node;
4703 	uint16_t kflags;
4704 	int error;
4705 
4706 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4707 
4708 	if (wpi_check_node_entry(sc, wn->id) == 0) {
4709 		DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__);
4710 		return 1;	/* Nothing to do. */
4711 	}
4712 
4713 	kflags = WPI_KFLAG_KID(k->wk_keyix);
4714 	if (k->wk_flags & IEEE80211_KEY_GROUP)
4715 		kflags |= WPI_KFLAG_MULTICAST;
4716 
4717 	memset(&node, 0, sizeof node);
4718 	node.id = wn->id;
4719 	node.control = WPI_NODE_UPDATE;
4720 	node.flags = WPI_FLAG_KEY_SET;
4721 	node.kflags = htole16(kflags);
4722 again:
4723 	DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n",
4724 	    __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast",
4725 	    k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr));
4726 
4727 	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
4728 	if (error != 0) {
4729 		device_printf(sc->sc_dev, "can't update node info, error %d\n",
4730 		    error);
4731 		return !error;
4732 	}
4733 
4734 	if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
4735 	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4736 		kflags |= WPI_KFLAG_MULTICAST;
4737 		node.kflags = htole16(kflags);
4738 
4739 		goto again;
4740 	}
4741 
4742 	return 1;
4743 }
4744 
4745 static void
4746 wpi_del_key_cb(void *arg, struct ieee80211_node *ni)
4747 {
4748 	const struct ieee80211_key *k = arg;
4749 	struct ieee80211vap *vap = ni->ni_vap;
4750 	struct wpi_softc *sc = ni->ni_ic->ic_softc;
4751 	struct wpi_node *wn = WPI_NODE(ni);
4752 	int error;
4753 
4754 	if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
4755 		return;
4756 
4757 	WPI_NT_LOCK(sc);
4758 	error = wpi_del_key(ni, k);
4759 	WPI_NT_UNLOCK(sc);
4760 
4761 	if (error == 0) {
4762 		device_printf(sc->sc_dev, "%s: error while deleting key\n",
4763 		    __func__);
4764 	}
4765 }
4766 
4767 static int
4768 wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
4769     int set)
4770 {
4771 	struct ieee80211com *ic = vap->iv_ic;
4772 	struct wpi_softc *sc = ic->ic_softc;
4773 	struct wpi_vap *wvp = WPI_VAP(vap);
4774 	struct ieee80211_node *ni;
4775 	int error, ni_ref = 0;
4776 
4777 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4778 
4779 	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
4780 		/* Not for us. */
4781 		return 1;
4782 	}
4783 
4784 	if (!(k->wk_flags & IEEE80211_KEY_RECV)) {
4785 		/* XMIT keys are handled in wpi_tx_data(). */
4786 		return 1;
4787 	}
4788 
4789 	/* Handle group keys. */
4790 	if (&vap->iv_nw_keys[0] <= k &&
4791 	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
4792 		WPI_NT_LOCK(sc);
4793 		if (set)
4794 			wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix);
4795 		else
4796 			wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix);
4797 		WPI_NT_UNLOCK(sc);
4798 
4799 		if (vap->iv_state == IEEE80211_S_RUN) {
4800 			ieee80211_iterate_nodes(&ic->ic_sta,
4801 			    set ? wpi_load_key_cb : wpi_del_key_cb,
4802 			    __DECONST(void *, k));
4803 		}
4804 
4805 		return 1;
4806 	}
4807 
4808 	switch (vap->iv_opmode) {
4809 	case IEEE80211_M_STA:
4810 		ni = vap->iv_bss;
4811 		break;
4812 
4813 	case IEEE80211_M_IBSS:
4814 	case IEEE80211_M_AHDEMO:
4815 	case IEEE80211_M_HOSTAP:
4816 		ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr);
4817 		if (ni == NULL)
4818 			return 0;	/* should not happen */
4819 
4820 		ni_ref = 1;
4821 		break;
4822 
4823 	default:
4824 		device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__,
4825 		    vap->iv_opmode);
4826 		return 0;
4827 	}
4828 
4829 	WPI_NT_LOCK(sc);
4830 	if (set)
4831 		error = wpi_load_key(ni, k);
4832 	else
4833 		error = wpi_del_key(ni, k);
4834 	WPI_NT_UNLOCK(sc);
4835 
4836 	if (ni_ref)
4837 		ieee80211_node_decref(ni);
4838 
4839 	return error;
4840 }
4841 
4842 static int
4843 wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
4844 {
4845 	return wpi_process_key(vap, k, 1);
4846 }
4847 
4848 static int
4849 wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
4850 {
4851 	return wpi_process_key(vap, k, 0);
4852 }
4853 
4854 /*
4855  * This function is called after the runtime firmware notifies us of its
4856  * readiness (called in a process context).
4857  */
4858 static int
4859 wpi_post_alive(struct wpi_softc *sc)
4860 {
4861 	int ntries, error;
4862 
4863 	/* Check (again) that the radio is not disabled. */
4864 	if ((error = wpi_nic_lock(sc)) != 0)
4865 		return error;
4866 
4867 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4868 
4869 	/* NB: Runtime firmware must be up and running. */
4870 	if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) {
4871 		device_printf(sc->sc_dev,
4872 		    "RF switch: radio disabled (%s)\n", __func__);
4873 		wpi_nic_unlock(sc);
4874 		return EPERM;   /* :-) */
4875 	}
4876 	wpi_nic_unlock(sc);
4877 
4878 	/* Wait for thermal sensor to calibrate. */
4879 	for (ntries = 0; ntries < 1000; ntries++) {
4880 		if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0)
4881 			break;
4882 		DELAY(10);
4883 	}
4884 
4885 	if (ntries == 1000) {
4886 		device_printf(sc->sc_dev,
4887 		    "timeout waiting for thermal sensor calibration\n");
4888 		return ETIMEDOUT;
4889 	}
4890 
4891 	DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp);
4892 	return 0;
4893 }
4894 
4895 /*
4896  * The firmware boot code is small and is intended to be copied directly into
4897  * the NIC internal memory (no DMA transfer).
4898  */
4899 static int
4900 wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, uint32_t size)
4901 {
4902 	int error, ntries;
4903 
4904 	DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size);
4905 
4906 	size /= sizeof (uint32_t);
4907 
4908 	if ((error = wpi_nic_lock(sc)) != 0)
4909 		return error;
4910 
4911 	/* Copy microcode image into NIC memory. */
4912 	wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE,
4913 	    (const uint32_t *)ucode, size);
4914 
4915 	wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0);
4916 	wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE);
4917 	wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size);
4918 
4919 	/* Start boot load now. */
4920 	wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START);
4921 
4922 	/* Wait for transfer to complete. */
4923 	for (ntries = 0; ntries < 1000; ntries++) {
4924 		uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS);
4925 		DPRINTF(sc, WPI_DEBUG_HW,
4926 		    "firmware status=0x%x, val=0x%x, result=0x%x\n", status,
4927 		    WPI_FH_TX_STATUS_IDLE(6),
4928 		    status & WPI_FH_TX_STATUS_IDLE(6));
4929 		if (status & WPI_FH_TX_STATUS_IDLE(6)) {
4930 			DPRINTF(sc, WPI_DEBUG_HW,
4931 			    "Status Match! - ntries = %d\n", ntries);
4932 			break;
4933 		}
4934 		DELAY(10);
4935 	}
4936 	if (ntries == 1000) {
4937 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
4938 		    __func__);
4939 		wpi_nic_unlock(sc);
4940 		return ETIMEDOUT;
4941 	}
4942 
4943 	/* Enable boot after power up. */
4944 	wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN);
4945 
4946 	wpi_nic_unlock(sc);
4947 	return 0;
4948 }
4949 
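/*
 * Load the init and runtime firmware images.  Each image is copied into the
 * DMA-safe firmware buffer and its location programmed into the BSM; the
 * runtime image is only set up after the init firmware has signalled that
 * it is alive.
 */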
4950 static int
4951 wpi_load_firmware(struct wpi_softc *sc)
4952 {
4953 	struct wpi_fw_info *fw = &sc->fw;
4954 	struct wpi_dma_info *dma = &sc->fw_dma;
4955 	int error;
4956 
4957 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
4958 
4959 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
4960 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
4961 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4962 	memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz);
4963 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4964 
4965 	/* Tell adapter where to find initialization sections. */
4966 	if ((error = wpi_nic_lock(sc)) != 0)
4967 		return error;
4968 	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
4969 	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz);
4970 	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
4971 	    dma->paddr + WPI_FW_DATA_MAXSZ);
4972 	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
4973 	wpi_nic_unlock(sc);
4974 
4975 	/* Load firmware boot code. */
4976 	error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
4977 	if (error != 0) {
4978 		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
4979 		    __func__);
4980 		return error;
4981 	}
4982 
4983 	/* Now press "execute". */
4984 	WPI_WRITE(sc, WPI_RESET, 0);
4985 
4986 	/* Wait at most one second for first alive notification. */
4987 	if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
4988 		device_printf(sc->sc_dev,
4989 		    "%s: timeout waiting for adapter to initialize, error %d\n",
4990 		    __func__, error);
4991 		return error;
4992 	}
4993 
4994 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
4995 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
4996 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4997 	memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz);
4998 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
4999 
5000 	/* Tell adapter where to find runtime sections. */
5001 	if ((error = wpi_nic_lock(sc)) != 0)
5002 		return error;
5003 	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
5004 	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5005 	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
5006 	    dma->paddr + WPI_FW_DATA_MAXSZ);
5007 	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
5008 	    WPI_FW_UPDATED | fw->main.textsz);
5009 	wpi_nic_unlock(sc);
5010 
5011 	return 0;
5012 }
5013 
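/*
 * Fetch the firmware image via firmware(9) and parse its header into
 * runtime, init and boot sections, sanity-checking the advertised sizes.
 */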
5014 static int
5015 wpi_read_firmware(struct wpi_softc *sc)
5016 {
5017 	const struct firmware *fp;
5018 	struct wpi_fw_info *fw = &sc->fw;
5019 	const struct wpi_firmware_hdr *hdr;
5020 	int error;
5021 
5022 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5023 
5024 	DPRINTF(sc, WPI_DEBUG_FIRMWARE,
5025 	    "Attempting Loading Firmware from %s module\n", WPI_FW_NAME);
5026 
5027 	WPI_UNLOCK(sc);
5028 	fp = firmware_get(WPI_FW_NAME);
5029 	WPI_LOCK(sc);
5030 
5031 	if (fp == NULL) {
5032 		device_printf(sc->sc_dev,
5033 		    "could not load firmware image '%s'\n", WPI_FW_NAME);
5034 		return EINVAL;
5035 	}
5036 
5037 	sc->fw_fp = fp;
5038 
5039 	if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
5040 		device_printf(sc->sc_dev,
5041 		    "firmware file too short: %zu bytes\n", fp->datasize);
5042 		error = EINVAL;
5043 		goto fail;
5044 	}
5045 
5046 	fw->size = fp->datasize;
5047 	fw->data = (const uint8_t *)fp->data;
5048 
5049 	/* Extract firmware header information. */
5050 	hdr = (const struct wpi_firmware_hdr *)fw->data;
5051 
5052 	/*     |  RUNTIME FIRMWARE   |    INIT FIRMWARE    | BOOT FW  |
5053 	   |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
5054 
5055 	fw->main.textsz = le32toh(hdr->rtextsz);
5056 	fw->main.datasz = le32toh(hdr->rdatasz);
5057 	fw->init.textsz = le32toh(hdr->itextsz);
5058 	fw->init.datasz = le32toh(hdr->idatasz);
5059 	fw->boot.textsz = le32toh(hdr->btextsz);
5060 	fw->boot.datasz = 0;
5061 
5062 	/* Sanity-check firmware header. */
5063 	if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
5064 	    fw->main.datasz > WPI_FW_DATA_MAXSZ ||
5065 	    fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
5066 	    fw->init.datasz > WPI_FW_DATA_MAXSZ ||
5067 	    fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
5068 	    (fw->boot.textsz & 3) != 0) {
5069 		device_printf(sc->sc_dev, "invalid firmware header\n");
5070 		error = EINVAL;
5071 		goto fail;
5072 	}
5073 
5074 	/* Check that all firmware sections fit. */
5075 	if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
5076 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5077 		device_printf(sc->sc_dev,
5078 		    "firmware file too short: %zu bytes\n", fw->size);
5079 		error = EINVAL;
5080 		goto fail;
5081 	}
5082 
5083 	/* Get pointers to firmware sections. */
5084 	fw->main.text = (const uint8_t *)(hdr + 1);
5085 	fw->main.data = fw->main.text + fw->main.textsz;
5086 	fw->init.text = fw->main.data + fw->main.datasz;
5087 	fw->init.data = fw->init.text + fw->init.textsz;
5088 	fw->boot.text = fw->init.data + fw->init.datasz;
5089 
5090 	DPRINTF(sc, WPI_DEBUG_FIRMWARE,
5091 	    "Firmware Version: Major %d, Minor %d, Driver %d, \n"
5092 	    "runtime (text: %u, data: %u) init (text: %u, data %u) "
5093 	    "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver),
5094 	    fw->main.textsz, fw->main.datasz,
5095 	    fw->init.textsz, fw->init.datasz, fw->boot.textsz);
5096 
5097 	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text);
5098 	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data);
5099 	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text);
5100 	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data);
5101 	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text);
5102 
5103 	return 0;
5104 
5105 fail:	wpi_unload_firmware(sc);
5106 	return error;
5107 }
5108 
5109 /**
5110  * Free the referenced firmware image.
5111  */
5112 static void
5113 wpi_unload_firmware(struct wpi_softc *sc)
5114 {
5115 	if (sc->fw_fp != NULL) {
5116 		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5117 		sc->fw_fp = NULL;
5118 	}
5119 }
5120 
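/*
 * Set the "initialization complete" bit and poll (up to ~250ms) for the MAC
 * clock to report ready.
 */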
5121 static int
5122 wpi_clock_wait(struct wpi_softc *sc)
5123 {
5124 	int ntries;
5125 
5126 	/* Set "initialization complete" bit. */
5127 	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
5128 
5129 	/* Wait for clock stabilization. */
5130 	for (ntries = 0; ntries < 2500; ntries++) {
5131 		if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY)
5132 			return 0;
5133 		DELAY(100);
5134 	}
5135 	device_printf(sc->sc_dev,
5136 	    "%s: timeout waiting for clock stabilization\n", __func__);
5137 
5138 	return ETIMEDOUT;
5139 }
5140 
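/*
 * Power the adapter on: apply the PCIe L0s/L1 workarounds, start the PLL,
 * wait for clock stabilization and enable the DMA and BSM clocks.
 */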
5141 static int
5142 wpi_apm_init(struct wpi_softc *sc)
5143 {
5144 	uint32_t reg;
5145 	int error;
5146 
5147 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5148 
5149 	/* Disable L0s exit timer (NMI bug workaround). */
5150 	WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER);
5151 	/* Don't wait for ICH L0s (ICH bug workaround). */
5152 	WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX);
5153 
5154 	/* Set FH wait threshold to max (HW bug under stress workaround). */
5155 	WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000);
5156 
5157 	/* Retrieve PCIe Active State Power Management (ASPM). */
5158 	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 1);
5159 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5160 	if (reg & PCIEM_LINK_CTL_ASPMC_L1)	/* L1 Entry enabled. */
5161 		WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
5162 	else
5163 		WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
5164 
5165 	WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT);
5166 
5167 	/* Wait for clock stabilization before accessing prph. */
5168 	if ((error = wpi_clock_wait(sc)) != 0)
5169 		return error;
5170 
5171 	if ((error = wpi_nic_lock(sc)) != 0)
5172 		return error;
5173 	/* Cleanup. */
5174 	wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400);
5175 	wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200);
5176 
5177 	/* Enable DMA and BSM (Bootstrap State Machine). */
5178 	wpi_prph_write(sc, WPI_APMG_CLK_EN,
5179 	    WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT);
5180 	DELAY(20);
5181 	/* Disable L1-Active. */
5182 	wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS);
5183 	wpi_nic_unlock(sc);
5184 
5185 	return 0;
5186 }
5187 
5188 static void
5189 wpi_apm_stop_master(struct wpi_softc *sc)
5190 {
5191 	int ntries;
5192 
5193 	/* Stop busmaster DMA activity. */
5194 	WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER);
5195 
5196 	if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) ==
5197 	    WPI_GP_CNTRL_MAC_PS)
5198 		return; /* Already asleep. */
5199 
5200 	for (ntries = 0; ntries < 100; ntries++) {
5201 		if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED)
5202 			return;
5203 		DELAY(10);
5204 	}
5205 	device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
5206 	    __func__);
5207 }
5208 
5209 static void
5210 wpi_apm_stop(struct wpi_softc *sc)
5211 {
5212 	wpi_apm_stop_master(sc);
5213 
5214 	/* Reset the entire device. */
5215 	WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW);
5216 	DELAY(10);
5217 	/* Clear "initialization complete" bit. */
5218 	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
5219 }
5220 
5221 static void
5222 wpi_nic_config(struct wpi_softc *sc)
5223 {
5224 	uint32_t rev;
5225 
5226 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5227 
5228 	/* voodoo from the Linux "driver".. */
5229 	rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
5230 	if ((rev & 0xc0) == 0x40)
5231 		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB);
5232 	else if (!(rev & 0x80))
5233 		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM);
5234 
5235 	if (sc->cap == 0x80)
5236 		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC);
5237 
5238 	if ((sc->rev & 0xf0) == 0xd0)
5239 		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
5240 	else
5241 		WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
5242 
5243 	if (sc->type > 1)
5244 		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B);
5245 }
5246 
5247 static int
5248 wpi_hw_init(struct wpi_softc *sc)
5249 {
5250 	uint8_t chnl;
5251 	int ntries, error;
5252 
5253 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
5254 
5255 	/* Clear pending interrupts. */
5256 	WPI_WRITE(sc, WPI_INT, 0xffffffff);
5257 
5258 	if ((error = wpi_apm_init(sc)) != 0) {
5259 		device_printf(sc->sc_dev,
5260 		    "%s: could not power ON adapter, error %d\n", __func__,
5261 		    error);
5262 		return error;
5263 	}
5264 
5265 	/* Select VMAIN power source. */
5266 	if ((error = wpi_nic_lock(sc)) != 0)
5267 		return error;
5268 	wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK);
5269 	wpi_nic_unlock(sc);
5270 	/* Spin until VMAIN gets selected. */
5271 	for (ntries = 0; ntries < 5000; ntries++) {
5272 		if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN)
5273 			break;
5274 		DELAY(10);
5275 	}
5276 	if (ntries == 5000) {
5277 		device_printf(sc->sc_dev, "timeout selecting power source\n");
5278 		return ETIMEDOUT;
5279 	}
5280 
5281 	/* Perform adapter initialization. */
5282 	wpi_nic_config(sc);
5283 
5284 	/* Initialize RX ring. */
5285 	if ((error = wpi_nic_lock(sc)) != 0)
5286 		return error;
5287 	/* Set physical address of RX ring. */
5288 	WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr);
5289 	/* Set physical address of RX read pointer. */
5290 	WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr +
5291 	    offsetof(struct wpi_shared, next));
5292 	WPI_WRITE(sc, WPI_FH_RX_WPTR, 0);
5293 	/* Enable RX. */
5294 	WPI_WRITE(sc, WPI_FH_RX_CONFIG,
5295 	    WPI_FH_RX_CONFIG_DMA_ENA |
5296 	    WPI_FH_RX_CONFIG_RDRBD_ENA |
5297 	    WPI_FH_RX_CONFIG_WRSTATUS_ENA |
5298 	    WPI_FH_RX_CONFIG_MAXFRAG |
5299 	    WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) |
5300 	    WPI_FH_RX_CONFIG_IRQ_DST_HOST |
5301 	    WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1));
5302 	(void)WPI_READ(sc, WPI_FH_RSSR_TBL);	/* barrier */
5303 	wpi_nic_unlock(sc);
5304 	WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7);
5305 
5306 	/* Initialize TX rings. */
5307 	if ((error = wpi_nic_lock(sc)) != 0)
5308 		return error;
5309 	wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2);	/* bypass mode */
5310 	wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1);	/* enable RA0 */
5311 	/* Enable all 6 TX rings. */
5312 	wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f);
5313 	wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000);
5314 	wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002);
5315 	wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4);
5316 	wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5);
5317 	/* Set physical address of TX rings. */
5318 	WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr);
5319 	WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5);
5320 
5321 	/* Enable all DMA channels. */
5322 	for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
5323 		WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0);
5324 		WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0);
5325 		WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008);
5326 	}
5327 	wpi_nic_unlock(sc);
5328 	(void)WPI_READ(sc, WPI_FH_TX_BASE);	/* barrier */
5329 
5330 	/* Clear "radio off" and "commands blocked" bits. */
5331 	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5332 	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED);
5333 
5334 	/* Clear pending interrupts. */
5335 	WPI_WRITE(sc, WPI_INT, 0xffffffff);
5336 	/* Enable interrupts. */
5337 	WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
5338 
5339 	/* _Really_ make sure "radio off" bit is cleared! */
5340 	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5341 	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
5342 
5343 	if ((error = wpi_load_firmware(sc)) != 0) {
5344 		device_printf(sc->sc_dev,
5345 		    "%s: could not load firmware, error %d\n", __func__,
5346 		    error);
5347 		return error;
5348 	}
5349 	/* Wait at most one second for firmware alive notification. */
5350 	if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
5351 		device_printf(sc->sc_dev,
5352 		    "%s: timeout waiting for adapter to initialize, error %d\n",
5353 		    __func__, error);
5354 		return error;
5355 	}
5356 
5357 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5358 
5359 	/* Do post-firmware initialization. */
5360 	return wpi_post_alive(sc);
5361 }
5362 
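/*
 * Stop the hardware: disable interrupts, halt the TX scheduler and the DMA
 * channels, reset the RX and TX rings, then power the adapter off.
 */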
5363 static void
5364 wpi_hw_stop(struct wpi_softc *sc)
5365 {
5366 	uint8_t chnl, qid;
5367 	int ntries;
5368 
5369 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5370 
5371 	if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP)
5372 		wpi_nic_lock(sc);
5373 
5374 	WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO);
5375 
5376 	/* Disable interrupts. */
5377 	WPI_WRITE(sc, WPI_INT_MASK, 0);
5378 	WPI_WRITE(sc, WPI_INT, 0xffffffff);
5379 	WPI_WRITE(sc, WPI_FH_INT, 0xffffffff);
5380 
5381 	/* Make sure we no longer hold the NIC lock. */
5382 	wpi_nic_unlock(sc);
5383 
5384 	if (wpi_nic_lock(sc) == 0) {
5385 		/* Stop TX scheduler. */
5386 		wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0);
5387 		wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0);
5388 
5389 		/* Stop all DMA channels. */
5390 		for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
5391 			WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0);
5392 			for (ntries = 0; ntries < 200; ntries++) {
5393 				if (WPI_READ(sc, WPI_FH_TX_STATUS) &
5394 				    WPI_FH_TX_STATUS_IDLE(chnl))
5395 					break;
5396 				DELAY(10);
5397 			}
5398 		}
5399 		wpi_nic_unlock(sc);
5400 	}
5401 
5402 	/* Stop RX ring. */
5403 	wpi_reset_rx_ring(sc);
5404 
5405 	/* Reset all TX rings. */
5406 	for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++)
5407 		wpi_reset_tx_ring(sc, &sc->txq[qid]);
5408 
5409 	if (wpi_nic_lock(sc) == 0) {
5410 		wpi_prph_write(sc, WPI_APMG_CLK_DIS,
5411 		    WPI_APMG_CLK_CTRL_DMA_CLK_RQT);
5412 		wpi_nic_unlock(sc);
5413 	}
5414 	DELAY(5);
5415 	/* Power OFF adapter. */
5416 	wpi_apm_stop(sc);
5417 }
5418 
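/*
 * The RF kill switch has been released: stop the rfkill watchdog callout
 * and bring the interface back up.
 */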
5419 static void
5420 wpi_radio_on(void *arg0, int pending)
5421 {
5422 	struct wpi_softc *sc = arg0;
5423 	struct ieee80211com *ic = &sc->sc_ic;
5424 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5425 
5426 	device_printf(sc->sc_dev, "RF switch: radio enabled\n");
5427 
5428 	WPI_LOCK(sc);
5429 	callout_stop(&sc->watchdog_rfkill);
5430 	WPI_UNLOCK(sc);
5431 
5432 	if (vap != NULL)
5433 		ieee80211_init(vap);
5434 }
5435 
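/*
 * RF-kill switch task: the hardware switch has disabled the radio.
 * Notify net80211, stop the hardware and arm the rfkill watchdog so we
 * notice when the radio is switched back on.
 */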
5436 static void
5437 wpi_radio_off(void *arg0, int pending)
5438 {
5439 	struct wpi_softc *sc = arg0;
5440 	struct ieee80211com *ic = &sc->sc_ic;
5441 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5442 
5443 	device_printf(sc->sc_dev, "RF switch: radio disabled\n");
5444 
5445 	ieee80211_notify_radio(ic, 0);
5446 	wpi_stop(sc);
5447 	if (vap != NULL)
5448 		ieee80211_stop(vap);
5449 
5450 	WPI_LOCK(sc);
5451 	callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc);
5452 	WPI_UNLOCK(sc);
5453 }
5454 
5455 static int
5456 wpi_init(struct wpi_softc *sc)
5457 {
5458 	int error = 0;
5459 
5460 	WPI_LOCK(sc);
5461 
5462 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
5463 
5464 	if (sc->sc_running != 0)
5465 		goto end;
5466 
5467 	/* Check that the radio is not disabled by the hardware switch. */
5468 	if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) {
5469 		device_printf(sc->sc_dev,
5470 		    "RF switch: radio disabled (%s)\n", __func__);
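		/* Schedule the rfkill watchdog to re-check the switch in one second. */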
5471 		callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
5472 		    sc);
5473 		error = EINPROGRESS;
5474 		goto end;
5475 	}
5476 
5477 	/* Read firmware images from the filesystem. */
5478 	if ((error = wpi_read_firmware(sc)) != 0) {
5479 		device_printf(sc->sc_dev,
5480 		    "%s: could not read firmware, error %d\n", __func__,
5481 		    error);
5482 		goto end;
5483 	}
5484 
5485 	sc->sc_running = 1;
5486 
5487 	/* Initialize hardware and upload firmware. */
5488 	error = wpi_hw_init(sc);
5489 	wpi_unload_firmware(sc);
5490 	if (error != 0) {
5491 		device_printf(sc->sc_dev,
5492 		    "%s: could not initialize hardware, error %d\n", __func__,
5493 		    error);
5494 		goto fail;
5495 	}
5496 
5497 	/* Configure adapter now that it is ready. */
5498 	if ((error = wpi_config(sc)) != 0) {
5499 		device_printf(sc->sc_dev,
5500 		    "%s: could not configure device, error %d\n", __func__,
5501 		    error);
5502 		goto fail;
5503 	}
5504 
5505 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
5506 
5507 	WPI_UNLOCK(sc);
5508 
5509 	return 0;
5510 
5511 fail:	wpi_stop_locked(sc);
5512 
5513 end:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
5514 	WPI_UNLOCK(sc);
5515 
5516 	return error;
5517 }
5518 
5519 static void
5520 wpi_stop_locked(struct wpi_softc *sc)
5521 {
5522 
5523 	WPI_LOCK_ASSERT(sc);
5524 
5525 	if (sc->sc_running == 0)
5526 		return;
5527 
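	/* Clear the running flag under the TX path locks. */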
5528 	WPI_TX_LOCK(sc);
5529 	WPI_TXQ_LOCK(sc);
5530 	sc->sc_running = 0;
5531 	WPI_TXQ_UNLOCK(sc);
5532 	WPI_TX_UNLOCK(sc);
5533 
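	/* Cancel the TX timeout callout. */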
5534 	WPI_TXQ_STATE_LOCK(sc);
5535 	callout_stop(&sc->tx_timeout);
5536 	WPI_TXQ_STATE_UNLOCK(sc);
5537 
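	/* Cancel the scan timeout and periodic calibration callouts. */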
5538 	WPI_RXON_LOCK(sc);
5539 	callout_stop(&sc->scan_timeout);
5540 	callout_stop(&sc->calib_to);
5541 	WPI_RXON_UNLOCK(sc);
5542 
5543 	/* Power OFF hardware. */
5544 	wpi_hw_stop(sc);
5545 }
5546 
5547 static void
5548 wpi_stop(struct wpi_softc *sc)
5549 {
5550 	WPI_LOCK(sc);
5551 	wpi_stop_locked(sc);
5552 	WPI_UNLOCK(sc);
5553 }
5554 
5555 /*
5556  * Callback from net80211 to start a scan.
5557  */
5558 static void
5559 wpi_scan_start(struct ieee80211com *ic)
5560 {
5561 	struct wpi_softc *sc = ic->ic_softc;
5562 
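	/* Signal the start of the scan by blinking the link LED. */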
5563 	wpi_set_led(sc, WPI_LED_LINK, 20, 2);
5564 }
5565 
5566 /*
5567  * Callback from net80211 to terminate a scan.
5568  */
5569 static void
5570 wpi_scan_end(struct ieee80211com *ic)
5571 {
5572 	struct wpi_softc *sc = ic->ic_softc;
5573 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5574 
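	/* If we are associated, switch the link LED back to a steady pattern. */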
5575 	if (vap->iv_state == IEEE80211_S_RUN)
5576 		wpi_set_led(sc, WPI_LED_LINK, 0, 1);
5577 }
5578 
5579 /**
5580  * Called by the net80211 framework to indicate to the driver
5581  * that the current channel should be changed.
5582  */
5583 static void
5584 wpi_set_channel(struct ieee80211com *ic)
5585 {
5586 	const struct ieee80211_channel *c = ic->ic_curchan;
5587 	struct wpi_softc *sc = ic->ic_softc;
5588 	int error;
5589 
5590 	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
5591 
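	/* Update the channel fields of the RX and TX radiotap headers. */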
5592 	WPI_LOCK(sc);
5593 	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
5594 	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
5595 	WPI_UNLOCK(sc);
5596 	WPI_TX_LOCK(sc);
5597 	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
5598 	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
5599 	WPI_TX_UNLOCK(sc);
5600 
5601 	/*
5602 	 * Only need to set the channel in Monitor mode. AP scanning and auth
5603 	 * are already taken care of by their respective firmware commands.
5604 	 */
5605 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5606 		WPI_RXON_LOCK(sc);
5607 		sc->rxon.chan = ieee80211_chan2ieee(ic, c);
5608 		if (IEEE80211_IS_CHAN_2GHZ(c)) {
5609 			sc->rxon.flags |= htole32(WPI_RXON_AUTO |
5610 			    WPI_RXON_24GHZ);
5611 		} else {
5612 			sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
5613 			    WPI_RXON_24GHZ);
5614 		}
5615 		if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
5616 			device_printf(sc->sc_dev,
5617 			    "%s: error %d setting channel\n", __func__,
5618 			    error);
5619 		WPI_RXON_UNLOCK(sc);
5620 	}
5621 }
5622 
5623 /**
5624  * Called by net80211 to indicate that we need to scan the current
5625  * channel. The channel was previously set via the wpi_set_channel
5626  * callback.
5627  */
5628 static void
5629 wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5630 {
5631 	struct ieee80211vap *vap = ss->ss_vap;
5632 	struct ieee80211com *ic = vap->iv_ic;
5633 	struct wpi_softc *sc = ic->ic_softc;
5634 	int error;
5635 
5636 	WPI_RXON_LOCK(sc);
5637 	error = wpi_scan(sc, ic->ic_curchan);
5638 	WPI_RXON_UNLOCK(sc);
5639 	if (error != 0)
5640 		ieee80211_cancel_scan(vap);
5641 }
5642 
5643 /**
5644  * Called by the net80211 framework to indicate that
5645  * the minimum dwell time has been met and the scan should be terminated.
5646  * We don't actually terminate the scan as the firmware will notify
5647  * us when it's finished and we have no way to interrupt it.
5648  */
5649 static void
5650 wpi_scan_mindwell(struct ieee80211_scan_state *ss)
5651 {
5652 	/* NB: don't try to abort scan; wait for firmware to finish */
5653 }
5654