xref: /freebsd/sys/dev/mwl/if_mwl.c (revision c697fb7f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5  * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16  *    redistribution must be conditioned upon including a substantially
17  *    similar Disclaimer requirement for further binary redistribution.
18  *
19  * NO WARRANTY
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGES.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Marvell 88W8363 Wireless LAN controller.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_mwl.h"
42 #include "opt_wlan.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/errno.h>
55 #include <sys/callout.h>
56 #include <sys/bus.h>
57 #include <sys/endian.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 
61 #include <machine/bus.h>
62 
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_arp.h>
69 #include <net/ethernet.h>
70 #include <net/if_llc.h>
71 
72 #include <net/bpf.h>
73 
74 #include <net80211/ieee80211_var.h>
75 #include <net80211/ieee80211_input.h>
76 #include <net80211/ieee80211_regdomain.h>
77 
78 #ifdef INET
79 #include <netinet/in.h>
80 #include <netinet/if_ether.h>
81 #endif /* INET */
82 
83 #include <dev/mwl/if_mwlvar.h>
84 #include <dev/mwl/mwldiag.h>
85 
86 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
87 #define	MS(v,x)	(((v) & x) >> x##_S)
88 #define	SM(v,x)	(((v) << x##_S) & x)
89 
90 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
91 		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
92 		    const uint8_t [IEEE80211_ADDR_LEN],
93 		    const uint8_t [IEEE80211_ADDR_LEN]);
94 static void	mwl_vap_delete(struct ieee80211vap *);
95 static int	mwl_setupdma(struct mwl_softc *);
96 static int	mwl_hal_reset(struct mwl_softc *sc);
97 static int	mwl_init(struct mwl_softc *);
98 static void	mwl_parent(struct ieee80211com *);
99 static int	mwl_reset(struct ieee80211vap *, u_long);
100 static void	mwl_stop(struct mwl_softc *);
101 static void	mwl_start(struct mwl_softc *);
102 static int	mwl_transmit(struct ieee80211com *, struct mbuf *);
103 static int	mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
104 			const struct ieee80211_bpf_params *);
105 static int	mwl_media_change(struct ifnet *);
106 static void	mwl_watchdog(void *);
107 static int	mwl_ioctl(struct ieee80211com *, u_long, void *);
108 static void	mwl_radar_proc(void *, int);
109 static void	mwl_chanswitch_proc(void *, int);
110 static void	mwl_bawatchdog_proc(void *, int);
111 static int	mwl_key_alloc(struct ieee80211vap *,
112 			struct ieee80211_key *,
113 			ieee80211_keyix *, ieee80211_keyix *);
114 static int	mwl_key_delete(struct ieee80211vap *,
115 			const struct ieee80211_key *);
116 static int	mwl_key_set(struct ieee80211vap *,
117 			const struct ieee80211_key *);
118 static int	_mwl_key_set(struct ieee80211vap *,
119 			const struct ieee80211_key *,
120 			const uint8_t mac[IEEE80211_ADDR_LEN]);
121 static int	mwl_mode_init(struct mwl_softc *);
122 static void	mwl_update_mcast(struct ieee80211com *);
123 static void	mwl_update_promisc(struct ieee80211com *);
124 static void	mwl_updateslot(struct ieee80211com *);
125 static int	mwl_beacon_setup(struct ieee80211vap *);
126 static void	mwl_beacon_update(struct ieee80211vap *, int);
127 #ifdef MWL_HOST_PS_SUPPORT
128 static void	mwl_update_ps(struct ieee80211vap *, int);
129 static int	mwl_set_tim(struct ieee80211_node *, int);
130 #endif
131 static int	mwl_dma_setup(struct mwl_softc *);
132 static void	mwl_dma_cleanup(struct mwl_softc *);
133 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
134 		    const uint8_t [IEEE80211_ADDR_LEN]);
135 static void	mwl_node_cleanup(struct ieee80211_node *);
136 static void	mwl_node_drain(struct ieee80211_node *);
137 static void	mwl_node_getsignal(const struct ieee80211_node *,
138 			int8_t *, int8_t *);
139 static void	mwl_node_getmimoinfo(const struct ieee80211_node *,
140 			struct ieee80211_mimo_info *);
141 static int	mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
142 static void	mwl_rx_proc(void *, int);
143 static void	mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
144 static int	mwl_tx_setup(struct mwl_softc *, int, int);
145 static int	mwl_wme_update(struct ieee80211com *);
146 static void	mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
147 static void	mwl_tx_cleanup(struct mwl_softc *);
148 static uint16_t	mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
149 static int	mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
150 			     struct mwl_txbuf *, struct mbuf *);
151 static void	mwl_tx_proc(void *, int);
152 static int	mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
153 static void	mwl_draintxq(struct mwl_softc *);
154 static void	mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
155 static int	mwl_recv_action(struct ieee80211_node *,
156 			const struct ieee80211_frame *,
157 			const uint8_t *, const uint8_t *);
158 static int	mwl_addba_request(struct ieee80211_node *,
159 			struct ieee80211_tx_ampdu *, int dialogtoken,
160 			int baparamset, int batimeout);
161 static int	mwl_addba_response(struct ieee80211_node *,
162 			struct ieee80211_tx_ampdu *, int status,
163 			int baparamset, int batimeout);
164 static void	mwl_addba_stop(struct ieee80211_node *,
165 			struct ieee80211_tx_ampdu *);
166 static int	mwl_startrecv(struct mwl_softc *);
167 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
168 			struct ieee80211_channel *);
169 static int	mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
170 static void	mwl_scan_start(struct ieee80211com *);
171 static void	mwl_scan_end(struct ieee80211com *);
172 static void	mwl_set_channel(struct ieee80211com *);
173 static int	mwl_peerstadb(struct ieee80211_node *,
174 			int aid, int staid, MWL_HAL_PEERINFO *pi);
175 static int	mwl_localstadb(struct ieee80211vap *);
176 static int	mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
177 static int	allocstaid(struct mwl_softc *sc, int aid);
178 static void	delstaid(struct mwl_softc *sc, int staid);
179 static void	mwl_newassoc(struct ieee80211_node *, int);
180 static void	mwl_agestations(void *);
181 static int	mwl_setregdomain(struct ieee80211com *,
182 			struct ieee80211_regdomain *, int,
183 			struct ieee80211_channel []);
184 static void	mwl_getradiocaps(struct ieee80211com *, int, int *,
185 			struct ieee80211_channel []);
186 static int	mwl_getchannels(struct mwl_softc *);
187 
188 static void	mwl_sysctlattach(struct mwl_softc *);
189 static void	mwl_announce(struct mwl_softc *);
190 
191 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
192     "Marvell driver parameters");
193 
194 static	int mwl_rxdesc = MWL_RXDESC;		/* # rx desc's to allocate */
195 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
196 	    0, "rx descriptors allocated");
197 static	int mwl_rxbuf = MWL_RXBUF;		/* # rx buffers to allocate */
198 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
199 	    0, "rx buffers allocated");
200 static	int mwl_txbuf = MWL_TXBUF;		/* # tx buffers to allocate */
201 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
202 	    0, "tx buffers allocated");
203 static	int mwl_txcoalesce = 8;		/* # tx packets to q before poking f/w*/
204 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
205 	    0, "tx buffers to send at once");
206 static	int mwl_rxquota = MWL_RXBUF;		/* # max buffers to process */
207 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
208 	    0, "max rx buffers to process per interrupt");
209 static	int mwl_rxdmalow = 3;			/* # min buffers for wakeup */
210 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
211 	    0, "min free rx buffers before restarting traffic");
212 
213 #ifdef MWL_DEBUG
214 static	int mwl_debug = 0;
215 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
216 	    0, "control debugging printfs");
217 enum {
218 	MWL_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
219 	MWL_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
220 	MWL_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
221 	MWL_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
222 	MWL_DEBUG_RESET		= 0x00000010,	/* reset processing */
223 	MWL_DEBUG_BEACON 	= 0x00000020,	/* beacon handling */
224 	MWL_DEBUG_INTR		= 0x00000040,	/* ISR */
225 	MWL_DEBUG_TX_PROC	= 0x00000080,	/* tx ISR proc */
226 	MWL_DEBUG_RX_PROC	= 0x00000100,	/* rx ISR proc */
227 	MWL_DEBUG_KEYCACHE	= 0x00000200,	/* key cache management */
228 	MWL_DEBUG_STATE		= 0x00000400,	/* 802.11 state transitions */
229 	MWL_DEBUG_NODE		= 0x00000800,	/* node management */
230 	MWL_DEBUG_RECV_ALL	= 0x00001000,	/* trace all frames (beacons) */
231 	MWL_DEBUG_TSO		= 0x00002000,	/* TSO processing */
232 	MWL_DEBUG_AMPDU		= 0x00004000,	/* BA stream handling */
233 	MWL_DEBUG_ANY		= 0xffffffff
234 };
235 #define	IS_BEACON(wh) \
236     ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
237 	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
238 #define	IFF_DUMPPKTS_RECV(sc, wh) \
239     ((sc->sc_debug & MWL_DEBUG_RECV) && \
240       ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
241 #define	IFF_DUMPPKTS_XMIT(sc) \
242 	(sc->sc_debug & MWL_DEBUG_XMIT)
243 
244 #define	DPRINTF(sc, m, fmt, ...) do {				\
245 	if (sc->sc_debug & (m))					\
246 		printf(fmt, __VA_ARGS__);			\
247 } while (0)
248 #define	KEYPRINTF(sc, hk, mac) do {				\
249 	if (sc->sc_debug & MWL_DEBUG_KEYCACHE)			\
250 		mwl_keyprint(sc, __func__, hk, mac);		\
251 } while (0)
252 static	void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
253 static	void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
254 #else
255 #define	IFF_DUMPPKTS_RECV(sc, wh)	0
256 #define	IFF_DUMPPKTS_XMIT(sc)		0
257 #define	DPRINTF(sc, m, fmt, ...)	do { (void )sc; } while (0)
258 #define	KEYPRINTF(sc, k, mac)		do { (void )sc; } while (0)
259 #endif
260 
261 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
262 
263 /*
264  * Each packet has fixed front matter: a 2-byte length
265  * of the payload, followed by a 4-address 802.11 header
266  * (regardless of the actual header and always w/o any
267  * QoS header).  The payload then follows.
268  */
struct mwltxrec {
	uint16_t fwlen;				/* payload length reported to f/w */
	struct ieee80211_frame_addr4 wh;	/* 4-address 802.11 header, no QoS */
} __packed;
273 
274 /*
275  * Read/Write shorthands for accesses to BAR 0.  Note
276  * that all BAR 1 operations are done in the "hal" and
277  * there should be no reference to them here.
278  */
#ifdef MWL_DEBUG
/*
 * Read a 32-bit register in BAR 0 at byte offset 'off'.
 * Only referenced by debug code; normal operation goes through the hal.
 */
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif
286 
/*
 * Write a 32-bit register in BAR 0 at byte offset 'off'.
 */
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
292 
/*
 * Device attach: bring up the hal, load firmware, allocate DMA
 * resources, create the transmit queues/taskqueue, and register
 * with the net80211 layer.  Returns 0 on success or an errno;
 * on failure all partially-created state is unwound via the
 * bad/bad1/bad2 labels and sc_invalid is set so the interrupt
 * handler ignores the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	/* NB: BAR 1 resources were mapped by the bus front-end */
	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	/* build the channel list from the hal's regdomain data */
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/*
	 * Single-threaded taskqueue for deferred interrupt work.
	 * NOTE(review): taskqueue_create can fail with M_NOWAIT but the
	 * result is not checked here — TODO confirm this is acceptable.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	ic->ic_node_alloc = mwl_node_alloc;
	/* NB: save the net80211 defaults so our wrappers can chain to them */
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;		/* disable interrupt handling */
	return error;
}
529 
/*
 * Device detach: stop the hardware under the softc lock, then tear
 * down the state built by mwl_attach.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);	/* discard any pending tx packets */

	return 0;
}
560 
561 /*
562  * MAC address handling for multiple BSS on the same radio.
563  * The first vap uses the MAC address from the EEPROM.  For
564  * subsequent vap's we set the U/L bit (bit 1) in the MAC
565  * address and use the next six bits as an index.
566  */
567 static void
568 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
569 {
570 	int i;
571 
572 	if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
573 		/* NB: we only do this if h/w supports multiple bssid */
574 		for (i = 0; i < 32; i++)
575 			if ((sc->sc_bssidmask & (1<<i)) == 0)
576 				break;
577 		if (i != 0)
578 			mac[0] |= (i << 2)|0x2;
579 	} else
580 		i = 0;
581 	sc->sc_bssidmask |= 1<<i;
582 	if (i == 0)
583 		sc->sc_nbssid0++;
584 }
585 
586 static void
587 reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
588 {
589 	int i = mac[0] >> 2;
590 	if (i != 0 || --sc->sc_nbssid0 == 0)
591 		sc->sc_bssidmask &= ~(1<<i);
592 }
593 
/*
 * Create a vap of the requested opmode.  AP/MBSS/STA vaps get a
 * dedicated hal vap (and possibly a cloned MAC address); WDS vaps
 * piggyback on an existing AP vap; monitor vaps need no hal state.
 * IBSS/adhoc-demo are not supported.  Returns NULL on failure.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			/* undo the address reservation made above */
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;		/* no hal state needed */
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;		/* unsupported opmodes */
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods */
	mvp->mv_newstate = vap->iv_newstate;	/* save for chaining */
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;	/* save for chaining */
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
721 
/*
 * Destroy a vap created by mwl_vap_create, releasing the hal vap,
 * station db entry, cloned MAC address, and pending tx frames.
 * Interrupts are quiesced around the teardown when the device is
 * running.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);		/* reclaim frames queued for this vap */
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable interrupts */
}
762 
/*
 * Power-management suspend: just stop the device; operating state
 * is rebuilt on resume.
 */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
771 
/*
 * Power-management resume: re-init the device if any vap was
 * running, then restart all vaps.  EDOOFUS is a sentinel meaning
 * "nothing was running, don't restart".
 */
void
mwl_resume(struct mwl_softc *sc)
{
	int error = EDOOFUS;

	MWL_LOCK(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		error = mwl_init(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
}
785 
/*
 * System shutdown hook: quiesce the hardware.
 */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
795 
796 /*
797  * Interrupt handler.  Most of the actual processing is deferred.
798  */
/*
 * Interrupt handler.  Most of the actual processing is deferred:
 * we decode the (self-clearing) ISR status from the hal and hand
 * each cause to its taskqueue task or bump a statistic.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);		/* firmware cmd completion */
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;				/* XXX unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;				/* XXX unhandled */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
854 
/*
 * Deferred radar-detect processing: notify the net80211 DFS
 * machinery for the current channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
871 
/*
 * Deferred CSA processing: the firmware has completed a channel
 * switch; clear our pending flag and tell net80211 to finish the
 * switch.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
886 
/*
 * Kill a stalled BA stream: data[0] holds the node and data[1]
 * the tx ampdu state (as stashed when the stream was created).
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
895 
/*
 * Deferred BA-watchdog processing: ask the firmware which BA
 * stream(s) stalled and tear them down.  A bitmap of 0xff means
 * "all streams"; 0xaa is apparently a firmware "nothing to do"
 * sentinel and is ignored — NOTE(review): confirm against f/w docs.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
940 
941 /*
942  * Convert net80211 channel to a HAL channel.
943  */
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
	hc->channel = chan->ic_ieee;

	/* clear all flag bits in one store; assumes channelFlags is 32 bits */
	*(uint32_t *)&hc->channelFlags = 0;
	if (IEEE80211_IS_CHAN_2GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
	else if (IEEE80211_IS_CHAN_5GHZ(chan))
		hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
	/* HT40 channels additionally carry the extension-channel offset */
	if (IEEE80211_IS_CHAN_HT40(chan)) {
		hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
		if (IEEE80211_IS_CHAN_HT40U(chan))
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
		else
			hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
	} else
		hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
	/* XXX 10MHz channels */
}
964 
965 /*
966  * Inform firmware of our tx/rx dma setup.  The BAR 0
967  * writes below are for compatibility with older firmware.
968  * For current firmware we send this information with a
969  * cmd block via mwl_hal_sethwdma.
970  */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* rx ring: point both h/w read and write pointers at the ring base */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* tx rings; the trailing ACK queues are not handed to the firmware */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	/* current firmware receives the same setup via a cmd block */
	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
996 
997 /*
998  * Inform firmware of tx rate parameters.
999  * Called after a channel change.
1000  */
1001 static int
1002 mwl_setcurchanrates(struct mwl_softc *sc)
1003 {
1004 	struct ieee80211com *ic = &sc->sc_ic;
1005 	const struct ieee80211_rateset *rs;
1006 	MWL_HAL_TXRATE rates;
1007 
1008 	memset(&rates, 0, sizeof(rates));
1009 	rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1010 	/* rate used to send management frames */
1011 	rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1012 	/* rate used to send multicast frames */
1013 	rates.McastRate = rates.MgtRate;
1014 
1015 	return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1016 }
1017 
1018 /*
1019  * Inform firmware of tx rate parameters.  Called whenever
1020  * user-settable params change and after a channel change.
1021  */
static int
mwl_setrates(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct ieee80211_node *ni = vap->iv_bss;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	MWL_HAL_TXRATE rates;

	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));

	/*
	 * Update the h/w rate map.
	 * NB: 0x80 for MCS is passed through unchanged
	 */
	memset(&rates, 0, sizeof(rates));
	/* rate used to send management frames */
	rates.MgtRate = tp->mgmtrate;
	/* rate used to send multicast frames */
	rates.McastRate = tp->mcastrate;

	/* while here calculate EAPOL fixed rate cookie */
	mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));

	/* fixed unicast rate forces RATE_FIXED, otherwise let f/w adapt */
	return mwl_hal_settxrate(mvp->mv_hvap,
	    tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
		RATE_FIXED : RATE_AUTO, &rates);
}
1049 
1050 /*
1051  * Setup a fixed xmit rate cookie for EAPOL frames.
1052  */
1053 static void
1054 mwl_seteapolformat(struct ieee80211vap *vap)
1055 {
1056 	struct mwl_vap *mvp = MWL_VAP(vap);
1057 	struct ieee80211_node *ni = vap->iv_bss;
1058 	enum ieee80211_phymode mode;
1059 	uint8_t rate;
1060 
1061 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1062 
1063 	mode = ieee80211_chan2mode(ni->ni_chan);
1064 	/*
1065 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1066 	 * NB: this may violate POLA for sta and wds vap's.
1067 	 */
1068 	if (mode == IEEE80211_MODE_11NA &&
1069 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1070 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1071 	else if (mode == IEEE80211_MODE_11NG &&
1072 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1073 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1074 	else
1075 		rate = vap->iv_txparms[mode].mgmtrate;
1076 
1077 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1078 }
1079 
1080 /*
1081  * Map SKU+country code to region code for radar bin'ing.
1082  */
1083 static int
1084 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1085 {
1086 	switch (rd->regdomain) {
1087 	case SKU_FCC:
1088 	case SKU_FCC3:
1089 		return DOMAIN_CODE_FCC;
1090 	case SKU_CA:
1091 		return DOMAIN_CODE_IC;
1092 	case SKU_ETSI:
1093 	case SKU_ETSI2:
1094 	case SKU_ETSI3:
1095 		if (rd->country == CTRY_SPAIN)
1096 			return DOMAIN_CODE_SPAIN;
1097 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1098 			return DOMAIN_CODE_FRANCE;
1099 		/* XXX force 1.3.1 radar type */
1100 		return DOMAIN_CODE_ETSI_131;
1101 	case SKU_JAPAN:
1102 		return DOMAIN_CODE_MKK;
1103 	case SKU_ROW:
1104 		return DOMAIN_CODE_DGT;	/* Taiwan */
1105 	case SKU_APAC:
1106 	case SKU_APAC2:
1107 	case SKU_APAC3:
1108 		return DOMAIN_CODE_AUS;	/* Australia */
1109 	}
1110 	/* XXX KOREA? */
1111 	return DOMAIN_CODE_FCC;			/* XXX? */
1112 }
1113 
/*
 * Push vap-independent state to the firmware: antenna config,
 * radio, WMM, current channel, rate adaptation and region code.
 * NB: return values of the individual hal calls are not checked;
 *     this routine always reports success (non-zero).
 */
static int
mwl_hal_reset(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh = sc->sc_mh;

	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
	mwl_chan_set(sc, ic->ic_curchan);
	/* NB: RF/RA performance tuned for indoor mode */
	mwl_hal_setrateadaptmode(mh, 0);
	mwl_hal_setoptimizationlevel(mh,
	    (ic->ic_flags & IEEE80211_F_BURST) != 0);

	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));

	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
	mwl_hal_setcfend(mh, 0);			/* XXX */

	return 1;
}
1137 
/*
 * Bring the hardware up: stop any previous activity, reset and
 * reprogram the firmware, start the receive path, and enable
 * interrupts.  Returns 0 on success or an errno.
 * Caller must hold the softc lock.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NB: MAC_EVENT and QUEUE_EMPTY are deliberately disabled
	 *     (see the #if 0 blocks below and the no-op handlers in
	 *     the interrupt path).
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1194 
1195 static void
1196 mwl_stop(struct mwl_softc *sc)
1197 {
1198 
1199 	MWL_LOCK_ASSERT(sc);
1200 	if (sc->sc_running) {
1201 		/*
1202 		 * Shutdown the hardware and driver.
1203 		 */
1204 		sc->sc_running = 0;
1205 		callout_stop(&sc->sc_watchdog);
1206 		sc->sc_tx_timer = 0;
1207 		mwl_draintxq(sc);
1208 	}
1209 }
1210 
/*
 * Re-push per-vap state to the firmware after a reset: tx rates,
 * RTS threshold, short GI, HT protection and (for AP-like vaps in
 * RUN state) the beacon frame.  Returns 0 or an errno from
 * mwl_beacon_setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1240 
1241 /*
1242  * Reset the hardware w/o losing operational state.
1243  * Used to reset or reload hardware state for a vap.
1244  */
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	int error = 0;

	/* NB: vaps without h/w state (WDS, MONITOR, etc.) need no work */
	if (hvap != NULL) {			/* WDS, MONITOR, etc. */
		struct ieee80211com *ic = vap->iv_ic;
		struct mwl_softc *sc = ic->ic_softc;
		struct mwl_hal *mh = sc->sc_mh;

		/* XXX handle DWDS sta vap change */
		/* XXX do we need to disable interrupts? */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
		error = mwl_reset_vap(vap, vap->iv_state);
		mwl_hal_intrset(mh, sc->sc_imask);
	}
	return error;
}
1264 
1265 /*
1266  * Allocate a tx buffer for sending a frame.  The
1267  * packet is assumed to have the WME AC stored so
1268  * we can use it to select the appropriate h/w queue.
1269  */
1270 static struct mwl_txbuf *
1271 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1272 {
1273 	struct mwl_txbuf *bf;
1274 
1275 	/*
1276 	 * Grab a TX buffer and associated resources.
1277 	 */
1278 	MWL_TXQ_LOCK(txq);
1279 	bf = STAILQ_FIRST(&txq->free);
1280 	if (bf != NULL) {
1281 		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1282 		txq->nfree--;
1283 	}
1284 	MWL_TXQ_UNLOCK(txq);
1285 	if (bf == NULL)
1286 		DPRINTF(sc, MWL_DEBUG_XMIT,
1287 		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1288 	return bf;
1289 }
1290 
1291 /*
1292  * Return a tx buffer to the queue it came from.  Note there
1293  * are two cases because we must preserve the order of buffers
1294  * as it reflects the fixed order of descriptors in memory
1295  * (the firmware pre-fetches descriptors so we cannot reorder).
1296  */
1297 static void
1298 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1299 {
1300 	bf->bf_m = NULL;
1301 	bf->bf_node = NULL;
1302 	MWL_TXQ_LOCK(txq);
1303 	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1304 	txq->nfree++;
1305 	MWL_TXQ_UNLOCK(txq);
1306 }
1307 
1308 static void
1309 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1310 {
1311 	bf->bf_m = NULL;
1312 	bf->bf_node = NULL;
1313 	MWL_TXQ_LOCK(txq);
1314 	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1315 	txq->nfree++;
1316 	MWL_TXQ_UNLOCK(txq);
1317 }
1318 
1319 static int
1320 mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1321 {
1322 	struct mwl_softc *sc = ic->ic_softc;
1323 	int error;
1324 
1325 	MWL_LOCK(sc);
1326 	if (!sc->sc_running) {
1327 		MWL_UNLOCK(sc);
1328 		return (ENXIO);
1329 	}
1330 	error = mbufq_enqueue(&sc->sc_snd, m);
1331 	if (error) {
1332 		MWL_UNLOCK(sc);
1333 		return (error);
1334 	}
1335 	mwl_start(sc);
1336 	MWL_UNLOCK(sc);
1337 	return (0);
1338 }
1339 
/*
 * Drain the driver's send queue, mapping each frame onto the h/w
 * queue chosen by its WME classification and handing it to the
 * firmware.  Frames that cannot get a tx buffer are either dropped
 * or stall the queue (MWL_TX_NODROP).  Caller holds the softc lock.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			/* out of buffers: drop frame and node ref */
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NB: on failure mwl_tx_start has consumed the mbuf;
		 *     we return the buffer and drop the node ref here.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host. So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1417 
/*
 * net80211 raw transmit entry point (e.g. management frames and
 * bpf injection).  Returns 0 on success or an errno; the mbuf is
 * consumed on all paths.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NB: on failure mwl_tx_start has consumed the mbuf;
	 *     just return the buffer to its queue.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host. So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1470 
1471 static int
1472 mwl_media_change(struct ifnet *ifp)
1473 {
1474 	struct ieee80211vap *vap = ifp->if_softc;
1475 	int error;
1476 
1477 	error = ieee80211_media_change(ifp);
1478 	/* NB: only the fixed rate can change and that doesn't need a reset */
1479 	if (error == ENETRESET) {
1480 		mwl_setrates(vap);
1481 		error = 0;
1482 	}
1483 	return error;
1484 }
1485 
1486 #ifdef MWL_DEBUG
/*
 * Debug helper: dump a hal key structure (index, cipher, key bytes,
 * mac address, TKIP MICs and flags) to the console.
 * NB(review): ciphers[] is indexed by hk->keyTypeId; this assumes
 *     the WEP/TKIP/AES type ids are 0..2 -- confirm against the
 *     KEY_TYPE_ID_* definitions.
 */
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"TKIP",
		"AES-CCM",
	};
	int i, n;

	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
	for (i = 0, n = hk->keyLen; i < n; i++)
		printf(" %02x", hk->key.aes[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
		printf(" %s", "rxmic");
		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
			printf(" %02x", hk->key.tkip.rxMic[i]);
		printf(" txmic");
		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
			printf(" %02x", hk->key.tkip.txMic[i]);
	}
	printf(" flags 0x%x\n", hk->keyFlags);
}
1512 #endif
1513 
1514 /*
1515  * Allocate a key cache slot for a unicast key.  The
1516  * firmware handles key allocation and every station is
1517  * guaranteed key space so we are always successful.
1518  */
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;

	/* global/group keys keep their WEP slot index; unicast keys
	 * are allocated by the firmware so any index works */
	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
		if (!(&vap->iv_nw_keys[0] <= k &&
		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/* give the caller what they requested */
		*keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
	} else {
		/*
		 * Firmware handles key allocation.
		 */
		*keyix = *rxkeyix = 0;
	}
	return 1;
}
1544 
1545 /*
1546  * Delete a key entry allocated by mwl_key_alloc.
1547  */
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	MWL_HAL_KEYVAL hk;
	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	/* WDS vaps have no h/w vap of their own; borrow the AP's */
	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}

	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
	    __func__, k->wk_keyix);

	/* translate the net80211 cipher to the hal key type */
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
}
1591 
1592 static __inline int
1593 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1594 {
1595 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1596 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1597 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1598 		if (k->wk_flags & IEEE80211_KEY_RECV)
1599 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1600 		return 1;
1601 	} else
1602 		return 0;
1603 }
1604 
1605 /*
1606  * Set the key cache contents for the specified key.  Key cache
1607  * slot(s) must already have been allocated by mwl_key_alloc.
1608  */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/* NB: the 802.11 layer supplies the peer mac in wk_macaddr */
	return (_mwl_key_set(vap, k, k->wk_macaddr));
}
1614 
/*
 * Plumb a key into the firmware key cache: translate the net80211
 * key into a MWL_HAL_KEYVAL and pick the sta db mac address the
 * key should be written against.  Returns non-zero on success.
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
		("s/w crypto set?"));

	/* WDS vaps have no h/w vap of their own; borrow the AP's */
	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = k->wk_keylen;
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = k->wk_keylen;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}
	/*
	 * NB: tkip mic keys get copied here too; the layout
	 *     just happens to match that in ieee80211_key.
	 */
	memcpy(hk.key.aes, k->wk_key, hk.keyLen);

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we would plumb the key to the wrong
		 * mac address (it'd be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1719 
1720 /*
1721  * Set the multicast filter contents into the hardware.
1722  * XXX f/w has no support; just defer to the os.
1723  */
/*
 * Set the multicast filter contents into the hardware.
 * Currently a no-op: the firmware has no filter support, so we
 * defer to the OS; the disabled code below is kept as a template
 * should the f/w gain support.
 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			ifp->if_flags |= IFF_ALLMULTI;
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}
1752 
1753 static int
1754 mwl_mode_init(struct mwl_softc *sc)
1755 {
1756 	struct ieee80211com *ic = &sc->sc_ic;
1757 	struct mwl_hal *mh = sc->sc_mh;
1758 
1759 	mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
1760 	mwl_setmcastfilter(sc);
1761 
1762 	return 0;
1763 }
1764 
1765 /*
1766  * Callback from the 802.11 layer after a multicast state change.
1767  */
static void
mwl_update_mcast(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: currently a no-op; see mwl_setmcastfilter */
	mwl_setmcastfilter(sc);
}
1775 
1776 /*
1777  * Callback from the 802.11 layer after a promiscuous mode change.
1778  * Note this interface does not check the operating mode as this
1779  * is an internal callback and we are expected to honor the current
1780  * state (e.g. this is used for setting the interface in promiscuous
1781  * mode when operating in hostap mode to do ACS).
1782  */
static void
mwl_update_promisc(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* push the promiscuous state straight to the firmware */
	mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
}
1790 
1791 /*
1792  * Callback from the 802.11 layer to update the slot time
1793  * based on the current setting.  We use it to notify the
1794  * firmware of ERP changes and the f/w takes care of things
1795  * like slot time and preamble.
1796  */
static void
mwl_updateslot(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	int prot;

	/* NB: can be called early; suppress needless cmds */
	if (!sc->sc_running)
		return;

	/*
	 * Calculate the ERP flags.  The firmware will use
	 * this to carry out the appropriate measures.
	 */
	prot = 0;
	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* long slot implies non-ERP stations are present */
		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
		if (ic->ic_flags & IEEE80211_F_USEPROT)
			prot |= IEEE80211_ERP_USE_PROTECTION;
		if (ic->ic_flags & IEEE80211_F_USEBARKER)
			prot |= IEEE80211_ERP_LONG_PREAMBLE;
	}

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
	    ic->ic_flags);

	mwl_hal_setgprot(mh, prot);
}
1830 
1831 /*
1832  * Setup the beacon frame.
1833  */
1834 static int
1835 mwl_beacon_setup(struct ieee80211vap *vap)
1836 {
1837 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1838 	struct ieee80211_node *ni = vap->iv_bss;
1839 	struct mbuf *m;
1840 
1841 	m = ieee80211_beacon_alloc(ni);
1842 	if (m == NULL)
1843 		return ENOBUFS;
1844 	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1845 	m_free(m);
1846 
1847 	return 0;
1848 }
1849 
1850 /*
1851  * Update the beacon frame in response to a change.
1852  */
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	KASSERT(hvap != NULL, ("no beacon"));
	/* NB: items not returned from early fall through to a full
	 * beacon rebuild below */
	switch (item) {
	case IEEE80211_BEACON_ERP:
		mwl_updateslot(ic);
		break;
	case IEEE80211_BEACON_HTINFO:
		mwl_hal_setnprotmode(hvap,
		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		break;
	case IEEE80211_BEACON_CAPS:
	case IEEE80211_BEACON_WME:
	case IEEE80211_BEACON_APPIE:
	case IEEE80211_BEACON_CSA:
		break;
	case IEEE80211_BEACON_TIM:
		/* NB: firmware always forms TIM */
		return;
	}
	/* XXX retain beacon frame and update */
	mwl_beacon_setup(vap);
}
1880 
1881 static void
1882 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1883 {
1884 	bus_addr_t *paddr = (bus_addr_t*) arg;
1885 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1886 	*paddr = segs->ds_addr;
1887 }
1888 
1889 #ifdef MWL_HOST_PS_SUPPORT
1890 /*
1891  * Handle power save station occupancy changes.
1892  */
/*
 * Handle power save station occupancy changes.  The firmware only
 * needs to know when the count of ps stations enters or leaves
 * zero, so intermediate non-zero -> non-zero changes are skipped.
 */
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
	struct mwl_vap *mvp = MWL_VAP(vap);

	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
	mvp->mv_last_ps_sta = nsta;
}
1902 
1903 /*
1904  * Handle associated station power save state changes.
1905  */
1906 static int
1907 mwl_set_tim(struct ieee80211_node *ni, int set)
1908 {
1909 	struct ieee80211vap *vap = ni->ni_vap;
1910 	struct mwl_vap *mvp = MWL_VAP(vap);
1911 
1912 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1913 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1914 		    IEEE80211_AID(ni->ni_associd), set);
1915 		return 1;
1916 	} else
1917 		return 0;
1918 }
1919 #endif /* MWL_HOST_PS_SUPPORT */
1920 
/*
 * Allocate a DMA-able descriptor area: create a tag, allocate
 * coherent memory, load it to get the physical address, and zero
 * it.  On failure the partially-constructed state is torn down and
 * *dd is cleared.  Returns 0 or a bus_dma errno.
 */
static int
mwl_desc_setup(struct mwl_softc *sc, const char *name,
	struct mwl_descdma *dd,
	int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
	uint8_t *ds;
	int error;

	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
	    __func__, name, nbuf, (uintmax_t) bufsize,
	    ndesc, (uintmax_t) descsize);

	dd->dd_name = name;
	dd->dd_desc_len = nbuf * ndesc * descsize;

	/*
	 * Setup DMA descriptor area.
	 * NB: one contiguous segment below 4GB (f/w requirement).
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	/* load the map to learn the physical address (via mwl_load_cb) */
	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				mwl_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = dd->dd_desc;
	memset(ds, 0, dd->dd_desc_len);
	DPRINTF(sc, MWL_DEBUG_RESET,
	    "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	return 0;
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
/* NOTE(review): DS2PHYS is never defined in this function; the
 * #undef below looks like a leftover from copied code -- harmless */
#undef DS2PHYS
}
1993 
/*
 * Release the resources created by mwl_desc_setup: unload and
 * free the descriptor memory and destroy the tag.  dd is then
 * cleared so a later cleanup pass can tell nothing remains.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2003 
2004 /*
2005  * Construct a tx q's free list.  The order of entries on
2006  * the list must reflect the physical layout of tx descriptors
2007  * because the firmware pre-fetches descriptors.
2008  *
2009  * XXX might be better to use indices into the buffer array.
2010  */
2011 static void
2012 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2013 {
2014 	struct mwl_txbuf *bf;
2015 	int i;
2016 
2017 	bf = txq->dma.dd_bufptr;
2018 	STAILQ_INIT(&txq->free);
2019 	for (i = 0; i < mwl_txbuf; i++, bf++)
2020 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2021 	txq->nfree = i;
2022 }
2023 
2024 #define	DS2PHYS(_dd, _ds) \
2025 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2026 
/*
 * Allocate tx DMA resources for one h/w queue: the descriptor
 * ring (via mwl_desc_setup) plus the software mwl_txbuf array
 * with a per-buffer dmamap for mapping mbuf chains.  On failure
 * partially-constructed state is left in place; the caller
 * (mwl_dma_setup) reclaims it with mwl_dma_cleanup.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
			mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* each buffer gets MWL_TXDESC consecutive descriptors */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2065 
2066 static void
2067 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2068 {
2069 	struct mwl_txbuf *bf;
2070 	int i;
2071 
2072 	bf = txq->dma.dd_bufptr;
2073 	for (i = 0; i < mwl_txbuf; i++, bf++) {
2074 		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2075 		KASSERT(bf->bf_node == NULL, ("node on free list"));
2076 		if (bf->bf_dmamap != NULL)
2077 			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2078 	}
2079 	STAILQ_INIT(&txq->free);
2080 	txq->nfree = 0;
2081 	if (txq->dma.dd_bufptr != NULL) {
2082 		free(txq->dma.dd_bufptr, M_MWLDEV);
2083 		txq->dma.dd_bufptr = NULL;
2084 	}
2085 	if (txq->dma.dd_desc_len != 0)
2086 		mwl_desc_cleanup(sc, &txq->dma);
2087 }
2088 
2089 static int
2090 mwl_rxdma_setup(struct mwl_softc *sc)
2091 {
2092 	int error, jumbosize, bsize, i;
2093 	struct mwl_rxbuf *bf;
2094 	struct mwl_jumbo *rbuf;
2095 	struct mwl_rxdesc *ds;
2096 	caddr_t data;
2097 
2098 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2099 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2100 			1, sizeof(struct mwl_rxdesc));
2101 	if (error != 0)
2102 		return error;
2103 
2104 	/*
2105 	 * Receive is done to a private pool of jumbo buffers.
2106 	 * This allows us to attach to mbuf's and avoid re-mapping
2107 	 * memory on each rx we post.  We allocate a large chunk
2108 	 * of memory and manage it in the driver.  The mbuf free
2109 	 * callback method is used to reclaim frames after sending
2110 	 * them up the stack.  By default we allocate 2x the number of
2111 	 * rx descriptors configured so we have some slop to hold
2112 	 * us while frames are processed.
2113 	 */
2114 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2115 		device_printf(sc->sc_dev,
2116 		    "too few rx dma buffers (%d); increasing to %d\n",
2117 		    mwl_rxbuf, 2*mwl_rxdesc);
2118 		mwl_rxbuf = 2*mwl_rxdesc;
2119 	}
2120 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2121 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2122 
2123 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2124 		       PAGE_SIZE, 0,		/* alignment, bounds */
2125 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2126 		       BUS_SPACE_MAXADDR,	/* highaddr */
2127 		       NULL, NULL,		/* filter, filterarg */
2128 		       sc->sc_rxmemsize,	/* maxsize */
2129 		       1,			/* nsegments */
2130 		       sc->sc_rxmemsize,	/* maxsegsize */
2131 		       BUS_DMA_ALLOCNOW,	/* flags */
2132 		       NULL,			/* lockfunc */
2133 		       NULL,			/* lockarg */
2134 		       &sc->sc_rxdmat);
2135 	if (error != 0) {
2136 		device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2137 		return error;
2138 	}
2139 
2140 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2141 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2142 				 &sc->sc_rxmap);
2143 	if (error != 0) {
2144 		device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2145 		    (uintmax_t) sc->sc_rxmemsize);
2146 		return error;
2147 	}
2148 
2149 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2150 				sc->sc_rxmem, sc->sc_rxmemsize,
2151 				mwl_load_cb, &sc->sc_rxmem_paddr,
2152 				BUS_DMA_NOWAIT);
2153 	if (error != 0) {
2154 		device_printf(sc->sc_dev, "could not load rx DMA map\n");
2155 		return error;
2156 	}
2157 
2158 	/*
2159 	 * Allocate rx buffers and set them up.
2160 	 */
2161 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2162 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2163 	if (bf == NULL) {
2164 		device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2165 		return error;
2166 	}
2167 	sc->sc_rxdma.dd_bufptr = bf;
2168 
2169 	STAILQ_INIT(&sc->sc_rxbuf);
2170 	ds = sc->sc_rxdma.dd_desc;
2171 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2172 		bf->bf_desc = ds;
2173 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2174 		/* pre-assign dma buffer */
2175 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2176 		/* NB: tail is intentional to preserve descriptor order */
2177 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2178 	}
2179 
2180 	/*
2181 	 * Place remainder of dma memory buffers on the free list.
2182 	 */
2183 	SLIST_INIT(&sc->sc_rxfree);
2184 	for (; i < mwl_rxbuf; i++) {
2185 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2186 		rbuf = MWL_JUMBO_DATA2BUF(data);
2187 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2188 		sc->sc_nrxfree++;
2189 	}
2190 	return 0;
2191 }
2192 #undef DS2PHYS
2193 
/*
 * Reclaim all rx DMA state: the jumbo buffer region, the rxbuf
 * array, and the descriptor ring.  Each piece is checked before
 * release so this is safe to call after a partial failure of
 * mwl_rxdma_setup.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2212 
2213 static int
2214 mwl_dma_setup(struct mwl_softc *sc)
2215 {
2216 	int error, i;
2217 
2218 	error = mwl_rxdma_setup(sc);
2219 	if (error != 0) {
2220 		mwl_rxdma_cleanup(sc);
2221 		return error;
2222 	}
2223 
2224 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2225 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2226 		if (error != 0) {
2227 			mwl_dma_cleanup(sc);
2228 			return error;
2229 		}
2230 	}
2231 	return 0;
2232 }
2233 
2234 static void
2235 mwl_dma_cleanup(struct mwl_softc *sc)
2236 {
2237 	int i;
2238 
2239 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2240 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2241 	mwl_rxdma_cleanup(sc);
2242 }
2243 
2244 static struct ieee80211_node *
2245 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2246 {
2247 	struct ieee80211com *ic = vap->iv_ic;
2248 	struct mwl_softc *sc = ic->ic_softc;
2249 	const size_t space = sizeof(struct mwl_node);
2250 	struct mwl_node *mn;
2251 
2252 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2253 	if (mn == NULL) {
2254 		/* XXX stat+msg */
2255 		return NULL;
2256 	}
2257 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2258 	return &mn->mn_node;
2259 }
2260 
2261 static void
2262 mwl_node_cleanup(struct ieee80211_node *ni)
2263 {
2264 	struct ieee80211com *ic = ni->ni_ic;
2265         struct mwl_softc *sc = ic->ic_softc;
2266 	struct mwl_node *mn = MWL_NODE(ni);
2267 
2268 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2269 	    __func__, ni, ni->ni_ic, mn->mn_staid);
2270 
2271 	if (mn->mn_staid != 0) {
2272 		struct ieee80211vap *vap = ni->ni_vap;
2273 
2274 		if (mn->mn_hvap != NULL) {
2275 			if (vap->iv_opmode == IEEE80211_M_STA)
2276 				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2277 			else
2278 				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2279 		}
2280 		/*
2281 		 * NB: legacy WDS peer sta db entry is installed using
2282 		 * the associate ap's hvap; use it again to delete it.
2283 		 * XXX can vap be NULL?
2284 		 */
2285 		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2286 		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2287 			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2288 			    ni->ni_macaddr);
2289 		delstaid(sc, mn->mn_staid);
2290 		mn->mn_staid = 0;
2291 	}
2292 	sc->sc_node_cleanup(ni);
2293 }
2294 
2295 /*
2296  * Reclaim rx dma buffers from packets sitting on the ampdu
2297  * reorder queue for a station.  We replace buffers with a
2298  * system cluster (if available).
2299  */
2300 static void
2301 mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2302 {
2303 #if 0
2304 	int i, n, off;
2305 	struct mbuf *m;
2306 	void *cl;
2307 
2308 	n = rap->rxa_qframes;
2309 	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2310 		m = rap->rxa_m[i];
2311 		if (m == NULL)
2312 			continue;
2313 		n--;
2314 		/* our dma buffers have a well-known free routine */
2315 		if ((m->m_flags & M_EXT) == 0 ||
2316 		    m->m_ext.ext_free != mwl_ext_free)
2317 			continue;
2318 		/*
2319 		 * Try to allocate a cluster and move the data.
2320 		 */
2321 		off = m->m_data - m->m_ext.ext_buf;
2322 		if (off + m->m_pkthdr.len > MCLBYTES) {
2323 			/* XXX no AMSDU for now */
2324 			continue;
2325 		}
2326 		cl = pool_cache_get_paddr(&mclpool_cache, 0,
2327 		    &m->m_ext.ext_paddr);
2328 		if (cl != NULL) {
2329 			/*
2330 			 * Copy the existing data to the cluster, remove
2331 			 * the rx dma buffer, and attach the cluster in
2332 			 * its place.  Note we preserve the offset to the
2333 			 * data so frames being bridged can still prepend
2334 			 * their headers without adding another mbuf.
2335 			 */
2336 			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2337 			MEXTREMOVE(m);
2338 			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2339 			/* setup mbuf like _MCLGET does */
2340 			m->m_flags |= M_CLUSTER | M_EXT_RW;
2341 			_MOWNERREF(m, M_EXT | M_CLUSTER);
2342 			/* NB: m_data is clobbered by MEXTADDR, adjust */
2343 			m->m_data += off;
2344 		}
2345 	}
2346 #endif
2347 }
2348 
2349 /*
2350  * Callback to reclaim resources.  We first let the
2351  * net80211 layer do it's thing, then if we are still
2352  * blocked by a lack of rx dma buffers we walk the ampdu
2353  * reorder q's to reclaim buffers by copying to a system
2354  * cluster.
2355  */
2356 static void
2357 mwl_node_drain(struct ieee80211_node *ni)
2358 {
2359 	struct ieee80211com *ic = ni->ni_ic;
2360         struct mwl_softc *sc = ic->ic_softc;
2361 	struct mwl_node *mn = MWL_NODE(ni);
2362 
2363 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2364 	    __func__, ni, ni->ni_vap, mn->mn_staid);
2365 
2366 	/* NB: call up first to age out ampdu q's */
2367 	sc->sc_node_drain(ni);
2368 
2369 	/* XXX better to not check low water mark? */
2370 	if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2371 	    (ni->ni_flags & IEEE80211_NODE_HT)) {
2372 		uint8_t tid;
2373 		/*
2374 		 * Walk the reorder q and reclaim rx dma buffers by copying
2375 		 * the packet contents into clusters.
2376 		 */
2377 		for (tid = 0; tid < WME_NUM_TID; tid++) {
2378 			struct ieee80211_rx_ampdu *rap;
2379 
2380 			rap = &ni->ni_rx_ampdu[tid];
2381 			if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2382 				continue;
2383 			if (rap->rxa_qframes)
2384 				mwl_ampdu_rxdma_reclaim(rap);
2385 		}
2386 	}
2387 }
2388 
/*
 * Return signal info for a station: rssi from the net80211
 * averaged value, noise floor as a fixed -95 dBm placeholder
 * (per-antenna f/w noise data is not smoothed yet).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2404 
2405 /*
2406  * Convert Hardware per-antenna rssi info to common format:
2407  * Let a1, a2, a3 represent the amplitudes per chain
2408  * Let amax represent max[a1, a2, a3]
2409  * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2410  * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2411  * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2412  * maintain some extra precision.
2413  *
2414  * Values are stored in .5 db format capped at 127.
2415  */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
/*
 * Convert one chain's amplitude to rssi relative to the max
 * chain (see the block comment above); result is in .5 dB
 * units capped at 127.
 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) lookup table (idx 0..31) */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* find the strongest chain's amplitude */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2449 
2450 static __inline void *
2451 mwl_getrxdma(struct mwl_softc *sc)
2452 {
2453 	struct mwl_jumbo *buf;
2454 	void *data;
2455 
2456 	/*
2457 	 * Allocate from jumbo pool.
2458 	 */
2459 	MWL_RXFREE_LOCK(sc);
2460 	buf = SLIST_FIRST(&sc->sc_rxfree);
2461 	if (buf == NULL) {
2462 		DPRINTF(sc, MWL_DEBUG_ANY,
2463 		    "%s: out of rx dma buffers\n", __func__);
2464 		sc->sc_stats.mst_rx_nodmabuf++;
2465 		data = NULL;
2466 	} else {
2467 		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2468 		sc->sc_nrxfree--;
2469 		data = MWL_JUMBO_BUF2DATA(buf);
2470 	}
2471 	MWL_RXFREE_UNLOCK(sc);
2472 	return data;
2473 }
2474 
2475 static __inline void
2476 mwl_putrxdma(struct mwl_softc *sc, void *data)
2477 {
2478 	struct mwl_jumbo *buf;
2479 
2480 	/* XXX bounds check data */
2481 	MWL_RXFREE_LOCK(sc);
2482 	buf = MWL_JUMBO_DATA2BUF(data);
2483 	SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2484 	sc->sc_nrxfree++;
2485 	MWL_RXFREE_UNLOCK(sc);
2486 }
2487 
/*
 * (Re)initialize an rx descriptor: attach a dma buffer if one
 * is missing and hand ownership back to the firmware.  Returns
 * ENOMEM (with the descriptor marked to be skipped) when no
 * dma buffer is available.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2526 
2527 static void
2528 mwl_ext_free(struct mbuf *m)
2529 {
2530 	struct mwl_softc *sc = m->m_ext.ext_arg1;
2531 
2532 	/* XXX bounds check data */
2533 	mwl_putrxdma(sc, m->m_ext.ext_buf);
2534 	/*
2535 	 * If we were previously blocked by a lack of rx dma buffers
2536 	 * check if we now have enough to restart rx interrupt handling.
2537 	 * NB: we know we are called at splvm which is above splnet.
2538 	 */
2539 	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2540 		sc->sc_rxblocked = 0;
2541 		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2542 	}
2543 }
2544 
/*
 * 802.11 BlockAckReq (BAR) frame header; only the fixed fields
 * are declared here (see mwl_anyhdrsize for how its size is used).
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];	/* frame control */
	u_int8_t	i_dur[2];	/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2552 
2553 /*
2554  * Like ieee80211_anyhdrsize, but handles BAR frames
2555  * specially so the logic below to piece the 802.11
2556  * header together works.
2557  */
2558 static __inline int
2559 mwl_anyhdrsize(const void *data)
2560 {
2561 	const struct ieee80211_frame *wh = data;
2562 
2563 	if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2564 		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2565 		case IEEE80211_FC0_SUBTYPE_CTS:
2566 		case IEEE80211_FC0_SUBTYPE_ACK:
2567 			return sizeof(struct ieee80211_frame_ack);
2568 		case IEEE80211_FC0_SUBTYPE_BAR:
2569 			return sizeof(struct mwl_frame_bar);
2570 		}
2571 		return sizeof(struct ieee80211_frame_min);
2572 	} else
2573 		return ieee80211_hdrsize(data);
2574 }
2575 
2576 static void
2577 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2578 {
2579 	const struct ieee80211_frame *wh;
2580 	struct ieee80211_node *ni;
2581 
2582 	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2583 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2584 	if (ni != NULL) {
2585 		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2586 		ieee80211_free_node(ni);
2587 	}
2588 }
2589 
2590 /*
2591  * Convert hardware signal strength to rssi.  The value
2592  * provided by the device has the noise floor added in;
2593  * we need to compensate for this but we don't have that
2594  * so we use a fixed value.
2595  *
2596  * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
2597  * offset is already set as part of the initial gain.  This
2598  * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2599  */
2600 static __inline int
2601 cvtrssi(uint8_t ssi)
2602 {
2603 	int rssi = (int) ssi + 8;
2604 	/* XXX hack guess until we have a real noise floor */
2605 	rssi = 2*(87 - rssi);	/* NB: .5 dBm units */
2606 	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2607 }
2608 
/*
 * Rx processing task (runs outside interrupt context).  Walk
 * the rx descriptor ring processing up to mwl_rxquota frames:
 * for each host-owned descriptor, reconstruct the 802.11
 * header, hand the dma buffer off to an mbuf, dispatch it to
 * net80211, and re-arm the descriptor with a fresh buffer.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct epoch_tracker et;
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* NB: firmware hands descriptors back by flipping ownership */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);

		NET_EPOCH_ENTER(et);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
		NET_EPOCH_EXIT(et);
rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2833 
/*
 * One-time init of a tx queue: create its lock and link each
 * buffer's descriptor to the next buffer's descriptor through
 * pPhysNext, wrapping the last back to the head so the ring is
 * circular for the firmware.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);	/* wrap to head */
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2858 
2859 /*
2860  * Setup a hardware data transmit queue for the specified
2861  * access control.  We record the mapping from ac's
2862  * to h/w queues for use by mwl_tx_start.
2863  */
2864 static int
2865 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2866 {
2867 	struct mwl_txq *txq;
2868 
2869 	if (ac >= nitems(sc->sc_ac2q)) {
2870 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2871 			ac, nitems(sc->sc_ac2q));
2872 		return 0;
2873 	}
2874 	if (mvtype >= MWL_NUM_TX_QUEUES) {
2875 		device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2876 			mvtype, MWL_NUM_TX_QUEUES);
2877 		return 0;
2878 	}
2879 	txq = &sc->sc_txq[mvtype];
2880 	mwl_txq_init(sc, txq, mvtype);
2881 	sc->sc_ac2q[ac] = txq;
2882 	return 1;
2883 }
2884 
2885 /*
2886  * Update WME parameters for a transmit queue.
2887  */
2888 static int
2889 mwl_txq_update(struct mwl_softc *sc, int ac)
2890 {
2891 #define	MWL_EXPONENT_TO_VALUE(v)	((1<<v)-1)
2892 	struct ieee80211com *ic = &sc->sc_ic;
2893 	struct chanAccParams chp;
2894 	struct mwl_txq *txq = sc->sc_ac2q[ac];
2895 	struct wmeParams *wmep;
2896 	struct mwl_hal *mh = sc->sc_mh;
2897 	int aifs, cwmin, cwmax, txoplim;
2898 
2899 	ieee80211_wme_ic_getparams(ic, &chp);
2900 	wmep = &chp.cap_wmeParams[ac];
2901 
2902 	aifs = wmep->wmep_aifsn;
2903 	/* XXX in sta mode need to pass log values for cwmin/max */
2904 	cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2905 	cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2906 	txoplim = wmep->wmep_txopLimit;		/* NB: units of 32us */
2907 
2908 	if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2909 		device_printf(sc->sc_dev, "unable to update hardware queue "
2910 			"parameters for %s traffic!\n",
2911 			ieee80211_wme_acnames[ac]);
2912 		return 0;
2913 	}
2914 	return 1;
2915 #undef MWL_EXPONENT_TO_VALUE
2916 }
2917 
2918 /*
2919  * Callback from the 802.11 layer to update WME parameters.
2920  */
2921 static int
2922 mwl_wme_update(struct ieee80211com *ic)
2923 {
2924 	struct mwl_softc *sc = ic->ic_softc;
2925 
2926 	return !mwl_txq_update(sc, WME_AC_BE) ||
2927 	    !mwl_txq_update(sc, WME_AC_BK) ||
2928 	    !mwl_txq_update(sc, WME_AC_VI) ||
2929 	    !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2930 }
2931 
2932 /*
2933  * Reclaim resources for a setup queue.
2934  */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	/* NB: only the per-queue mutex is torn down here */
	MWL_TXQ_LOCK_DESTROY(txq);
}
2941 
2942 /*
2943  * Reclaim all tx queue resources.
2944  */
2945 static void
2946 mwl_tx_cleanup(struct mwl_softc *sc)
2947 {
2948 	int i;
2949 
2950 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2951 		mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2952 }
2953 
/*
 * DMA-map an outbound mbuf chain for the given tx buffer.
 * If the chain needs more than MWL_TXDESC segments it is
 * linearized (collapsed/defragged) and mapped again.  On
 * success 0 is returned, the (possibly replaced) mbuf is
 * recorded in bf->bf_m and bf->bf_segs/bf_nseg describe the
 * mapping.  On failure the mbuf chain is freed and an errno
 * is returned; the caller must not touch m0 afterwards.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: flag for the linearize pass below */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the mapping with the linearized chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3015 
/*
 * Map a legacy rate code (half-Mb/s units) to the hardware
 * rate index used in the tx descriptor Format field; unknown
 * rates map to index 0.  NB: the table must stay in sync with
 * mwl_cvtlegacyrix which performs the inverse mapping.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int legacyrates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ridx;

	for (ridx = 0;
	    ridx < (int)(sizeof(legacyrates) / sizeof(legacyrates[0])); ridx++)
		if (legacyrates[ridx] == rate)
			return ridx;
	return 0;
}
3036 
3037 /*
3038  * Calculate fixed tx rate information per client state;
3039  * this value is suitable for writing to the Format field
3040  * of a tx descriptor.
3041  */
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
	uint16_t fmt;

	/* NB: antenna selector hard-wired to 3 — TODO confirm meaning */
	fmt = SM(3, EAGLE_TXD_ANTENNA)
	    | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
		EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
	if (rate & IEEE80211_RATE_MCS) {	/* HT MCS */
		fmt |= EAGLE_TXD_FORMAT_HT
		    /* NB: 0x80 implicitly stripped from ucastrate */
		    | SM(rate, EAGLE_TXD_RATE);
		/* XXX short/long GI may be wrong; re-check */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			fmt |= EAGLE_TXD_CHW_40
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		} else {
			fmt |= EAGLE_TXD_CHW_20
			    | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
			        EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
		}
	} else {			/* legacy rate */
		fmt |= EAGLE_TXD_FORMAT_LEGACY
		    | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
		    | EAGLE_TXD_CHW_20
		    /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
		    | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
			EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
	}
	return fmt;
}
3074 
/*
 * Prepare and launch one frame: do crypto encapsulation if
 * required, prepend the firmware frame record (2-byte payload
 * length + 4-address header), DMA-map the mbuf, fill in the tx
 * descriptor (rate format, priority queue, segment list) and
 * hand it to the firmware by marking the descriptor FW_OWNED
 * on the buffer's h/w queue.  Returns 0 on success; on failure
 * the mbuf chain is freed and an errno is returned.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, copyhdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	copyhdrlen = hdrlen;
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = *(uint16_t *)ieee80211_getqos(wh);
		/* NB: 4-address frames carry qos after addr4; skip on copy */
		if (IEEE80211_IS_DSTODS(wh))
			copyhdrlen -= sizeof(qos);
	} else
		qos = 0;

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		/* NB: cannot fail, leading space was verified above */
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/* NB: EAPOL frames will never have qos set */
			/* route to the queue of a matching BA stream, if any */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* hand ownership of the descriptor to the firmware */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* arm the tx watchdog */
	sc->sc_tx_timer = 5;
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3314 
/*
 * Map a hardware legacy rate index back to the rate code in
 * half-Mb/s units (the inverse of mwl_cvtlegacyrate); out of
 * range indices map to 0.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int ieeerates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix < 0 || rix >= (int)(sizeof(ieeerates) / sizeof(ieeerates[0])))
		return 0;
	return ieeerates[rix];
}
3322 
3323 /*
3324  * Process completed xmit descriptors from the specified queue.
3325  */
3326 static int
3327 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3328 {
3329 #define	EAGLE_TXD_STATUS_MCAST \
3330 	(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3331 	struct ieee80211com *ic = &sc->sc_ic;
3332 	struct mwl_txbuf *bf;
3333 	struct mwl_txdesc *ds;
3334 	struct ieee80211_node *ni;
3335 	struct mwl_node *an;
3336 	int nreaped;
3337 	uint32_t status;
3338 
3339 	DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3340 	for (nreaped = 0;; nreaped++) {
3341 		MWL_TXQ_LOCK(txq);
3342 		bf = STAILQ_FIRST(&txq->active);
3343 		if (bf == NULL) {
3344 			MWL_TXQ_UNLOCK(txq);
3345 			break;
3346 		}
3347 		ds = bf->bf_desc;
3348 		MWL_TXDESC_SYNC(txq, ds,
3349 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3350 		if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3351 			MWL_TXQ_UNLOCK(txq);
3352 			break;
3353 		}
3354 		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3355 		MWL_TXQ_UNLOCK(txq);
3356 
3357 #ifdef MWL_DEBUG
3358 		if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3359 			mwl_printtxbuf(bf, txq->qnum, nreaped);
3360 #endif
3361 		ni = bf->bf_node;
3362 		if (ni != NULL) {
3363 			an = MWL_NODE(ni);
3364 			status = le32toh(ds->Status);
3365 			if (status & EAGLE_TXD_STATUS_OK) {
3366 				uint16_t Format = le16toh(ds->Format);
3367 				uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3368 
3369 				sc->sc_stats.mst_ant_tx[txant]++;
3370 				if (status & EAGLE_TXD_STATUS_OK_RETRY)
3371 					sc->sc_stats.mst_tx_retries++;
3372 				if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3373 					sc->sc_stats.mst_tx_mretries++;
3374 				if (txq->qnum >= MWL_WME_AC_VO)
3375 					ic->ic_wme.wme_hipri_traffic++;
3376 				ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3377 				if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3378 					ni->ni_txrate = mwl_cvtlegacyrix(
3379 					    ni->ni_txrate);
3380 				} else
3381 					ni->ni_txrate |= IEEE80211_RATE_MCS;
3382 				sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3383 			} else {
3384 				if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3385 					sc->sc_stats.mst_tx_linkerror++;
3386 				if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3387 					sc->sc_stats.mst_tx_xretries++;
3388 				if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3389 					sc->sc_stats.mst_tx_aging++;
3390 				if (bf->bf_m->m_flags & M_FF)
3391 					sc->sc_stats.mst_ff_txerr++;
3392 			}
3393 			if (bf->bf_m->m_flags & M_TXCB)
3394 				/* XXX strip fw len in case header inspected */
3395 				m_adj(bf->bf_m, sizeof(uint16_t));
3396 			ieee80211_tx_complete(ni, bf->bf_m,
3397 			    (status & EAGLE_TXD_STATUS_OK) == 0);
3398 		} else
3399 			m_freem(bf->bf_m);
3400 		ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3401 
3402 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3403 		    BUS_DMASYNC_POSTWRITE);
3404 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3405 
3406 		mwl_puttxbuf_tail(txq, bf);
3407 	}
3408 	return nreaped;
3409 #undef EAGLE_TXD_STATUS_MCAST
3410 }
3411 
3412 /*
3413  * Deferred processing of transmit interrupt; special-cased
3414  * for four hardware queues, 0-3.
3415  */
3416 static void
3417 mwl_tx_proc(void *arg, int npending)
3418 {
3419 	struct mwl_softc *sc = arg;
3420 	int nreaped;
3421 
3422 	/*
3423 	 * Process each active queue.
3424 	 */
3425 	nreaped = 0;
3426 	if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3427 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3428 	if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3429 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3430 	if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3431 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3432 	if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3433 		nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3434 
3435 	if (nreaped != 0) {
3436 		sc->sc_tx_timer = 0;
3437 		if (mbufq_first(&sc->sc_snd) != NULL) {
3438 			/* NB: kick fw; the tx thread may have been preempted */
3439 			mwl_hal_txstart(sc->sc_mh, 0);
3440 			mwl_start(sc);
3441 		}
3442 	}
3443 }
3444 
/*
 * Discard all frames pending on a tx queue: release DMA
 * mappings, node references and mbufs, and return the
 * buffers to the queue's free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
				bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3488 
3489 /*
3490  * Drain the transmit queues and reclaim resources.
3491  */
3492 static void
3493 mwl_draintxq(struct mwl_softc *sc)
3494 {
3495 	int i;
3496 
3497 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3498 		mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3499 	sc->sc_tx_timer = 0;
3500 }
3501 
3502 #ifdef MWL_DIAGAPI
3503 /*
3504  * Reset the transmit queues to a pristine state after a fw download.
3505  */
3506 static void
3507 mwl_resettxq(struct mwl_softc *sc)
3508 {
3509 	int i;
3510 
3511 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3512 		mwl_txq_reset(sc, &sc->sc_txq[i]);
3513 }
3514 #endif /* MWL_DIAGAPI */
3515 
3516 /*
3517  * Clear the transmit queues of any frames submitted for the
3518  * specified vap.  This is done when the vap is deleted so we
3519  * don't potentially reference the vap after it is gone.
3520  * Note we cannot remove the frames; we only reclaim the node
3521  * reference.
3522  */
3523 static void
3524 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3525 {
3526 	struct mwl_txq *txq;
3527 	struct mwl_txbuf *bf;
3528 	int i;
3529 
3530 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3531 		txq = &sc->sc_txq[i];
3532 		MWL_TXQ_LOCK(txq);
3533 		STAILQ_FOREACH(bf, &txq->active, bf_list) {
3534 			struct ieee80211_node *ni = bf->bf_node;
3535 			if (ni != NULL && ni->ni_vap == vap) {
3536 				bf->bf_node = NULL;
3537 				ieee80211_free_node(ni);
3538 			}
3539 		}
3540 		MWL_TXQ_UNLOCK(txq);
3541 	}
3542 }
3543 
3544 static int
3545 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3546 	const uint8_t *frm, const uint8_t *efrm)
3547 {
3548 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3549 	const struct ieee80211_action *ia;
3550 
3551 	ia = (const struct ieee80211_action *) frm;
3552 	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3553 	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3554 		const struct ieee80211_action_ht_mimopowersave *mps =
3555 		    (const struct ieee80211_action_ht_mimopowersave *) ia;
3556 
3557 		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3558 		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3559 		    MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3560 		return 0;
3561 	} else
3562 		return sc->sc_recv_action(ni, wh, frm, efrm);
3563 }
3564 
/*
 * net80211 hook invoked before sending an ADDBA request.
 * Reserve a per-node BA stream slot for the tid (allocating a
 * f/w stream on the first attempt), seed the starting sequence
 * number from the firmware, then chain to the saved net80211
 * handler to form/send the request.  Returning 0 aborts a-mpdu
 * aggregation for this tid.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3636 
/*
 * net80211 hook invoked when the peer's ADDBA response arrives.
 * On success, commit the pre-allocated BA stream to the firmware
 * (and cache its tx queue); on failure or NAK, destroy the stream
 * and release the slot.  Chains to the saved net80211 handler.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3703 
3704 static void
3705 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3706 {
3707 	struct mwl_softc *sc = ni->ni_ic->ic_softc;
3708 	struct mwl_bastate *bas;
3709 
3710 	bas = tap->txa_private;
3711 	if (bas != NULL) {
3712 		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3713 		    __func__, bas->bastream);
3714 		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3715 		mwl_bastream_free(bas);
3716 		tap->txa_private = NULL;
3717 	}
3718 	sc->sc_addba_stop(ni, tap);
3719 }
3720 
3721 /*
3722  * Setup the rx data structures.  This should only be
3723  * done once or we may get out of sync with the firmware.
3724  */
static int
mwl_startrecv(struct mwl_softc *sc)
{
	if (!sc->sc_recvsetup) {
		struct mwl_rxbuf *bf, *prev;
		struct mwl_rxdesc *ds;

		prev = NULL;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			int error = mwl_rxbuf_init(sc, bf);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_RECV,
					"%s: mwl_rxbuf_init failed %d\n",
					__func__, error);
				return error;
			}
			/* chain each rx descriptor to its predecessor */
			if (prev != NULL) {
				ds = prev->bf_desc;
				ds->pPhysNext = htole32(bf->bf_daddr);
			}
			prev = bf;
		}
		/* close the ring: last descriptor points back at the first */
		if (prev != NULL) {
			ds = prev->bf_desc;
			ds->pPhysNext =
			    htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
		}
		/* NB: one-shot; redoing this would desync us from the f/w */
		sc->sc_recvsetup = 1;
	}
	mwl_mode_init(sc);		/* set filters, etc. */
	return 0;
}
3757 
3758 static MWL_HAL_APMODE
3759 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3760 {
3761 	MWL_HAL_APMODE mode;
3762 
3763 	if (IEEE80211_IS_CHAN_HT(chan)) {
3764 		if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3765 			mode = AP_MODE_N_ONLY;
3766 		else if (IEEE80211_IS_CHAN_5GHZ(chan))
3767 			mode = AP_MODE_AandN;
3768 		else if (vap->iv_flags & IEEE80211_F_PUREG)
3769 			mode = AP_MODE_GandN;
3770 		else
3771 			mode = AP_MODE_BandGandN;
3772 	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3773 		if (vap->iv_flags & IEEE80211_F_PUREG)
3774 			mode = AP_MODE_G_ONLY;
3775 		else
3776 			mode = AP_MODE_MIXED;
3777 	} else if (IEEE80211_IS_CHAN_B(chan))
3778 		mode = AP_MODE_B_ONLY;
3779 	else if (IEEE80211_IS_CHAN_A(chan))
3780 		mode = AP_MODE_A_ONLY;
3781 	else
3782 		mode = AP_MODE_MIXED;		/* XXX should not happen? */
3783 	return mode;
3784 }
3785 
3786 static int
3787 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3788 {
3789 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3790 	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3791 }
3792 
3793 /*
3794  * Set/change channels.
3795  */
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211com *ic = &sc->sc_ic;
	MWL_HAL_CHANNEL hchan;
	int maxtxpow;

	DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
	    __func__, chan->ic_freq, chan->ic_flags);

	/*
	 * Convert to a HAL channel description with
	 * the flags constrained to reflect the current
	 * operating mode.
	 */
	mwl_mapchan(&hchan, chan);
	mwl_hal_intrset(mh, 0);		/* disable interrupts */
#if 0
	mwl_draintxq(sc);		/* clear pending tx frames */
#endif
	mwl_hal_setchannel(mh, &hchan);
	/*
	 * Tx power is cap'd by the regulatory setting and
	 * possibly a user-set limit.  We pass the min of
	 * these to the hal to apply them to the cal data
	 * for this channel.
	 * XXX min bound?
	 */
	/* NB: doubling suggests ic_txpowlimit is in .5 dBm units — confirm */
	maxtxpow = 2*chan->ic_maxregpower;
	if (maxtxpow > ic->ic_txpowlimit)
		maxtxpow = ic->ic_txpowlimit;
	mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
	/* NB: potentially change mcast/mgt rates */
	mwl_setcurchanrates(sc);

	/*
	 * Update internal state.
	 */
	sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
	if (IEEE80211_IS_CHAN_A(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
	} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
	} else {
		sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
		sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
	}
	sc->sc_curchan = hchan;
	/* re-enable interrupts now that the switch is complete */
	mwl_hal_intrset(mh, sc->sc_imask);

	return 0;
}
3852 
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: currently a no-op aside from debug tracing */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3860 
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: currently a no-op aside from debug tracing */
	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3868 
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	/* NB: result deliberately ignored; mwl_chan_set always returns 0 */
	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3876 
3877 /*
3878  * Handle a channel switch request.  We inform the firmware
3879  * and mark the global state to suppress various actions.
3880  * NB: we issue only one request to the fw; we may be called
3881  * multiple times if there are multiple vap's.
3882  */
static void
mwl_startcsa(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct mwl_softc *sc = ic->ic_softc;
	MWL_HAL_CHANNEL hchan;

	/* only the first vap to request a switch talks to the f/w */
	if (sc->sc_csapending)
		return;

	mwl_mapchan(&hchan, ic->ic_csa_newchan);
	/* 1 =>'s quiet channel */
	mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
	sc->sc_csapending = 1;
}
3898 
3899 /*
3900  * Plumb any static WEP key for the station.  This is
3901  * necessary as we must propagate the key from the
3902  * global key table of the vap to each sta db entry.
3903  */
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	/*
	 * Only applies with static WEP: privacy enabled, WPA off,
	 * and a valid default tx key configured.
	 */
	if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
		IEEE80211_F_PRIVACY &&
	    vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
	    vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
		(void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
				    mac);
}
3914 
3915 static int
3916 mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
3917 {
3918 #define	WME(ie) ((const struct ieee80211_wme_info *) ie)
3919 	struct ieee80211vap *vap = ni->ni_vap;
3920 	struct mwl_hal_vap *hvap;
3921 	int error;
3922 
3923 	if (vap->iv_opmode == IEEE80211_M_WDS) {
3924 		/*
3925 		 * WDS vap's do not have a f/w vap; instead they piggyback
3926 		 * on an AP vap and we must install the sta db entry and
3927 		 * crypto state using that AP's handle (the WDS vap has none).
3928 		 */
3929 		hvap = MWL_VAP(vap)->mv_ap_hvap;
3930 	} else
3931 		hvap = MWL_VAP(vap)->mv_hvap;
3932 	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
3933 	    aid, staid, pi,
3934 	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
3935 	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
3936 	if (error == 0) {
3937 		/*
3938 		 * Setup security for this station.  For sta mode this is
3939 		 * needed even though do the same thing on transition to
3940 		 * AUTH state because the call to mwl_hal_newstation
3941 		 * clobbers the crypto state we setup.
3942 		 */
3943 		mwl_setanywepkey(vap, ni->ni_macaddr);
3944 	}
3945 	return error;
3946 #undef WME
3947 }
3948 
3949 static void
3950 mwl_setglobalkeys(struct ieee80211vap *vap)
3951 {
3952 	struct ieee80211_key *wk;
3953 
3954 	wk = &vap->iv_nw_keys[0];
3955 	for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3956 		if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3957 			(void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3958 }
3959 
3960 /*
3961  * Convert a legacy rate set to a firmware bitmask.
3962  */
3963 static uint32_t
3964 get_rate_bitmap(const struct ieee80211_rateset *rs)
3965 {
3966 	uint32_t rates;
3967 	int i;
3968 
3969 	rates = 0;
3970 	for (i = 0; i < rs->rs_nrates; i++)
3971 		switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3972 		case 2:	  rates |= 0x001; break;
3973 		case 4:	  rates |= 0x002; break;
3974 		case 11:  rates |= 0x004; break;
3975 		case 22:  rates |= 0x008; break;
3976 		case 44:  rates |= 0x010; break;
3977 		case 12:  rates |= 0x020; break;
3978 		case 18:  rates |= 0x040; break;
3979 		case 24:  rates |= 0x080; break;
3980 		case 36:  rates |= 0x100; break;
3981 		case 48:  rates |= 0x200; break;
3982 		case 72:  rates |= 0x400; break;
3983 		case 96:  rates |= 0x800; break;
3984 		case 108: rates |= 0x1000; break;
3985 		}
3986 	return rates;
3987 }
3988 
3989 /*
3990  * Construct an HT firmware bitmask from an HT rate set.
3991  */
3992 static uint32_t
3993 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3994 {
3995 	uint32_t rates;
3996 	int i;
3997 
3998 	rates = 0;
3999 	for (i = 0; i < rs->rs_nrates; i++) {
4000 		if (rs->rs_rates[i] < 16)
4001 			rates |= 1<<rs->rs_rates[i];
4002 	}
4003 	return rates;
4004 }
4005 
4006 /*
4007  * Craft station database entry for station.
4008  * NB: use host byte order here, the hal handles byte swapping.
4009  */
4010 static MWL_HAL_PEERINFO *
4011 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4012 {
4013 	const struct ieee80211vap *vap = ni->ni_vap;
4014 
4015 	memset(pi, 0, sizeof(*pi));
4016 	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4017 	pi->CapInfo = ni->ni_capinfo;
4018 	if (ni->ni_flags & IEEE80211_NODE_HT) {
4019 		/* HT capabilities, etc */
4020 		pi->HTCapabilitiesInfo = ni->ni_htcap;
4021 		/* XXX pi.HTCapabilitiesInfo */
4022 	        pi->MacHTParamInfo = ni->ni_htparam;
4023 		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4024 		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4025 		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4026 		pi->AddHtInfo.OpMode = ni->ni_htopmode;
4027 		pi->AddHtInfo.stbc = ni->ni_htstbc;
4028 
4029 		/* constrain according to local configuration */
4030 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4031 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4032 		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4033 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4034 		if (ni->ni_chw != 40)
4035 			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4036 	}
4037 	return pi;
4038 }
4039 
4040 /*
4041  * Re-create the local sta db entry for a vap to ensure
4042  * up to date WME state is pushed to the firmware.  Because
4043  * this resets crypto state this must be followed by a
4044  * reload of any keys in the global key table.
4045  */
4046 static int
4047 mwl_localstadb(struct ieee80211vap *vap)
4048 {
4049 #define	WME(ie) ((const struct ieee80211_wme_info *) ie)
4050 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4051 	struct ieee80211_node *bss;
4052 	MWL_HAL_PEERINFO pi;
4053 	int error;
4054 
4055 	switch (vap->iv_opmode) {
4056 	case IEEE80211_M_STA:
4057 		bss = vap->iv_bss;
4058 		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4059 		    vap->iv_state == IEEE80211_S_RUN ?
4060 			mkpeerinfo(&pi, bss) : NULL,
4061 		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4062 		    bss->ni_ies.wme_ie != NULL ?
4063 			WME(bss->ni_ies.wme_ie)->wme_info : 0);
4064 		if (error == 0)
4065 			mwl_setglobalkeys(vap);
4066 		break;
4067 	case IEEE80211_M_HOSTAP:
4068 	case IEEE80211_M_MBSS:
4069 		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4070 		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4071 		if (error == 0)
4072 			mwl_setglobalkeys(vap);
4073 		break;
4074 	default:
4075 		error = 0;
4076 		break;
4077 	}
4078 	return error;
4079 #undef WME
4080 }
4081 
/*
 * 802.11 state change callback.  Wraps net80211's state machine
 * (mvp->mv_newstate) with device work that must happen before the
 * generic transition (f/w vap stop/start, radar detection state,
 * sta db entry for WEP) and after it (beacon setup, association
 * state, CS mode, firmware aging timer).
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    vap->iv_ifp->if_xname, __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the firmware aging timer; re-armed below on RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    vap->iv_ifp->if_xname, __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    vap->iv_ifp->if_xname, __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap to reach RUN enables DWDS */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    vap->iv_ifp->if_xname, __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/* NOTE(review): this decrements on any transition of a
		 * DWDS vap to a state other than RUN/SLEEP — verify the
		 * counter cannot underflow if RUN was never reached. */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4234 
4235 /*
4236  * Manage station id's; these are separate from AID's
4237  * as AID's may have values out of the range of possible
4238  * station id's acceptable to the firmware.
4239  */
4240 static int
4241 allocstaid(struct mwl_softc *sc, int aid)
4242 {
4243 	int staid;
4244 
4245 	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4246 		/* NB: don't use 0 */
4247 		for (staid = 1; staid < MWL_MAXSTAID; staid++)
4248 			if (isclr(sc->sc_staid, staid))
4249 				break;
4250 	} else
4251 		staid = aid;
4252 	setbit(sc->sc_staid, staid);
4253 	return staid;
4254 }
4255 
/*
 * Release a station id previously handed out by allocstaid.
 */
static void
delstaid(struct mwl_softc *sc, int staid)
{
	clrbit(sc->sc_staid, staid);
}
4261 
4262 /*
4263  * Setup driver-specific state for a newly associated node.
4264  * Note that we're called also on a re-associate, the isnew
4265  * param tells us if this is the first time or not.
4266  */
4267 static void
4268 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4269 {
4270 	struct ieee80211vap *vap = ni->ni_vap;
4271         struct mwl_softc *sc = vap->iv_ic->ic_softc;
4272 	struct mwl_node *mn = MWL_NODE(ni);
4273 	MWL_HAL_PEERINFO pi;
4274 	uint16_t aid;
4275 	int error;
4276 
4277 	aid = IEEE80211_AID(ni->ni_associd);
4278 	if (isnew) {
4279 		mn->mn_staid = allocstaid(sc, aid);
4280 		mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4281 	} else {
4282 		mn = MWL_NODE(ni);
4283 		/* XXX reset BA stream? */
4284 	}
4285 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4286 	    __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4287 	error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4288 	if (error != 0) {
4289 		DPRINTF(sc, MWL_DEBUG_NODE,
4290 		    "%s: error %d creating sta db entry\n",
4291 		    __func__, error);
4292 		/* XXX how to deal with error? */
4293 	}
4294 }
4295 
4296 /*
4297  * Periodically poke the firmware to age out station state
4298  * (power save queues, pending tx aggregates).
4299  */
4300 static void
4301 mwl_agestations(void *arg)
4302 {
4303 	struct mwl_softc *sc = arg;
4304 
4305 	mwl_hal_setkeepalive(sc->sc_mh);
4306 	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
4307 		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
4308 }
4309 
4310 static const struct mwl_hal_channel *
4311 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4312 {
4313 	int i;
4314 
4315 	for (i = 0; i < ci->nchannels; i++) {
4316 		const struct mwl_hal_channel *hc = &ci->channels[i];
4317 		if (hc->ieee == ieee)
4318 			return hc;
4319 	}
4320 	return NULL;
4321 }
4322 
/*
 * Validate a proposed regdomain channel list against the hal
 * channel tables: every channel must have calibration data,
 * and its max tx power is capped to the hal limit.
 */
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
	int nchan, struct ieee80211_channel chans[])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_CHANNELINFO *ci;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		const struct mwl_hal_channel *hc;

		/* select the hal channel table by band and width */
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
			mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
			    IEEE80211_IS_CHAN_HT40(c) ?
				MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
		} else {
			device_printf(sc->sc_dev,
			    "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		/*
		 * Verify channel has cal data and cap tx power.
		 */
		hc = findhalchannel(ci, c->ic_ieee);
		if (hc != NULL) {
			if (c->ic_maxpower > 2*hc->maxTxPow)
				c->ic_maxpower = 2*hc->maxTxPow;
			goto next;
		}
		if (IEEE80211_IS_CHAN_HT40(c)) {
			/*
			 * Look for the extension channel since the
			 * hal table only has the primary channel.
			 */
			hc = findhalchannel(ci, c->ic_extieee);
			if (hc != NULL) {
				if (c->ic_maxpower > 2*hc->maxTxPow)
					c->ic_maxpower = 2*hc->maxTxPow;
				goto next;
			}
		}
		/* no cal data for either primary or extension: reject */
		device_printf(sc->sc_dev,
		    "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
		    __func__, c->ic_ieee, c->ic_extieee,
		    c->ic_freq, c->ic_flags);
		return EINVAL;
	next:
		;
	}
	return 0;
}
4381 
4382 #define	IEEE80211_CHAN_HTG	(IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4383 #define	IEEE80211_CHAN_HTA	(IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4384 
4385 static void
4386 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4387 	const MWL_HAL_CHANNELINFO *ci, int flags)
4388 {
4389 	int i, error;
4390 
4391 	for (i = 0; i < ci->nchannels; i++) {
4392 		const struct mwl_hal_channel *hc = &ci->channels[i];
4393 
4394 		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4395 		    hc->ieee, hc->maxTxPow, flags);
4396 		if (error != 0 && error != ENOENT)
4397 			break;
4398 	}
4399 }
4400 
4401 static void
4402 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4403 	const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4404 {
4405 	int i, error;
4406 
4407 	error = 0;
4408 	for (i = 0; i < ci->nchannels && error == 0; i++) {
4409 		const struct mwl_hal_channel *hc = &ci->channels[i];
4410 
4411 		error = ieee80211_add_channel(chans, maxchans, nchans,
4412 		    hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4413 	}
4414 }
4415 
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	/* 2.4GHz, 20MHz: 11b/g/ng */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		setbit(bands, IEEE80211_MODE_11NG);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* 5GHz, 20MHz: 11a/na */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* HT40 channels in both bands */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4452 
4453 static void
4454 mwl_getradiocaps(struct ieee80211com *ic,
4455 	int maxchans, int *nchans, struct ieee80211_channel chans[])
4456 {
4457 	struct mwl_softc *sc = ic->ic_softc;
4458 
4459 	getchannels(sc, maxchans, nchans, chans);
4460 }
4461 
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211.  Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	/* default regdomain; presumably refined later via
	 * mwl_setregdomain — confirm */
	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	/* fail if the hal gave us no channels at all */
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4483 #undef IEEE80211_CHAN_HTA
4484 #undef IEEE80211_CHAN_HTG
4485 
4486 #ifdef MWL_DEBUG
/*
 * Dump a receive descriptor for debugging (MWL_DEBUG only).
 */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* mark driver-owned descriptors with ok (*) / error (!) */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	       "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
	        "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4502 
/*
 * Dump a transmit descriptor for debugging (MWL_DEBUG only).
 */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !",
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment lengths/pointers */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
  int i;
  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
	printf("%02x ", cp[i]);
	if (((i+1) % 16) == 0)
		printf("\n");
  }
  printf("\n");
}
#endif
}
4544 #endif /* MWL_DEBUG */
4545 
4546 #if 0
/*
 * Dump every buffer on a tx queue's active list (debug aid;
 * the caller sites are currently compiled out).
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		/* sync so we read the device's view of the descriptor */
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
4565 #endif
4566 
/*
 * Once-per-second watchdog.  sc_tx_timer counts down to a
 * transmit-timeout declaration; a keepalive command doubles
 * as a probe of firmware liveness.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc = arg;

	/* self-rearm every second */
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* 0 means idle; otherwise count down and fire only at 0
	 * (NOTE(review): sc_tx_timer presumably armed by the tx
	 * path — confirm) */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	if (sc->sc_running && !sc->sc_invalid) {
		if (mwl_hal_setkeepalive(sc->sc_mh))
			device_printf(sc->sc_dev,
			    "transmit timeout (firmware hung?)\n");
		else
			device_printf(sc->sc_dev,
			    "transmit timeout\n");
#if 0
		mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4591 
4592 #ifdef MWL_DIAGAPI
4593 /*
4594  * Diagnostic interface to the HAL.  This is used by various
4595  * tools to do things like retrieve register contents for
4596  * debugging.  The mechanism is intentionally opaque so that
4597  * it can change frequently w/o concern for compatibility.
4598  */
4599 static int
4600 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4601 {
4602 	struct mwl_hal *mh = sc->sc_mh;
4603 	u_int id = md->md_id & MWL_DIAG_ID;
4604 	void *indata = NULL;
4605 	void *outdata = NULL;
4606 	u_int32_t insize = md->md_in_size;
4607 	u_int32_t outsize = md->md_out_size;
4608 	int error = 0;
4609 
4610 	if (md->md_id & MWL_DIAG_IN) {
4611 		/*
4612 		 * Copy in data.
4613 		 */
4614 		indata = malloc(insize, M_TEMP, M_NOWAIT);
4615 		if (indata == NULL) {
4616 			error = ENOMEM;
4617 			goto bad;
4618 		}
4619 		error = copyin(md->md_in_data, indata, insize);
4620 		if (error)
4621 			goto bad;
4622 	}
4623 	if (md->md_id & MWL_DIAG_DYN) {
4624 		/*
4625 		 * Allocate a buffer for the results (otherwise the HAL
4626 		 * returns a pointer to a buffer where we can read the
4627 		 * results).  Note that we depend on the HAL leaving this
4628 		 * pointer for us to use below in reclaiming the buffer;
4629 		 * may want to be more defensive.
4630 		 */
4631 		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4632 		if (outdata == NULL) {
4633 			error = ENOMEM;
4634 			goto bad;
4635 		}
4636 	}
4637 	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4638 		if (outsize < md->md_out_size)
4639 			md->md_out_size = outsize;
4640 		if (outdata != NULL)
4641 			error = copyout(outdata, md->md_out_data,
4642 					md->md_out_size);
4643 	} else {
4644 		error = EINVAL;
4645 	}
4646 bad:
4647 	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4648 		free(indata, M_TEMP);
4649 	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4650 		free(outdata, M_TEMP);
4651 	return error;
4652 }
4653 
/*
 * Reset the device via the diag interface: optionally reload
 * firmware (md_id == 0), re-fetch h/w specs, re-setup DMA and
 * reset tx/rx bookkeeping.  Caller must hold the softc lock.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4684 #endif /* MWL_DIAGAPI */
4685 
/*
 * Callback from net80211 when the number of running vaps
 * changes; bring the hardware up or down to match.
 */
static void
mwl_parent(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	int startall = 0;

	MWL_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		if (sc->sc_running) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init(sc);	/* XXX lose error */
				startall = 1;
			}
		}
	} else
		mwl_stop(sc);
	MWL_UNLOCK(sc);
	/* NB: start vaps outside the softc lock */
	if (startall)
		ieee80211_start_all(ic);
}
4722 
/*
 * Driver-private ioctl handler (statistics and, with
 * MWL_DIAGAPI, diag/reset).  Anything else returns ENOTTY
 * so net80211 falls back to its own handling.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh the h/w portion of the stats before copyout */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
		    sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4765 
4766 #ifdef	MWL_DEBUG
4767 static int
4768 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4769 {
4770 	struct mwl_softc *sc = arg1;
4771 	int debug, error;
4772 
4773 	debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4774 	error = sysctl_handle_int(oidp, &debug, 0, req);
4775 	if (error || !req->newptr)
4776 		return error;
4777 	mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4778 	sc->sc_debug = debug & 0x00ffffff;
4779 	return 0;
4780 }
4781 #endif /* MWL_DEBUG */
4782 
/*
 * Attach driver sysctls; currently only the debug mask and
 * only when built with MWL_DEBUG.
 */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef	MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the loader/tunable-controlled global */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4796 
4797 /*
4798  * Announce various information on device/driver attach.
4799  */
4800 static void
4801 mwl_announce(struct mwl_softc *sc)
4802 {
4803 
4804 	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
4805 		sc->sc_hwspecs.hwVersion,
4806 		(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
4807 		(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
4808 		(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
4809 		(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
4810 		sc->sc_hwspecs.regionCode);
4811 	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
4812 
4813 	if (bootverbose) {
4814 		int i;
4815 		for (i = 0; i <= WME_AC_VO; i++) {
4816 			struct mwl_txq *txq = sc->sc_ac2q[i];
4817 			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
4818 				txq->qnum, ieee80211_wme_acnames[i]);
4819 		}
4820 	}
4821 	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
4822 		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
4823 	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
4824 		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
4825 	if (bootverbose || mwl_txbuf != MWL_TXBUF)
4826 		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
4827 	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
4828 		device_printf(sc->sc_dev, "multi-bss support\n");
4829 #ifdef MWL_TX_NODROP
4830 	if (bootverbose)
4831 		device_printf(sc->sc_dev, "no tx drop\n");
4832 #endif
4833 }
4834