xref: /dragonfly/sys/dev/netif/ath/ath/if_athvar.h (revision 7d3e9a5b)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  *
29  * $FreeBSD$
30  */
31 
32 /*
 * Definitions for the Atheros Wireless LAN controller driver.
34  */
35 #ifndef _DEV_ATH_ATHVAR_H
36 #define _DEV_ATH_ATHVAR_H
37 
38 #include <machine/atomic.h>
39 
40 #include <dev/netif/ath/ath_hal/ah.h>
41 #include <dev/netif/ath/ath_hal/ah_desc.h>
42 #include <netproto/802_11/ieee80211_radiotap.h>
43 #include <dev/netif/ath/ath/if_athioctl.h>
44 #include <dev/netif/ath/ath/if_athrate.h>
45 #ifdef	ATH_DEBUG_ALQ
46 #include <dev/netif/ath/ath/if_ath_alq.h>
47 #endif
48 
#define	ATH_TIMEOUT		1000	/* generic h/w wait-loop iteration bound */

/* 802.11n support is enabled unconditionally on DragonFly builds. */
#if defined(__DragonFly__)
#define ATH_ENABLE_11N
#endif

/*
 * There is a separate TX ath_buf pool for management frames.
 * This ensures that management frames such as probe responses
 * and BAR frames can be transmitted during periods of high
 * TX activity.
 */
#define	ATH_MGMT_TXBUF		32

/*
 * 802.11n requires more TX and RX buffers to do AMPDU.
 */
#ifdef	ATH_ENABLE_11N
#define	ATH_TXBUF	512
#define	ATH_RXBUF	512
#endif

/* Non-11n fallback sizes (only used if not already defined above). */
#ifndef ATH_RXBUF
#define	ATH_RXBUF	40		/* number of RX buffers */
#endif
#ifndef ATH_TXBUF
#define	ATH_TXBUF	200		/* number of TX buffers */
#endif
#define	ATH_BCBUF	4		/* number of beacon buffers */

#define	ATH_TXDESC	10		/* number of descriptors per buffer */
#define	ATH_TXMAXTRY	11		/* max number of transmit attempts */
#define	ATH_TXMGTTRY	4		/* xmit attempts for mgt/ctl frames */
#define	ATH_TXINTR_PERIOD 5		/* max number of batched tx descriptors */

#define	ATH_BEACON_AIFS_DEFAULT	 1	/* default aifs for ap beacon q */
#define	ATH_BEACON_CWMIN_DEFAULT 0	/* default cwmin for ap beacon q */
#define	ATH_BEACON_CWMAX_DEFAULT 0	/* default cwmax for ap beacon q */

/*
 * The following bits can be set during the PCI (and perhaps non-PCI
 * later) device probe path.
 *
 * It controls some of the driver and HAL behaviour.
 */

#define	ATH_PCI_CUS198		0x0001
#define	ATH_PCI_CUS230		0x0002
#define	ATH_PCI_CUS217		0x0004
#define	ATH_PCI_CUS252		0x0008
#define	ATH_PCI_WOW		0x0010
#define	ATH_PCI_BT_ANT_DIV	0x0020
#define	ATH_PCI_D3_L1_WAR	0x0040
#define	ATH_PCI_AR9565_1ANT	0x0080
#define	ATH_PCI_AR9565_2ANT	0x0100
#define	ATH_PCI_NO_PLL_PWRSAVE	0x0200
#define	ATH_PCI_KILLER		0x0400

/*
 * The key cache is used for h/w cipher state and also for
 * tracking station state such as the current tx antenna.
 * We also setup a mapping table between key cache slot indices
 * and station state to short-circuit node lookups on rx.
 * Different parts have different size key caches.  We handle
 * up to ATH_KEYMAX entries (could dynamically allocate state).
 */
#define	ATH_KEYMAX	128		/* max key cache size we handle */
#define	ATH_KEYBYTES	(ATH_KEYMAX/NBBY)	/* storage space in bytes */

/* Forward declarations to keep header dependencies minimal. */
struct taskqueue;
struct kthread;
struct ath_buf;

/* Max buffers tracked per TID: twice the maximum block-ack window. */
#define	ATH_TID_MAX_BUFS	(2 * IEEE80211_AGGR_BAWMAX)
123 
124 /*
125  * Per-TID state
126  *
127  * Note that TID 16 (WME_NUM_TID+1) is for handling non-QoS frames.
128  */
struct ath_tid {
	TAILQ_HEAD(,ath_buf)	tid_q;		/* pending buffers */
	struct ath_node		*an;		/* pointer to parent node */
	int			tid;		/* tid number */
	int			ac;		/* which AC gets this traffic */
	int			hwq_depth;	/* how many buffers are on HW */
	u_int			axq_depth;	/* SW queue depth */

	/* Queue of frames that were TX-filtered (see isfiltered below). */
	struct {
		TAILQ_HEAD(,ath_buf)	tid_q;		/* filtered queue */
		u_int			axq_depth;	/* SW queue depth */
	} filtq;

	/*
	 * Entry on the ath_txq; when there's traffic
	 * to send
	 */
	TAILQ_ENTRY(ath_tid)	axq_qelem;
	int			sched;	/* non-zero if on the txq tid list */
	int			paused;	/* >0 if the TID has been paused */

	/*
	 * These are flags - perhaps later collapse
	 * down to a single uint32_t ?
	 */
	int			addba_tx_pending;	/* TX ADDBA pending */
	int			bar_wait;	/* waiting for BAR */
	int			bar_tx;		/* BAR TXed */
	int			isfiltered;	/* is this node currently filtered */

	/*
	 * Is the TID being cleaned up after a transition
	 * from aggregation to non-aggregation?
	 * When this is set to 1, this TID will be paused
	 * and no further traffic will be queued until all
	 * the hardware packets pending for this TID have been
	 * TXed/completed; at which point (non-aggregation)
	 * traffic will resume being TXed.
	 */
	int			cleanup_inprogress;
	/*
	 * How many hardware-queued packets are
	 * waiting to be cleaned up.
	 * This is only valid if cleanup_inprogress is 1.
	 */
	int			incomp;

	/*
	 * The following implements a ring representing
	 * the frames in the current BAW.
	 * To avoid copying the array content each time
	 * the BAW is moved, the baw_head/baw_tail point
	 * to the current BAW begin/end; when the BAW is
	 * shifted the head/tail of the array are also
	 * appropriately shifted.
	 */
	/* active tx buffers, beginning at current BAW */
	struct ath_buf		*tx_buf[ATH_TID_MAX_BUFS];
	/* where the baw head is in the array */
	int			baw_head;
	/* where the BAW tail is in the array */
	int			baw_tail;
};
192 
/* driver-specific node state */
struct ath_node {
	struct ieee80211_node an_node;	/* base class; must be first (see ATH_NODE cast) */
	u_int8_t	an_mgmtrix;	/* min h/w rate index */
	u_int8_t	an_mcastrix;	/* mcast h/w rate index */
	uint32_t	an_is_powersave;	/* node is sleeping */
	uint32_t	an_stack_psq;		/* net80211 psq isn't empty */
	uint32_t	an_tim_set;		/* TIM has been set */
	struct ath_buf	*an_ff_buf[WME_NUM_AC]; /* ff staging area */
	struct ath_tid	an_tid[IEEE80211_TID_SIZE];	/* per-TID state */
	char		an_name[32];	/* eg "wlan0_a1" */
#if defined(__DragonFly__)
	struct lock	an_mtx;		/* protecting the rate control state */
#else
	struct mtx	an_mtx;		/* protecting the rate control state */
#endif
	uint32_t	an_swq_depth;	/* how many SWQ packets for this
					   node */
	int			clrdmask;	/* has clrdmask been set */
	uint32_t	an_leak_count;	/* How many frames to leak during pause */
	/* variable-length rate control state follows */
};
/* Downcast from the embedded net80211 node; valid because an_node is first. */
#define	ATH_NODE(ni)	((struct ath_node *)(ni))
#define	ATH_NODE_CONST(ni)	((const struct ath_node *)(ni))
217 
#define ATH_RSSI_LPF_LEN	10	/* filter length, in samples */
#define ATH_RSSI_DUMMY_MARKER	0x127	/* sentinel: filter not yet seeded */
/* Scale a value into the extended-precision (EP) domain. */
#define ATH_EP_MUL(x, mul)	((x) * (mul))
#define ATH_RSSI_IN(x)		(ATH_EP_MUL((x), HAL_RSSI_EP_MULTIPLIER))
/*
 * Exponential moving average over 'len' samples; the first sample
 * simply seeds the filter while x still holds the dummy marker.
 * All arguments are fully parenthesized so expansion is safe with
 * arbitrary argument expressions (the old form compared a bare 'x').
 */
#define ATH_LPF_RSSI(x, y, len) \
    (((x) != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
/* Fold a new RSSI sample into x; samples below -20 are discarded. */
#define ATH_RSSI_LPF(x, y) do {						\
    if ((y) >= -20)							\
    	(x) = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN);	\
} while (0)
/* Round an EP-domain value back down to integer units. */
#define	ATH_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	ATH_RSSI(x)		ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
231 
/*
 * Which ath_buf pool a TX buffer comes from: the normal data pool
 * or the reserved management-frame pool (ATH_MGMT_TXBUF).
 */
typedef enum {
	ATH_BUFTYPE_NORMAL	= 0,
	ATH_BUFTYPE_MGMT	= 1,
} ath_buf_type_t;
236 
struct ath_buf {
	TAILQ_ENTRY(ath_buf)	bf_list;	/* queue linkage */
	struct ath_buf *	bf_next;	/* next buffer in the aggregate */
	int			bf_nseg;	/* # valid entries in bf_segs[] */
	HAL_STATUS		bf_rxstatus;	/* RX descriptor status */
	uint16_t		bf_flags;	/* status flags (below) */
	uint16_t		bf_descid;	/* 16 bit descriptor ID */
	struct ath_desc		*bf_desc;	/* virtual addr of desc */
	struct ath_desc_status	bf_status;	/* tx/rx status */
	bus_addr_t		bf_daddr;	/* physical addr of desc */
	bus_dmamap_t		bf_dmamap;	/* DMA map for mbuf chain */
	struct mbuf		*bf_m;		/* mbuf for buf */
	struct ieee80211_node	*bf_node;	/* pointer to the node */
	struct ath_desc		*bf_lastds;	/* last descriptor for comp status */
	struct ath_buf		*bf_last;	/* last buffer in aggregate, or self for non-aggregate */
	bus_size_t		bf_mapsize;	/* total mapped size of bf_m */
#define	ATH_MAX_SCATTER		ATH_TXDESC	/* max(tx,rx,beacon) desc's */
	bus_dma_segment_t	bf_segs[ATH_MAX_SCATTER];
	uint32_t		bf_nextfraglen;	/* length of next fragment */

	/* Completion function to call on TX complete (fail or not) */
	/*
	 * "fail" here is set to 1 if the queue entries were removed
	 * through a call to ath_tx_draintxq().
	 */
	void(* bf_comp) (struct ath_softc *sc, struct ath_buf *bf, int fail);

	/* This state is kept to support software retries and aggregation */
	struct {
		uint16_t bfs_seqno;	/* sequence number of this packet */
		uint16_t bfs_ndelim;	/* number of delims for padding */

		uint8_t bfs_retries;	/* retry count */
		uint8_t bfs_tid;	/* packet TID (or TID_MAX for no QoS) */
		uint8_t bfs_nframes;	/* number of frames in aggregate */
		uint8_t bfs_pri;	/* packet AC priority */
		uint8_t bfs_tx_queue;	/* destination hardware TX queue */

		u_int32_t bfs_aggr:1,		/* part of aggregate? */
		    bfs_aggrburst:1,	/* part of aggregate burst? */
		    bfs_isretried:1,	/* retried frame? */
		    bfs_dobaw:1,	/* actually check against BAW? */
		    bfs_addedbaw:1,	/* has been added to the BAW */
		    bfs_shpream:1,	/* use short preamble */
		    bfs_istxfrag:1,	/* is fragmented */
		    bfs_ismrr:1,	/* do multi-rate TX retry */
		    bfs_doprot:1,	/* do RTS/CTS based protection */
		    bfs_doratelookup:1;	/* do rate lookup before each TX */

		/*
		 * These fields are passed into the
		 * descriptor setup functions.
		 */

		/* Make this an 8 bit value? */
		HAL_PKT_TYPE bfs_atype;	/* packet type */

		uint32_t bfs_pktlen;	/* length of this packet */

		uint16_t bfs_hdrlen;	/* length of this packet header */
		uint16_t bfs_al;	/* length of aggregate */

		uint16_t bfs_txflags;	/* HAL (tx) descriptor flags */
		uint8_t bfs_txrate0;	/* first TX rate */
		uint8_t bfs_try0;		/* first try count */

		uint16_t bfs_txpower;	/* tx power */
		uint8_t bfs_ctsrate0;	/* Non-zero - use this as ctsrate */
		uint8_t bfs_ctsrate;	/* CTS rate */

		/* 16 bit? */
		int32_t bfs_keyix;		/* crypto key index */
		int32_t bfs_txantenna;	/* TX antenna config */

		/* Make this an 8 bit value? */
		enum ieee80211_protmode bfs_protmode;

		/* 16 bit? */
		uint32_t bfs_ctsduration;	/* CTS duration (pre-11n NICs) */
		struct ath_rc_series bfs_rc[ATH_RC_NUM];	/* non-11n TX series */
	} bf_state;
};
typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;

/* Values for bf_flags. */
#define	ATH_BUF_MGMT	0x00000001	/* (tx) desc is a mgmt desc */
#define	ATH_BUF_BUSY	0x00000002	/* (tx) desc owned by h/w */
#define	ATH_BUF_FIFOEND	0x00000004	/* (tx) last buf in a FIFO entry */
#define	ATH_BUF_FIFOPTR	0x00000008	/* (tx) first buf in a FIFO entry */

/* Flags preserved when an ath_buf is cloned. */
#define	ATH_BUF_FLAGS_CLONE	(ATH_BUF_MGMT)
327 
328 /*
329  * DMA state for tx/rx descriptors.
330  */
struct ath_descdma {
	const char*		dd_name;	/* name for diagnostics */
	struct ath_desc		*dd_desc;	/* descriptors */
	int			dd_descsize;	/* size of single descriptor */
	bus_addr_t		dd_desc_paddr;	/* physical addr of dd_desc */
	bus_size_t		dd_desc_len;	/* size of dd_desc */
	bus_dma_segment_t	dd_dseg;	/* DMA segment for descriptors */
	bus_dma_tag_t		dd_dmat;	/* bus DMA tag */
	bus_dmamap_t		dd_dmamap;	/* DMA map for descriptors */
	struct ath_buf		*dd_bufptr;	/* associated buffers */
};
342 
343 /*
344  * Data transmit queue state.  One of these exists for each
345  * hardware transmit queue.  Packets sent to us from above
346  * are assigned to queues based on their priority.  Not all
347  * devices support a complete set of hardware transmit queues.
348  * For those devices the array sc_ac2q will map multiple
349  * priorities to fewer hardware queues (typically all to one
350  * hardware queue).
351  */
struct ath_txq {
	struct ath_softc	*axq_softc;	/* Needed for scheduling */
	u_int			axq_qnum;	/* hardware q number */
#define	ATH_TXQ_SWQ	(HAL_NUM_TX_QUEUES+1)	/* qnum for s/w only queue */
	u_int			axq_ac;		/* WME AC */
	u_int			axq_flags;	/* ATH_TXQ_* flags below */
//#define	ATH_TXQ_PUTPENDING	0x0001		/* ath_hal_puttxbuf pending */
#define	ATH_TXQ_PUTRUNNING	0x0002		/* ath_hal_puttxbuf has been called */
	u_int			axq_depth;	/* queue depth (stat only) */
	u_int			axq_aggr_depth;	/* how many aggregates are queued */
	u_int			axq_intrcnt;	/* interrupt count */
	u_int32_t		*axq_link;	/* link ptr in last TX desc */
	TAILQ_HEAD(axq_q_s, ath_buf)	axq_q;		/* transmit queue */
#if defined(__DragonFly__)
	struct lock		axq_lock;	/* lock on q and link */
#else
	struct mtx		axq_lock;	/* lock on q and link */
#endif

	/*
	 * This is the FIFO staging buffer when doing EDMA.
	 *
	 * For legacy chips, we just push the head pointer to
	 * the hardware and we ignore this list.
	 *
	 * For EDMA, the staging buffer is treated as normal;
	 * when it's time to push a list of frames to the hardware
	 * we move that list here and we stamp buffers with
	 * flags to identify the beginning/end of that particular
	 * FIFO entry.
	 */
	struct {
		TAILQ_HEAD(axq_q_f_s, ath_buf)	axq_q;
		u_int				axq_depth;
	} fifo;
	u_int			axq_fifo_depth;	/* depth of FIFO frames */

	/*
	 * XXX the holdingbf field is protected by the TXBUF lock
	 * for now, NOT the TXQ lock.
	 *
	 * Architecturally, it would likely be better to move
	 * the holdingbf field to a separate array in ath_softc
	 * just to highlight that it's not protected by the normal
	 * TX path lock.
	 */
	struct ath_buf		*axq_holdingbf;	/* holding TX buffer */
	char			axq_name[12];	/* e.g. "ath0_txq4" */

	/* Per-TID traffic queue for software -> hardware TX */
	/*
	 * This is protected by the general TX path lock, not (for now)
	 * by the TXQ lock.
	 */
	TAILQ_HEAD(axq_t_s,ath_tid)	axq_tidq;
};
408 
409 /*
410  * Macros modified for DragonFly
411  */
/* Per-TXQ lock: protects axq_q and axq_link (lockmgr on DragonFly). */
#define	ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
	    ksnprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
	      device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
	    lockinit(&(_tq)->axq_lock, (_tq)->axq_name, 0, 0); \
	} while (0)
#define	ATH_TXQ_LOCK_DESTROY(_tq)	lockuninit(&(_tq)->axq_lock)
#define	ATH_TXQ_LOCK(_tq)		lockmgr(&(_tq)->axq_lock, LK_EXCLUSIVE)
#define	ATH_TXQ_UNLOCK(_tq)		lockmgr(&(_tq)->axq_lock, LK_RELEASE)
#define	ATH_TXQ_LOCK_ASSERT(_tq)	KKASSERT(lockstatus(&(_tq)->axq_lock, curthread) == LK_EXCLUSIVE)
#define	ATH_TXQ_UNLOCK_ASSERT(_tq)	KKASSERT(lockstatus(&(_tq)->axq_lock, curthread) != LK_EXCLUSIVE)


/* Per-node lock: protects the rate control state hanging off ath_node. */
#define	ATH_NODE_LOCK(_an)		lockmgr(&(_an)->an_mtx, LK_EXCLUSIVE)
#define	ATH_NODE_UNLOCK(_an)		lockmgr(&(_an)->an_mtx, LK_RELEASE)
#define	ATH_NODE_LOCK_ASSERT(_an)	KKASSERT(lockstatus(&(_an)->an_mtx, curthread) == LK_EXCLUSIVE)
#define	ATH_NODE_UNLOCK_ASSERT(_an)	KKASSERT(lockstatus(&(_an)->an_mtx, curthread) != LK_EXCLUSIVE)
428 
/*
 * These are for the hardware queue; axq_depth mirrors the list length.
 * All arguments are parenthesized so expansion is safe with arbitrary
 * expressions (the REMOVE variant previously used a bare _elm).
 */
#define ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth--; \
} while (0)
#define	ATH_TXQ_FIRST(_tq)		TAILQ_FIRST(&(_tq)->axq_q)
#define	ATH_TXQ_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->axq_q, _field)
446 
/*
 * These are for the TID software queue; they keep both the per-TID
 * depth (axq_depth) and the per-node total (an->an_swq_depth) in
 * sync with the list contents.  All arguments are parenthesized so
 * expansion is safe (the REMOVE variant previously used a bare _elm).
 */
#define ATH_TID_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth--; \
	(_tq)->an->an_swq_depth--; \
} while (0)
#define	ATH_TID_FIRST(_tq)		TAILQ_FIRST(&(_tq)->tid_q)
#define	ATH_TID_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->tid_q, _field)
467 
/*
 * These are for the TID filtered frame queue.
 *
 * NOTE: these adjust the TID's main axq_depth and the node's
 * an_swq_depth counters, not filtq.axq_depth.  All arguments are
 * parenthesized (the REMOVE variant previously used a bare _elm).
 */
#define ATH_TID_FILT_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_FILT_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_FILT_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth--; \
	(_tq)->an->an_swq_depth--; \
} while (0)
#define	ATH_TID_FILT_FIRST(_tq)		TAILQ_FIRST(&(_tq)->filtq.tid_q)
#define	ATH_TID_FILT_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->filtq.tid_q,_field)
488 
struct ath_vap {
	struct ieee80211vap av_vap;	/* base class; must be first (see ATH_VAP cast) */
	int		av_bslot;	/* beacon slot index */
	struct ath_buf	*av_bcbuf;	/* beacon buffer */
	struct ath_txq	av_mcastq;	/* buffered mcast s/w queue */

	/*
	 * Saved copies of net80211 vap methods; presumably the driver
	 * installs its own wrappers and chains to these -- confirm
	 * against the vap attach path in if_ath.c.
	 */
	void		(*av_recv_mgmt)(struct ieee80211_node *,
				struct mbuf *, int,
				const struct ieee80211_rx_stats *, int, int);
	int		(*av_newstate)(struct ieee80211vap *,
				enum ieee80211_state, int);
	void		(*av_bmiss)(struct ieee80211vap *);
	void		(*av_node_ps)(struct ieee80211_node *, int);
	int		(*av_set_tim)(struct ieee80211_node *, int);
	void		(*av_recv_pspoll)(struct ieee80211_node *,
				struct mbuf *);
};
/* Downcast from the embedded net80211 vap; valid because av_vap is first. */
#define	ATH_VAP(vap)	((struct ath_vap *)(vap))
507 
508 struct taskqueue;
509 struct ath_tx99;
510 
/*
 * Whether to reset the TX/RX queue with or without
 * a queue flush.  Passed to the drain/stop style methods
 * below (e.g. ath_tx_methods.xmit_drain).
 */
typedef enum {
	ATH_RESET_DEFAULT = 0,
	ATH_RESET_NOLOSS = 1,
	ATH_RESET_FULL = 2,
} ATH_RESET_TYPE;
520 
/*
 * Method table abstracting the RX implementation (the driver carries
 * both EDMA and non-EDMA state; note the sc_isedma flag in ath_softc).
 */
struct ath_rx_methods {
	void		(*recv_sched_queue)(struct ath_softc *sc,
			    HAL_RX_QUEUE q, int dosched);
	void		(*recv_sched)(struct ath_softc *sc, int dosched);
	void		(*recv_stop)(struct ath_softc *sc, int dodelay);
	int		(*recv_start)(struct ath_softc *sc);
	void		(*recv_flush)(struct ath_softc *sc);
	void		(*recv_tasklet)(void *arg, int npending);	/* task callback */
	int		(*recv_rxbuf_init)(struct ath_softc *sc,
			    struct ath_buf *bf);
	int		(*recv_setup)(struct ath_softc *sc);
	int		(*recv_teardown)(struct ath_softc *sc);
};
534 
/*
 * Represent the current state of the RX FIFO.
 */
struct ath_rx_edma {
	struct ath_buf	**m_fifo;	/* array of RX buffers (ring storage) */
	int		m_fifolen;	/* number of slots in m_fifo */
	int		m_fifo_head;	/* ring head index */
	int		m_fifo_tail;	/* ring tail index */
	int		m_fifo_depth;	/* entries currently in the ring */
	struct mbuf	*m_rxpending;	/* pending RX mbuf -- presumably a
					   partial frame; confirm in RX path */
	struct ath_buf	*m_holdbf;	/* holding buffer */
};
547 
/* Per-queue TX FIFO ring state for EDMA chipsets. */
struct ath_tx_edma_fifo {
	struct ath_buf	**m_fifo;	/* array of TX buffers (ring storage) */
	int		m_fifolen;	/* number of slots in m_fifo */
	int		m_fifo_head;	/* ring head index */
	int		m_fifo_tail;	/* ring tail index */
	int		m_fifo_depth;	/* entries currently in the ring */
};
555 
/*
 * Method table abstracting the TX implementation (EDMA vs. non-EDMA;
 * note the sc_isedma flag in ath_softc).
 */
struct ath_tx_methods {
	int		(*xmit_setup)(struct ath_softc *sc);
	int		(*xmit_teardown)(struct ath_softc *sc);
	void		(*xmit_attach_comp_func)(struct ath_softc *sc);

	void		(*xmit_dma_restart)(struct ath_softc *sc,
			    struct ath_txq *txq);
	void		(*xmit_handoff)(struct ath_softc *sc,
			    struct ath_txq *txq, struct ath_buf *bf);
	void		(*xmit_drain)(struct ath_softc *sc,
			    ATH_RESET_TYPE reset_type);
};
568 
569 struct ath_softc {
570 	struct ieee80211com	sc_ic;
571 	struct ath_stats	sc_stats;	/* device statistics */
572 	struct ath_tx_aggr_stats	sc_aggr_stats;
573 	struct ath_intr_stats	sc_intr_stats;
574 	uint64_t		sc_debug;
575 	uint64_t		sc_ktrdebug;
576 	int			sc_nvaps;	/* # vaps */
577 	int			sc_nstavaps;	/* # station vaps */
578 	int			sc_nmeshvaps;	/* # mbss vaps */
579 	u_int8_t		sc_hwbssidmask[IEEE80211_ADDR_LEN];
580 	u_int8_t		sc_nbssid0;	/* # vap's using base mac */
581 	uint32_t		sc_bssidmask;	/* bssid mask */
582 
583 	struct ath_rx_methods	sc_rx;
584 	struct ath_rx_edma	sc_rxedma[HAL_NUM_RX_QUEUES];	/* HP/LP queues */
585 	ath_bufhead		sc_rx_rxlist[HAL_NUM_RX_QUEUES];	/* deferred RX completion */
586 	struct ath_tx_methods	sc_tx;
587 	struct ath_tx_edma_fifo	sc_txedma[HAL_NUM_TX_QUEUES];
588 
589 	/*
590 	 * This is (currently) protected by the TX queue lock;
591 	 * it should migrate to a separate lock later
592 	 * so as to minimise contention.
593 	 */
594 	ath_bufhead		sc_txbuf_list;
595 
596 	int			sc_rx_statuslen;
597 	int			sc_tx_desclen;
598 	int			sc_tx_statuslen;
599 	int			sc_tx_nmaps;	/* Number of TX maps */
600 	int			sc_edma_bufsize;
601 	int			sc_rx_stopped;	/* XXX only for EDMA */
602 	int			sc_rx_resetted;	/* XXX only for EDMA */
603 
604 	void 			(*sc_node_cleanup)(struct ieee80211_node *);
605 	void 			(*sc_node_free)(struct ieee80211_node *);
606 	device_t		sc_dev;
607 	HAL_BUS_TAG		sc_st;		/* bus space tag */
608 	HAL_BUS_HANDLE		sc_sh;		/* bus space handle */
609 	bus_dma_tag_t		sc_dmat;	/* bus DMA tag */
610 #if defined(__DragonFly__)
611 	struct lock		sc_mtx;		/* master lock (recursive) */
612 	struct lock		sc_pcu_mtx;	/* PCU access mutex */
613 #else
614 	struct mtx		sc_mtx;		/* master lock (recursive) */
615 	struct mtx		sc_pcu_mtx;	/* PCU access mutex */
616 #endif
617 	char			sc_pcu_mtx_name[32];
618 #if defined(__DragonFly__)
619 	struct lock		sc_rx_mtx;	/* RX access mutex */
620 #else
621 	struct mtx		sc_rx_mtx;	/* RX access mutex */
622 #endif
623 	char			sc_rx_mtx_name[32];
624 #if defined(__DragonFly__)
625 	struct lock		sc_tx_mtx;	/* TX handling/comp mutex */
626 #else
627 	struct mtx		sc_tx_mtx;	/* TX handling/comp mutex */
628 #endif
629 	char			sc_tx_mtx_name[32];
630 #if defined(__DragonFly__)
631 	struct lock		sc_tx_ic_mtx;	/* TX queue mutex */
632 #else
633 	struct mtx		sc_tx_ic_mtx;	/* TX queue mutex */
634 #endif
635 	char			sc_tx_ic_mtx_name[32];
636 	struct taskqueue	*sc_tq;		/* private task queue */
637 	struct ath_hal		*sc_ah;		/* Atheros HAL */
638 	struct ath_ratectrl	*sc_rc;		/* tx rate control support */
639 	struct ath_tx99		*sc_tx99;	/* tx99 adjunct state */
640 	void			(*sc_setdefantenna)(struct ath_softc *, u_int);
641 
642 	/*
643 	 * First set of flags.
644 	 */
645 	uint32_t		sc_invalid  : 1,/* disable hardware accesses */
646 				sc_mrretry  : 1,/* multi-rate retry support */
647 				sc_mrrprot  : 1,/* MRR + protection support */
648 				sc_softled  : 1,/* enable LED gpio status */
649 				sc_hardled  : 1,/* enable MAC LED status */
650 				sc_splitmic : 1,/* split TKIP MIC keys */
651 				sc_needmib  : 1,/* enable MIB stats intr */
652 				sc_diversity: 1,/* enable rx diversity */
653 				sc_hasveol  : 1,/* tx VEOL support */
654 				sc_ledstate : 1,/* LED on/off state */
655 				sc_blinking : 1,/* LED blink operation active */
656 				sc_mcastkey : 1,/* mcast key cache search */
657 				sc_scanning : 1,/* scanning active */
658 				sc_syncbeacon:1,/* sync/resync beacon timers */
659 				sc_hasclrkey: 1,/* CLR key supported */
660 				sc_xchanmode: 1,/* extended channel mode */
661 				sc_outdoor  : 1,/* outdoor operation */
662 				sc_dturbo   : 1,/* dynamic turbo in use */
663 				sc_hasbmask : 1,/* bssid mask support */
664 				sc_hasbmatch: 1,/* bssid match disable support*/
665 				sc_hastsfadd: 1,/* tsf adjust support */
666 				sc_beacons  : 1,/* beacons running */
667 				sc_swbmiss  : 1,/* sta mode using sw bmiss */
668 				sc_stagbeacons:1,/* use staggered beacons */
669 				sc_wmetkipmic:1,/* can do WME+TKIP MIC */
670 				sc_resume_up: 1,/* on resume, start all vaps */
671 				sc_tdma	    : 1,/* TDMA in use */
672 				sc_setcca   : 1,/* set/clr CCA with TDMA */
673 				sc_resetcal : 1,/* reset cal state next trip */
674 				sc_rxslink  : 1,/* do self-linked final descriptor */
675 				sc_rxtsf32  : 1,/* RX dec TSF is 32 bits */
676 				sc_isedma   : 1,/* supports EDMA */
677 				sc_do_mybeacon : 1; /* supports mybeacon */
678 
679 	/*
680 	 * Second set of flags.
681 	 */
682 	u_int32_t		sc_running  : 1,	/* initialized */
683 				sc_use_ent  : 1,
684 				sc_rx_stbc  : 1,
685 				sc_tx_stbc  : 1,
686 				sc_has_ldpc : 1,
687 				sc_hasenforcetxop : 1, /* support enforce TxOP */
688 				sc_hasdivcomb : 1,     /* RX diversity combining */
689 				sc_rx_lnamixer : 1;    /* RX using LNA mixing */
690 
691 	int			sc_cabq_enable;	/* Enable cabq transmission */
692 
693 	/*
694 	 * Enterprise mode configuration for AR9380 and later chipsets.
695 	 */
696 	uint32_t		sc_ent_cfg;
697 
698 	uint32_t		sc_eerd;	/* regdomain from EEPROM */
699 	uint32_t		sc_eecc;	/* country code from EEPROM */
700 						/* rate tables */
701 	const HAL_RATE_TABLE	*sc_rates[IEEE80211_MODE_MAX];
702 	const HAL_RATE_TABLE	*sc_currates;	/* current rate table */
703 	enum ieee80211_phymode	sc_curmode;	/* current phy mode */
704 	HAL_OPMODE		sc_opmode;	/* current operating mode */
705 	u_int16_t		sc_curtxpow;	/* current tx power limit */
706 	u_int16_t		sc_curaid;	/* current association id */
707 	struct ieee80211_channel *sc_curchan;	/* current installed channel */
708 	u_int8_t		sc_curbssid[IEEE80211_ADDR_LEN];
709 	u_int8_t		sc_rixmap[256];	/* IEEE to h/w rate table ix */
710 	struct {
711 		u_int8_t	ieeerate;	/* IEEE rate */
712 		u_int8_t	rxflags;	/* radiotap rx flags */
713 		u_int8_t	txflags;	/* radiotap tx flags */
714 		u_int16_t	ledon;		/* softled on time */
715 		u_int16_t	ledoff;		/* softled off time */
716 	} sc_hwmap[32];				/* h/w rate ix mappings */
717 	u_int8_t		sc_protrix;	/* protection rate index */
718 	u_int8_t		sc_lastdatarix;	/* last data frame rate index */
719 	u_int			sc_mcastrate;	/* ieee rate for mcastrateix */
720 	u_int			sc_fftxqmin;	/* min frames before staging */
721 	u_int			sc_fftxqmax;	/* max frames before drop */
722 	u_int			sc_txantenna;	/* tx antenna (fixed or auto) */
723 
724 	HAL_INT			sc_imask;	/* interrupt mask copy */
725 
726 	/*
727 	 * These are modified in the interrupt handler as well as
728 	 * the task queues and other contexts. Thus these must be
729 	 * protected by a mutex, or they could clash.
730 	 *
731 	 * For now, access to these is behind the ATH_LOCK,
732 	 * just to save time.
733 	 */
734 	uint32_t		sc_txq_active;	/* bitmap of active TXQs */
735 	uint32_t		sc_kickpcu;	/* whether to kick the PCU */
736 	uint32_t		sc_rxproc_cnt;	/* In RX processing */
737 	uint32_t		sc_txproc_cnt;	/* In TX processing */
738 	uint32_t		sc_txstart_cnt;	/* In TX output (raw/start) */
739 	uint32_t		sc_inreset_cnt;	/* In active reset/chanchange */
740 	uint32_t		sc_txrx_cnt;	/* refcount on stop/start'ing TX */
741 	uint32_t		sc_intr_cnt;	/* refcount on interrupt handling */
742 
743 	u_int			sc_keymax;	/* size of key cache */
744 	u_int8_t		sc_keymap[ATH_KEYBYTES];/* key use bit map */
745 
746 	/*
747 	 * Software based LED blinking
748 	 */
749 	u_int			sc_ledpin;	/* GPIO pin for driving LED */
750 	u_int			sc_ledon;	/* pin setting for LED on */
751 	u_int			sc_ledidle;	/* idle polling interval */
752 	int			sc_ledevent;	/* time of last LED event */
753 	u_int8_t		sc_txrix;	/* current tx rate for LED */
754 	u_int16_t		sc_ledoff;	/* off time for current blink */
755 	struct callout		sc_ledtimer;	/* led off timer */
756 
757 	/*
758 	 * Hardware based LED blinking
759 	 */
760 	int			sc_led_pwr_pin;	/* MAC power LED GPIO pin */
761 	int			sc_led_net_pin;	/* MAC network LED GPIO pin */
762 
763 	u_int			sc_rfsilentpin;	/* GPIO pin for rfkill int */
764 	u_int			sc_rfsilentpol;	/* pin setting for rfkill on */
765 
766 	struct ath_descdma	sc_rxdma;	/* RX descriptors */
767 	ath_bufhead		sc_rxbuf;	/* receive buffer */
768 	u_int32_t		*sc_rxlink;	/* link ptr in last RX desc */
769 	struct task		sc_rxtask;	/* rx int processing */
770 	u_int8_t		sc_defant;	/* current default antenna */
771 	u_int8_t		sc_rxotherant;	/* rx's on non-default antenna*/
772 	u_int64_t		sc_lastrx;	/* tsf at last rx'd frame */
773 	struct ath_rx_status	*sc_lastrs;	/* h/w status of last rx */
774 	struct ath_rx_radiotap_header sc_rx_th;
775 	int			sc_rx_th_len;
776 	u_int			sc_monpass;	/* frames to pass in mon.mode */
777 
778 	struct ath_descdma	sc_txdma;	/* TX descriptors */
779 	uint16_t		sc_txbuf_descid;
780 	ath_bufhead		sc_txbuf;	/* transmit buffer */
781 	int			sc_txbuf_cnt;	/* how many buffers avail */
782 	struct ath_descdma	sc_txdma_mgmt;	/* mgmt TX descriptors */
783 	ath_bufhead		sc_txbuf_mgmt;	/* mgmt transmit buffer */
784 	struct ath_descdma	sc_txsdma;	/* EDMA TX status desc's */
785 #if defined(__DragonFly__)
786 	struct lock		sc_txbuflock;	/* txbuf lock */
787 #else
788 	struct mtx		sc_txbuflock;	/* txbuf lock */
789 #endif
790 	char			sc_txname[12];	/* e.g. "ath0_buf" */
791 	u_int			sc_txqsetup;	/* h/w queues setup */
792 	u_int			sc_txintrperiod;/* tx interrupt batching */
793 	struct ath_txq		sc_txq[HAL_NUM_TX_QUEUES];
794 	struct ath_txq		*sc_ac2q[5];	/* WME AC -> h/w q map */
795 	struct task		sc_txtask;	/* tx int processing */
796 	struct task		sc_txqtask;	/* tx proc processing */
797 
798 	struct ath_descdma	sc_txcompdma;	/* TX EDMA completion */
799 #if defined(__DragonFly__)
800 	struct lock		sc_txcomplock;	/* TX EDMA completion lock */
801 #else
802 	struct mtx		sc_txcomplock;	/* TX EDMA completion lock */
803 #endif
804 	char			sc_txcompname[12];	/* eg ath0_txcomp */
805 
806 	int			sc_wd_timer;	/* count down for wd timer */
807 	struct callout		sc_wd_ch;	/* tx watchdog timer */
808 	struct ath_tx_radiotap_header sc_tx_th;
809 	int			sc_tx_th_len;
810 
811 	struct ath_descdma	sc_bdma;	/* beacon descriptors */
812 	ath_bufhead		sc_bbuf;	/* beacon buffers */
813 	u_int			sc_bhalq;	/* HAL q for outgoing beacons */
814 	u_int			sc_bmisscount;	/* missed beacon transmits */
815 	u_int32_t		sc_ant_tx[8];	/* recent tx frames/antenna */
816 	struct ath_txq		*sc_cabq;	/* tx q for cab frames */
817 	struct task		sc_bmisstask;	/* bmiss int processing */
818 	struct task		sc_bstucktask;	/* stuck beacon processing */
819 	struct task		sc_resettask;	/* interface reset task */
820 	struct task		sc_fataltask;	/* fatal task */
821 	enum {
822 		OK,				/* no change needed */
823 		UPDATE,				/* update pending */
824 		COMMIT				/* beacon sent, commit change */
825 	} sc_updateslot;			/* slot time update fsm */
826 	int			sc_slotupdate;	/* slot to advance fsm */
827 	struct ieee80211vap	*sc_bslot[ATH_BCBUF];
828 	int			sc_nbcnvaps;	/* # vaps with beacons */
829 
830 	struct callout		sc_cal_ch;	/* callout handle for cals */
831 	int			sc_lastlongcal;	/* last long cal completed */
832 	int			sc_lastcalreset;/* last cal reset done */
833 	int			sc_lastani;	/* last ANI poll */
834 	int			sc_lastshortcal;	/* last short calibration */
835 	HAL_BOOL		sc_doresetcal;	/* Yes, we're doing a reset cal atm */
836 	HAL_NODE_STATS		sc_halstats;	/* station-mode rssi stats */
837 	u_int			sc_tdmadbaprep;	/* TDMA DBA prep time */
838 	u_int			sc_tdmaswbaprep;/* TDMA SWBA prep time */
839 	u_int			sc_tdmaswba;	/* TDMA SWBA counter */
840 	u_int32_t		sc_tdmabintval;	/* TDMA beacon interval (TU) */
841 	u_int32_t		sc_tdmaguard;	/* TDMA guard time (usec) */
842 	u_int			sc_tdmaslotlen;	/* TDMA slot length (usec) */
843 	u_int32_t		sc_avgtsfdeltap;/* TDMA slot adjust (+) */
844 	u_int32_t		sc_avgtsfdeltam;/* TDMA slot adjust (-) */
845 	uint16_t		*sc_eepromdata;	/* Local eeprom data, if AR9100 */
846 	uint32_t		sc_txchainmask;	/* hardware TX chainmask */
847 	uint32_t		sc_rxchainmask;	/* hardware RX chainmask */
848 	uint32_t		sc_cur_txchainmask;	/* currently configured TX chainmask */
849 	uint32_t		sc_cur_rxchainmask;	/* currently configured RX chainmask */
850 	uint32_t		sc_rts_aggr_limit;	/* TX limit on RTS aggregates */
851 	int			sc_aggr_limit;	/* TX limit on all aggregates */
852 	int			sc_delim_min_pad;	/* Minimum delimiter count */
853 
854 	/* Queue limits */
855 
856 	/*
857 	 * To avoid queue starvation in congested conditions,
858 	 * these parameters tune the maximum number of frames
859 	 * queued to the data/mcastq before they're dropped.
860 	 *
861 	 * This is to prevent:
862 	 * + a single destination overwhelming everything, including
863 	 *   management/multicast frames;
864 	 * + multicast frames overwhelming everything (when the
865 	 *   air is sufficiently busy that cabq can't drain.)
866 	 * + A node in powersave shouldn't be allowed to exhaust
867 	 *   all available mbufs;
868 	 *
869 	 * These implement:
870 	 * + data_minfree is the maximum number of free buffers
871 	 *   overall to successfully allow a data frame.
872 	 *
873 	 * + mcastq_maxdepth is the maximum depth allowed of the cabq.
874 	 */
875 	int			sc_txq_node_maxdepth;
876 	int			sc_txq_data_minfree;
877 	int			sc_txq_mcastq_maxdepth;
878 	int			sc_txq_node_psq_maxdepth;
879 
880 	/*
881 	 * Software queue twiddles
882 	 *
883 	 * hwq_limit_nonaggr:
884 	 *		when to begin limiting non-aggregate frames to the
885 	 *		hardware queue, regardless of the TID.
886 	 * hwq_limit_aggr:
887 	 *		when to begin limiting A-MPDU frames to the
888 	 *		hardware queue, regardless of the TID.
889 	 * tid_hwq_lo:	how low the per-TID hwq count has to be before the
890 	 *		TID will be scheduled again
891 	 * tid_hwq_hi:	how many frames to queue to the HWQ before the TID
892 	 *		stops being scheduled.
893 	 */
894 	int			sc_hwq_limit_nonaggr;
895 	int			sc_hwq_limit_aggr;
896 	int			sc_tid_hwq_lo;
897 	int			sc_tid_hwq_hi;
898 
899 	/* DFS related state */
900 	void			*sc_dfs;	/* Used by an optional DFS module */
901 	int			sc_dodfs;	/* Whether to enable DFS rx filter bits */
902 	struct task		sc_dfstask;	/* DFS processing task */
903 
904 	/* Spectral related state */
905 	void			*sc_spectral;
906 	int			sc_dospectral;
907 
908 	/* LNA diversity related state */
909 	void			*sc_lna_div;
910 	int			sc_dolnadiv;
911 
912 	/* ALQ */
913 #ifdef	ATH_DEBUG_ALQ
914 	struct if_ath_alq sc_alq;
915 #endif
916 
917 	/* TX AMPDU handling */
918 	int			(*sc_addba_request)(struct ieee80211_node *,
919 				    struct ieee80211_tx_ampdu *, int, int, int);
920 	int			(*sc_addba_response)(struct ieee80211_node *,
921 				    struct ieee80211_tx_ampdu *, int, int, int);
922 	void			(*sc_addba_stop)(struct ieee80211_node *,
923 				    struct ieee80211_tx_ampdu *);
924 	void			(*sc_addba_response_timeout)
925 				    (struct ieee80211_node *,
926 				    struct ieee80211_tx_ampdu *);
927 	void			(*sc_bar_response)(struct ieee80211_node *ni,
928 				    struct ieee80211_tx_ampdu *tap,
929 				    int status);
930 
931 	/*
932 	 * Powersave state tracking.
933 	 *
934 	 * target/cur powerstate is the chip power state.
935 	 * target selfgen state is the self-generated frames
936 	 *   state.  The chip can be awake but transmitted frames
937 	 *   can have the PWRMGT bit set to 1 so the destination
938 	 *   thinks the node is asleep.
939 	 */
940 	HAL_POWER_MODE		sc_target_powerstate;
941 	HAL_POWER_MODE		sc_target_selfgen_state;
942 
943 	HAL_POWER_MODE		sc_cur_powerstate;
944 
945 	int			sc_powersave_refcnt;
946 
947 	/* ATH_PCI_* flags */
948 	uint32_t		sc_pci_devinfo;
949 };
950 
/*
 * Main softc lock (sc_mtx) — DragonFly lockmgr replacement for the
 * FreeBSD mtx.  Initialized with LK_CANRECURSE, so recursive
 * acquisition by the same thread is permitted.
 */
#define	ATH_LOCK_INIT(_sc) \
	lockinit(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
		 0, LK_CANRECURSE)
#define	ATH_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->sc_mtx)
#define	ATH_LOCK(_sc)		lockmgr(&(_sc)->sc_mtx, LK_EXCLUSIVE)
#define	ATH_UNLOCK(_sc)		lockmgr(&(_sc)->sc_mtx, LK_RELEASE)
/* Assert the caller does / does not hold sc_mtx exclusively. */
#define	ATH_LOCK_ASSERT(_sc)	KKASSERT(lockstatus(&(_sc)->sc_mtx, curthread) == LK_EXCLUSIVE)
#define	ATH_UNLOCK_ASSERT(_sc)	KKASSERT(lockstatus(&(_sc)->sc_mtx, curthread) != LK_EXCLUSIVE)
959 
960 /*
961  * The TX lock is non-reentrant and serialises the TX frame send
962  * and completion operations.
963  */
/*
 * The lock name ("<unit> TX lock") is formatted into sc_tx_mtx_name so
 * the softc owns the string's storage for the lifetime of the lock.
 * Unlike ATH_LOCK, this lock is initialized without LK_CANRECURSE.
 */
#define	ATH_TX_LOCK_INIT(_sc) do {\
	ksnprintf((_sc)->sc_tx_mtx_name,				\
	    sizeof((_sc)->sc_tx_mtx_name),				\
	    "%s TX lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	lockinit(&(_sc)->sc_tx_mtx, (_sc)->sc_tx_mtx_name,		\
		 0, 0);							\
	} while (0)
#define	ATH_TX_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->sc_tx_mtx)
#define	ATH_TX_LOCK(_sc)		lockmgr(&(_sc)->sc_tx_mtx, LK_EXCLUSIVE)
#define	ATH_TX_UNLOCK(_sc)		lockmgr(&(_sc)->sc_tx_mtx, LK_RELEASE)
/* Assert the caller does / does not hold sc_tx_mtx exclusively. */
#define	ATH_TX_LOCK_ASSERT(_sc)		KKASSERT(lockstatus(&(_sc)->sc_tx_mtx, curthread) == LK_EXCLUSIVE)
#define	ATH_TX_UNLOCK_ASSERT(_sc)	KKASSERT(lockstatus(&(_sc)->sc_tx_mtx, curthread) != LK_EXCLUSIVE)
977 
978 /*
979  * The PCU lock is non-recursive and should be treated as a spinlock.
980  * Although currently the interrupt code is run in netisr context and
981  * doesn't require this, this may change in the future.
982  * Please keep this in mind when protecting certain code paths
983  * with the PCU lock.
984  *
985  * The PCU lock is used to serialise access to the PCU so things such
986  * as TX, RX, state change (eg channel change), channel reset and updates
987  * from interrupt context (eg kickpcu, txqactive bits) do not clash.
988  *
989  * Although the current single-thread taskqueue mechanism protects the
990  * majority of these situations by simply serialising them, there are
991  * a few others which occur at the same time. These include the TX path
992  * (which only acquires ATH_LOCK when recycling buffers to the free list),
993  * ath_set_channel, the channel scanning API and perhaps quite a bit more.
994  */
/*
 * PCU lock — name ("<unit> PCU lock") is stored in sc_pcu_mtx_name.
 * Non-recursive; see the block comment above for the serialisation
 * rules this lock enforces.
 */
#define	ATH_PCU_LOCK_INIT(_sc) do {\
	ksnprintf((_sc)->sc_pcu_mtx_name,				\
	    sizeof((_sc)->sc_pcu_mtx_name),				\
	    "%s PCU lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	lockinit(&(_sc)->sc_pcu_mtx, (_sc)->sc_pcu_mtx_name,		\
		 0, 0);						\
	} while (0)
#define	ATH_PCU_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_LOCK(_sc)		lockmgr(&(_sc)->sc_pcu_mtx, LK_EXCLUSIVE)
#define	ATH_PCU_UNLOCK(_sc)		lockmgr(&(_sc)->sc_pcu_mtx, LK_RELEASE)
/* Assert the caller does / does not hold sc_pcu_mtx exclusively. */
#define	ATH_PCU_LOCK_ASSERT(_sc)	KKASSERT(lockstatus(&(_sc)->sc_pcu_mtx, curthread) == LK_EXCLUSIVE)
#define	ATH_PCU_UNLOCK_ASSERT(_sc)	KKASSERT(lockstatus(&(_sc)->sc_pcu_mtx, curthread) != LK_EXCLUSIVE)
1008 
1009 /*
1010  * The RX lock is primarily a(nother) workaround to ensure that the
1011  * RX FIFO/list isn't modified by various execution paths.
1012  * Even though RX occurs in a single context (the ath taskqueue), the
1013  * RX path can be executed via various reset/channel change paths.
1014  */
/*
 * RX lock — name ("<unit> RX lock") is stored in sc_rx_mtx_name.
 * Non-recursive; protects the RX FIFO/list against concurrent
 * reset/channel-change paths (see comment above).
 */
#define	ATH_RX_LOCK_INIT(_sc) do {\
	ksnprintf((_sc)->sc_rx_mtx_name,					\
	    sizeof((_sc)->sc_rx_mtx_name),				\
	    "%s RX lock",						\
	    device_get_nameunit((_sc)->sc_dev));			\
	lockinit(&(_sc)->sc_rx_mtx, (_sc)->sc_rx_mtx_name,		\
		 0, 0);					\
	} while (0)
#define	ATH_RX_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->sc_rx_mtx)
#define	ATH_RX_LOCK(_sc)		lockmgr(&(_sc)->sc_rx_mtx, LK_EXCLUSIVE)
#define	ATH_RX_UNLOCK(_sc)		lockmgr(&(_sc)->sc_rx_mtx, LK_RELEASE)
/* Assert the caller does / does not hold sc_rx_mtx exclusively. */
#define	ATH_RX_LOCK_ASSERT(_sc)		KKASSERT(lockstatus(&(_sc)->sc_rx_mtx, curthread) == LK_EXCLUSIVE)
#define	ATH_RX_UNLOCK_ASSERT(_sc)	KKASSERT(lockstatus(&(_sc)->sc_rx_mtx, curthread) != LK_EXCLUSIVE)
1028 
1029 #define	ATH_TXQ_SETUP(sc, i)	((sc)->sc_txqsetup & (1<<i))
1030 
/*
 * TX buffer free-list lock.  The lock name ("<unit>_buf", e.g.
 * "ath0_buf") is formatted into sc_txname, which the softc owns.
 */
#define	ATH_TXBUF_LOCK_INIT(_sc) do { \
	ksnprintf((_sc)->sc_txname, sizeof((_sc)->sc_txname), "%s_buf", \
		device_get_nameunit((_sc)->sc_dev)); \
	lockinit(&(_sc)->sc_txbuflock, (_sc)->sc_txname, 0, 0); \
} while (0)
#define	ATH_TXBUF_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_LOCK(_sc)		lockmgr(&(_sc)->sc_txbuflock, LK_EXCLUSIVE)
#define	ATH_TXBUF_UNLOCK(_sc)		lockmgr(&(_sc)->sc_txbuflock, LK_RELEASE)
/* Assert the caller does / does not hold sc_txbuflock exclusively. */
#define	ATH_TXBUF_LOCK_ASSERT(_sc) \
	KKASSERT(lockstatus(&(_sc)->sc_txbuflock, curthread) == LK_EXCLUSIVE)
#define	ATH_TXBUF_UNLOCK_ASSERT(_sc) \
	KKASSERT(lockstatus(&(_sc)->sc_txbuflock, curthread) != LK_EXCLUSIVE)
1043 
/*
 * TX status / EDMA completion lock.
 *
 * The lock name is "%s_txcomp" (e.g. "ath0_txcomp"), matching the
 * sc_txcompname field comment in the softc and FreeBSD upstream.
 * It was previously "%s_buf", which collided with the txbuf lock's
 * name from ATH_TXBUF_LOCK_INIT, making the two locks
 * indistinguishable in lock diagnostics.
 */
#define	ATH_TXSTATUS_LOCK_INIT(_sc) do { \
	ksnprintf((_sc)->sc_txcompname, sizeof((_sc)->sc_txcompname), \
		"%s_txcomp", \
		device_get_nameunit((_sc)->sc_dev)); \
	lockinit(&(_sc)->sc_txcomplock, (_sc)->sc_txcompname, 0, \
		0); \
} while (0)
#define	ATH_TXSTATUS_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_LOCK(_sc)		lockmgr(&(_sc)->sc_txcomplock, LK_EXCLUSIVE)
#define	ATH_TXSTATUS_UNLOCK(_sc)	lockmgr(&(_sc)->sc_txcomplock, LK_RELEASE)
/* Assert the caller holds sc_txcomplock exclusively. */
#define	ATH_TXSTATUS_LOCK_ASSERT(_sc) \
	KKASSERT(lockstatus(&(_sc)->sc_txcomplock, curthread) == LK_EXCLUSIVE)
1056 
/* Driver entry points (implemented in if_ath.c). */
int	ath_attach(u_int16_t, struct ath_softc *);	/* devid + softc; 0 on success */
int	ath_detach(struct ath_softc *);
void	ath_resume(struct ath_softc *);
void	ath_suspend(struct ath_softc *);
void	ath_shutdown(struct ath_softc *);
void	ath_intr(void *);				/* interrupt handler; arg is the softc */
1063 
#if defined(__DragonFly__)

/*
 * FreeBSD ifnet send-queue locking has no direct DragonFly
 * equivalent here; these compile to nothing (XXX placeholder).
 */
#define IF_LOCK(ifsnd)		/* XXX */
#define IF_UNLOCK(ifsnd)	/* XXX */

#endif
1070 
1071 /*
1072  * HAL definitions to comply with local coding convention.
1073  */
/*
 * Thin dispatch wrappers: each macro invokes the corresponding ah_*
 * method pointer in the HAL ops vector, forwarding its arguments.
 */
#define	ath_hal_detach(_ah) \
	((*(_ah)->ah_detach)((_ah)))
#define	ath_hal_reset(_ah, _opmode, _chan, _fullreset, _resettype, _pstatus) \
	((*(_ah)->ah_reset)((_ah), (_opmode), (_chan), (_fullreset), \
	    (_resettype), (_pstatus)))
/* Pack MAC version/revision into one value: (version << 4) | revision. */
#define	ath_hal_macversion(_ah) \
	(((_ah)->ah_macVersion << 4) | ((_ah)->ah_macRev))
#define	ath_hal_getratetable(_ah, _mode) \
	((*(_ah)->ah_getRateTable)((_ah), (_mode)))
#define	ath_hal_getmac(_ah, _mac) \
	((*(_ah)->ah_getMacAddress)((_ah), (_mac)))
#define	ath_hal_setmac(_ah, _mac) \
	((*(_ah)->ah_setMacAddress)((_ah), (_mac)))
#define	ath_hal_getbssidmask(_ah, _mask) \
	((*(_ah)->ah_getBssIdMask)((_ah), (_mask)))
#define	ath_hal_setbssidmask(_ah, _mask) \
	((*(_ah)->ah_setBssIdMask)((_ah), (_mask)))
#define	ath_hal_intrset(_ah, _mask) \
	((*(_ah)->ah_setInterrupts)((_ah), (_mask)))
#define	ath_hal_intrget(_ah) \
	((*(_ah)->ah_getInterrupts)((_ah)))
#define	ath_hal_intrpend(_ah) \
	((*(_ah)->ah_isInterruptPending)((_ah)))
#define	ath_hal_getisr(_ah, _pmask) \
	((*(_ah)->ah_getPendingInterrupts)((_ah), (_pmask)))
#define	ath_hal_updatetxtriglevel(_ah, _inc) \
	((*(_ah)->ah_updateTxTrigLevel)((_ah), (_inc)))
/*
 * Both power macros wrap ah_setPowerMode; they differ only in the
 * final flag argument (AH_TRUE vs AH_FALSE).
 */
#define	ath_hal_setpower(_ah, _mode) \
	((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_TRUE))
#define	ath_hal_setselfgenpower(_ah, _mode) \
	((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_FALSE))
#define	ath_hal_keycachesize(_ah) \
	((*(_ah)->ah_getKeyCacheSize)((_ah)))
#define	ath_hal_keyreset(_ah, _ix) \
	((*(_ah)->ah_resetKeyCacheEntry)((_ah), (_ix)))
/* NB: final AH_FALSE argument to ah_setKeyCacheEntry is hardwired here. */
#define	ath_hal_keyset(_ah, _ix, _pk, _mac) \
	((*(_ah)->ah_setKeyCacheEntry)((_ah), (_ix), (_pk), (_mac), AH_FALSE))
#define	ath_hal_keyisvalid(_ah, _ix) \
	(((*(_ah)->ah_isKeyCacheEntryValid)((_ah), (_ix))))
#define	ath_hal_keysetmac(_ah, _ix, _mac) \
	((*(_ah)->ah_setKeyCacheEntryMac)((_ah), (_ix), (_mac)))
#define	ath_hal_getrxfilter(_ah) \
	((*(_ah)->ah_getRxFilter)((_ah)))
#define	ath_hal_setrxfilter(_ah, _filter) \
	((*(_ah)->ah_setRxFilter)((_ah), (_filter)))
#define	ath_hal_setmcastfilter(_ah, _mfilt0, _mfilt1) \
	((*(_ah)->ah_setMulticastFilter)((_ah), (_mfilt0), (_mfilt1)))
/* Waits on the DMA address (bf_daddr) of the given ath_buf. */
#define	ath_hal_waitforbeacon(_ah, _bf) \
	((*(_ah)->ah_waitForBeaconDone)((_ah), (_bf)->bf_daddr))
#define	ath_hal_putrxbuf(_ah, _bufaddr, _rxq) \
	((*(_ah)->ah_setRxDP)((_ah), (_bufaddr), (_rxq)))
/* NB: common across all chips */
#define	AR_TSF_L32	0x804c	/* MAC local clock lower 32 bits */
/* Direct register read; no HAL method exists for the 32-bit TSF. */
#define	ath_hal_gettsf32(_ah) \
	OS_REG_READ(_ah, AR_TSF_L32)
#define	ath_hal_gettsf64(_ah) \
	((*(_ah)->ah_getTsf64)((_ah)))
#define	ath_hal_settsf64(_ah, _val) \
	((*(_ah)->ah_setTsf64)((_ah), (_val)))
#define	ath_hal_resettsf(_ah) \
	((*(_ah)->ah_resetTsf)((_ah)))
#define	ath_hal_rxena(_ah) \
	((*(_ah)->ah_enableReceive)((_ah)))
#define	ath_hal_puttxbuf(_ah, _q, _bufaddr) \
	((*(_ah)->ah_setTxDP)((_ah), (_q), (_bufaddr)))
#define	ath_hal_gettxbuf(_ah, _q) \
	((*(_ah)->ah_getTxDP)((_ah), (_q)))
#define	ath_hal_numtxpending(_ah, _q) \
	((*(_ah)->ah_numTxPending)((_ah), (_q)))
#define	ath_hal_getrxbuf(_ah, _rxq) \
	((*(_ah)->ah_getRxDP)((_ah), (_rxq)))
#define	ath_hal_txstart(_ah, _q) \
	((*(_ah)->ah_startTxDma)((_ah), (_q)))
#define	ath_hal_setchannel(_ah, _chan) \
	((*(_ah)->ah_setChannel)((_ah), (_chan)))
#define	ath_hal_calibrate(_ah, _chan, _iqcal) \
	((*(_ah)->ah_perCalibration)((_ah), (_chan), (_iqcal)))
/* NB: third argument to ah_perCalibrationN is hardwired to 0x1 here. */
#define	ath_hal_calibrateN(_ah, _chan, _lcal, _isdone) \
	((*(_ah)->ah_perCalibrationN)((_ah), (_chan), 0x1, (_lcal), (_isdone)))
#define	ath_hal_calreset(_ah, _chan) \
	((*(_ah)->ah_resetCalValid)((_ah), (_chan)))
#define	ath_hal_setledstate(_ah, _state) \
	((*(_ah)->ah_setLedState)((_ah), (_state)))
#define	ath_hal_beaconinit(_ah, _nextb, _bperiod) \
	((*(_ah)->ah_beaconInit)((_ah), (_nextb), (_bperiod)))
#define	ath_hal_beaconreset(_ah) \
	((*(_ah)->ah_resetStationBeaconTimers)((_ah)))
#define	ath_hal_beaconsettimers(_ah, _bt) \
	((*(_ah)->ah_setBeaconTimers)((_ah), (_bt)))
#define	ath_hal_beacontimers(_ah, _bs) \
	((*(_ah)->ah_setStationBeaconTimers)((_ah), (_bs)))
#define	ath_hal_getnexttbtt(_ah) \
	((*(_ah)->ah_getNextTBTT)((_ah)))
#define	ath_hal_setassocid(_ah, _bss, _associd) \
	((*(_ah)->ah_writeAssocid)((_ah), (_bss), (_associd)))
#define	ath_hal_phydisable(_ah) \
	((*(_ah)->ah_phyDisable)((_ah)))
#define	ath_hal_setopmode(_ah) \
	((*(_ah)->ah_setPCUConfig)((_ah)))
#define	ath_hal_stoptxdma(_ah, _qnum) \
	((*(_ah)->ah_stopTxDma)((_ah), (_qnum)))
#define	ath_hal_stoppcurecv(_ah) \
	((*(_ah)->ah_stopPcuReceive)((_ah)))
#define	ath_hal_startpcurecv(_ah) \
	((*(_ah)->ah_startPcuReceive)((_ah)))
#define	ath_hal_stopdmarecv(_ah) \
	((*(_ah)->ah_stopDmaReceive)((_ah)))
#define	ath_hal_getdiagstate(_ah, _id, _indata, _insize, _outdata, _outsize) \
	((*(_ah)->ah_getDiagState)((_ah), (_id), \
		(_indata), (_insize), (_outdata), (_outsize)))
/* Diag code 29 retrieves the fatal-state snapshot; no input payload. */
#define	ath_hal_getfatalstate(_ah, _outdata, _outsize) \
	ath_hal_getdiagstate(_ah, 29, NULL, 0, (_outdata), _outsize)
#define	ath_hal_setuptxqueue(_ah, _type, _irq) \
	((*(_ah)->ah_setupTxQueue)((_ah), (_type), (_irq)))
#define	ath_hal_resettxqueue(_ah, _q) \
	((*(_ah)->ah_resetTxQueue)((_ah), (_q)))
#define	ath_hal_releasetxqueue(_ah, _q) \
	((*(_ah)->ah_releaseTxQueue)((_ah), (_q)))
#define	ath_hal_gettxqueueprops(_ah, _q, _qi) \
	((*(_ah)->ah_getTxQueueProps)((_ah), (_q), (_qi)))
#define	ath_hal_settxqueueprops(_ah, _q, _qi) \
	((*(_ah)->ah_setTxQueueProps)((_ah), (_q), (_qi)))
/* NB: common across all chips */
#define	AR_Q_TXE	0x0840	/* MAC Transmit Queue enable */
#define	ath_hal_txqenabled(_ah, _qnum) \
	(OS_REG_READ(_ah, AR_Q_TXE) & (1<<(_qnum)))
#define	ath_hal_getrfgain(_ah) \
	((*(_ah)->ah_getRfGain)((_ah)))
#define	ath_hal_getdefantenna(_ah) \
	((*(_ah)->ah_getDefAntenna)((_ah)))
#define	ath_hal_setdefantenna(_ah, _ant) \
	((*(_ah)->ah_setDefAntenna)((_ah), (_ant)))
#define	ath_hal_rxmonitor(_ah, _arg, _chan) \
	((*(_ah)->ah_rxMonitor)((_ah), (_arg), (_chan)))
#define	ath_hal_ani_poll(_ah, _chan) \
	((*(_ah)->ah_aniPoll)((_ah), (_chan)))
#define	ath_hal_mibevent(_ah, _stats) \
	((*(_ah)->ah_procMibEvent)((_ah), (_stats)))
#define	ath_hal_setslottime(_ah, _us) \
	((*(_ah)->ah_setSlotTime)((_ah), (_us)))
#define	ath_hal_getslottime(_ah) \
	((*(_ah)->ah_getSlotTime)((_ah)))
#define	ath_hal_setacktimeout(_ah, _us) \
	((*(_ah)->ah_setAckTimeout)((_ah), (_us)))
#define	ath_hal_getacktimeout(_ah) \
	((*(_ah)->ah_getAckTimeout)((_ah)))
#define	ath_hal_setctstimeout(_ah, _us) \
	((*(_ah)->ah_setCTSTimeout)((_ah), (_us)))
#define	ath_hal_getctstimeout(_ah) \
	((*(_ah)->ah_getCTSTimeout)((_ah)))
/*
 * Capability wrappers.  The convention in the macros below:
 * "has*" probes capability presence (capability param 0) and
 * "get*" typically queries the current/enabled setting (param 1);
 * each evaluates true when the HAL method returns HAL_OK.
 */
#define	ath_hal_getcapability(_ah, _cap, _param, _result) \
	((*(_ah)->ah_getCapability)((_ah), (_cap), (_param), (_result)))
#define	ath_hal_setcapability(_ah, _cap, _param, _v, _status) \
	((*(_ah)->ah_setCapability)((_ah), (_cap), (_param), (_v), (_status)))
#define	ath_hal_ciphersupported(_ah, _cipher) \
	(ath_hal_getcapability(_ah, HAL_CAP_CIPHER, _cipher, NULL) == HAL_OK)
#define	ath_hal_getregdomain(_ah, _prd) \
	(ath_hal_getcapability(_ah, HAL_CAP_REG_DMN, 0, (_prd)) == HAL_OK)
#define	ath_hal_setregdomain(_ah, _rd) \
	ath_hal_setcapability(_ah, HAL_CAP_REG_DMN, 0, _rd, NULL)
/* Direct field access, not a capability query. */
#define	ath_hal_getcountrycode(_ah, _pcc) \
	(*(_pcc) = (_ah)->ah_countryCode)
#define	ath_hal_gettkipmic(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_MIC, 1, NULL) == HAL_OK)
#define	ath_hal_settkipmic(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TKIP_MIC, 1, _v, NULL)
#define	ath_hal_hastkipsplit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 0, NULL) == HAL_OK)
#define	ath_hal_gettkipsplit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, NULL) == HAL_OK)
#define	ath_hal_settkipsplit(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, _v, NULL)
#define	ath_hal_haswmetkipmic(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_WME_TKIPMIC, 0, NULL) == HAL_OK)
#define	ath_hal_hwphycounters(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_PHYCOUNTERS, 0, NULL) == HAL_OK)
#define	ath_hal_hasdiversity(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 0, NULL) == HAL_OK)
#define	ath_hal_getdiversity(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 1, NULL) == HAL_OK)
#define	ath_hal_setdiversity(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_DIVERSITY, 1, _v, NULL)
#define	ath_hal_getantennaswitch(_ah) \
	((*(_ah)->ah_getAntennaSwitch)((_ah)))
#define	ath_hal_setantennaswitch(_ah, _v) \
	((*(_ah)->ah_setAntennaSwitch)((_ah), (_v)))
#define	ath_hal_getdiag(_ah, _pv) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIAG, 0, _pv) == HAL_OK)
#define	ath_hal_setdiag(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_DIAG, 0, _v, NULL)
#define	ath_hal_getnumtxqueues(_ah, _pv) \
	(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXQUEUES, 0, _pv) == HAL_OK)
#define	ath_hal_hasveol(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_VEOL, 0, NULL) == HAL_OK)
#define	ath_hal_hastxpowlimit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 0, NULL) == HAL_OK)
#define	ath_hal_settxpowlimit(_ah, _pow) \
	((*(_ah)->ah_setTxPowerLimit)((_ah), (_pow)))
/* TXPOW params: 1 = limit, 2 = max power, 3 = scale factor. */
#define	ath_hal_gettxpowlimit(_ah, _ppow) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 1, _ppow) == HAL_OK)
#define	ath_hal_getmaxtxpow(_ah, _ppow) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 2, _ppow) == HAL_OK)
#define	ath_hal_gettpscale(_ah, _scale) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 3, _scale) == HAL_OK)
#define	ath_hal_settpscale(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TXPOW, 3, _v, NULL)
#define	ath_hal_hastpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC, 0, NULL) == HAL_OK)
#define	ath_hal_gettpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC, 1, NULL) == HAL_OK)
#define	ath_hal_settpc(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC, 1, _v, NULL)
#define	ath_hal_hasbursting(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BURST, 0, NULL) == HAL_OK)
#define	ath_hal_setmcastkeysearch(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, _v, NULL)
#define	ath_hal_hasmcastkeysearch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL) == HAL_OK)
#define	ath_hal_getmcastkeysearch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 1, NULL) == HAL_OK)
#define	ath_hal_hasfastframes(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_FASTFRAME, 0, NULL) == HAL_OK)
#define	ath_hal_hasbssidmask(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMASK, 0, NULL) == HAL_OK)
#define	ath_hal_hasbssidmatch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMATCH, 0, NULL) == HAL_OK)
#define	ath_hal_hastsfadjust(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 0, NULL) == HAL_OK)
#define	ath_hal_gettsfadjust(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 1, NULL) == HAL_OK)
#define	ath_hal_settsfadjust(_ah, _onoff) \
	ath_hal_setcapability(_ah, HAL_CAP_TSF_ADJUST, 1, _onoff, NULL)
#define	ath_hal_hasrfsilent(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 0, NULL) == HAL_OK)
#define	ath_hal_getrfkill(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 1, NULL) == HAL_OK)
#define	ath_hal_setrfkill(_ah, _onoff) \
	ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 1, _onoff, NULL)
#define	ath_hal_getrfsilent(_ah, _prfsilent) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 2, _prfsilent) == HAL_OK)
#define	ath_hal_setrfsilent(_ah, _rfsilent) \
	ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 2, _rfsilent, NULL)
#define	ath_hal_gettpack(_ah, _ptpack) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC_ACK, 0, _ptpack) == HAL_OK)
#define	ath_hal_settpack(_ah, _tpack) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC_ACK, 0, _tpack, NULL)
#define	ath_hal_gettpcts(_ah, _ptpcts) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC_CTS, 0, _ptpcts) == HAL_OK)
#define	ath_hal_settpcts(_ah, _tpcts) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC_CTS, 0, _tpcts, NULL)
/* Interference mitigation (ANI): presence vs enable are distinct params. */
#define	ath_hal_hasintmit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_PRESENT, NULL) == HAL_OK)
#define	ath_hal_getintmit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_ENABLE, NULL) == HAL_OK)
#define	ath_hal_setintmit(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_ENABLE, _v, NULL)
#define	ath_hal_hasmybeacon(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DO_MYBEACON, 1, NULL) == HAL_OK)

#define	ath_hal_hasenforcetxop(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 0, NULL) == HAL_OK)
#define	ath_hal_getenforcetxop(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 1, NULL) == HAL_OK)
#define	ath_hal_setenforcetxop(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_ENFORCE_TXOP, 1, _v, NULL)

#define	ath_hal_hasrxlnamixer(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RX_LNA_MIXING, 0, NULL) == HAL_OK)

#define	ath_hal_hasdivantcomb(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ANT_DIV_COMB, 0, NULL) == HAL_OK)
#define	ath_hal_hasldpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_LDPC, 0, NULL) == HAL_OK)
#define	ath_hal_hasldpcwar(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_LDPCWAR, 0, NULL) == HAL_OK)
/* EDMA definitions (AR93xx-style enhanced DMA; queried via capabilities) */
#define	ath_hal_hasedma(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENHANCED_DMA_SUPPORT,	\
	0, NULL) == HAL_OK)
#define	ath_hal_getrxfifodepth(_ah, _qtype, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXFIFODEPTH, _qtype, _req)	\
	== HAL_OK)
#define	ath_hal_getntxmaps(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXMAPS, 0, _req)	\
	== HAL_OK)
#define	ath_hal_gettxdesclen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXDESCLEN, 0, _req)		\
	== HAL_OK)
#define	ath_hal_gettxstatuslen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXSTATUSLEN, 0, _req)	\
	== HAL_OK)
#define	ath_hal_getrxstatuslen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXSTATUSLEN, 0, _req)	\
	== HAL_OK)
/* NB: a set-capability; the (int) cast quiets the comparison. */
#define	ath_hal_setrxbufsize(_ah, _req) \
	((int)ath_hal_setcapability(_ah, HAL_CAP_RXBUFSIZE, 0, _req, NULL) \
	== HAL_OK)

#define	ath_hal_getchannoise(_ah, _c) \
	((*(_ah)->ah_getChanNoise)((_ah), (_c)))
1378 
/* 802.11n HAL methods */
#define	ath_hal_getrxchainmask(_ah, _prxchainmask) \
	(ath_hal_getcapability(_ah, HAL_CAP_RX_CHAINMASK, 0, _prxchainmask))
#define	ath_hal_gettxchainmask(_ah, _ptxchainmask) \
	(ath_hal_getcapability(_ah, HAL_CAP_TX_CHAINMASK, 0, _ptxchainmask))
#define	ath_hal_setrxchainmask(_ah, _rx) \
	(ath_hal_setcapability(_ah, HAL_CAP_RX_CHAINMASK, 1, _rx, NULL))
#define	ath_hal_settxchainmask(_ah, _tx) \
	(ath_hal_setcapability(_ah, HAL_CAP_TX_CHAINMASK, 1, _tx, NULL))
#define	ath_hal_split4ktrans(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_SPLIT_4KB_TRANS, \
	0, NULL) == HAL_OK)
#define	ath_hal_self_linked_final_rxdesc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXDESC_SELFLINK, \
	0, NULL) == HAL_OK)
#define	ath_hal_gtxto_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_GTXTO, 0, NULL) == HAL_OK)
#define	ath_hal_has_long_rxdesc_tsf(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_LONG_RXDESC_TSF, \
	0, NULL) == HAL_OK)
#define	ath_hal_setuprxdesc(_ah, _ds, _size, _intreq) \
	((*(_ah)->ah_setupRxDesc)((_ah), (_ds), (_size), (_intreq)))
/* NB: fifth argument (TSF) to ah_procRxDesc is hardwired to 0 here. */
#define	ath_hal_rxprocdesc(_ah, _ds, _dspa, _dsnext, _rs) \
	((*(_ah)->ah_procRxDesc)((_ah), (_ds), (_dspa), (_dsnext), 0, (_rs)))
/* NB: trailing three rate-series args to ah_setupTxDesc are zeroed here. */
#define	ath_hal_setuptxdesc(_ah, _ds, _plen, _hlen, _atype, _txpow, \
		_txr0, _txtr0, _keyix, _ant, _flags, \
		_rtsrate, _rtsdura) \
	((*(_ah)->ah_setupTxDesc)((_ah), (_ds), (_plen), (_hlen), (_atype), \
		(_txpow), (_txr0), (_txtr0), (_keyix), (_ant), \
		(_flags), (_rtsrate), (_rtsdura), 0, 0, 0))
#define	ath_hal_setupxtxdesc(_ah, _ds, \
		_txr1, _txtr1, _txr2, _txtr2, _txr3, _txtr3) \
	((*(_ah)->ah_setupXTxDesc)((_ah), (_ds), \
		(_txr1), (_txtr1), (_txr2), (_txtr2), (_txr3), (_txtr3)))
#define	ath_hal_filltxdesc(_ah, _ds, _b, _l, _did, _qid, _first, _last, _ds0) \
	((*(_ah)->ah_fillTxDesc)((_ah), (_ds), (_b), (_l), (_did), (_qid), \
		(_first), (_last), (_ds0)))
#define	ath_hal_txprocdesc(_ah, _ds, _ts) \
	((*(_ah)->ah_procTxDesc)((_ah), (_ds), (_ts)))
#define	ath_hal_gettxintrtxqs(_ah, _txqs) \
	((*(_ah)->ah_getTxIntrQueue)((_ah), (_txqs)))
#define ath_hal_gettxcompletionrates(_ah, _ds, _rates, _tries) \
	((*(_ah)->ah_getTxCompletionRates)((_ah), (_ds), (_rates), (_tries)))
#define ath_hal_settxdesclink(_ah, _ds, _link) \
	((*(_ah)->ah_setTxDescLink)((_ah), (_ds), (_link)))
#define ath_hal_gettxdesclink(_ah, _ds, _link) \
	((*(_ah)->ah_getTxDescLink)((_ah), (_ds), (_link)))
#define ath_hal_gettxdesclinkptr(_ah, _ds, _linkptr) \
	((*(_ah)->ah_getTxDescLinkPtr)((_ah), (_ds), (_linkptr)))
#define	ath_hal_setuptxstatusring(_ah, _tsstart, _tspstart, _size) \
	((*(_ah)->ah_setupTxStatusRing)((_ah), (_tsstart), (_tspstart), \
		(_size)))
#define	ath_hal_gettxrawtxdesc(_ah, _txstatus) \
	((*(_ah)->ah_getTxRawTxDesc)((_ah), (_txstatus)))

/* A-MPDU aggregate descriptor chain setup. */
#define	ath_hal_setupfirsttxdesc(_ah, _ds, _aggrlen, _flags, _txpower, \
		_txr0, _txtr0, _antm, _rcr, _rcd) \
	((*(_ah)->ah_setupFirstTxDesc)((_ah), (_ds), (_aggrlen), (_flags), \
	(_txpower), (_txr0), (_txtr0), (_antm), (_rcr), (_rcd)))
#define	ath_hal_chaintxdesc(_ah, _ds, _bl, _sl, _pktlen, _hdrlen, _type, \
	_keyix, _cipher, _delims, _first, _last, _lastaggr) \
	((*(_ah)->ah_chainTxDesc)((_ah), (_ds), (_bl), (_sl), \
	(_pktlen), (_hdrlen), (_type), (_keyix), (_cipher), (_delims), \
	(_first), (_last), (_lastaggr)))
#define	ath_hal_setuplasttxdesc(_ah, _ds, _ds0) \
	((*(_ah)->ah_setupLastTxDesc)((_ah), (_ds), (_ds0)))

#define	ath_hal_set11nratescenario(_ah, _ds, _dur, _rt, _series, _ns, _flags) \
	((*(_ah)->ah_set11nRateScenario)((_ah), (_ds), (_dur), (_rt), \
	(_series), (_ns), (_flags)))

#define	ath_hal_set11n_aggr_first(_ah, _ds, _len, _num) \
	((*(_ah)->ah_set11nAggrFirst)((_ah), (_ds), (_len), (_num)))
#define	ath_hal_set11n_aggr_middle(_ah, _ds, _num) \
	((*(_ah)->ah_set11nAggrMiddle)((_ah), (_ds), (_num)))
#define	ath_hal_set11n_aggr_last(_ah, _ds) \
	((*(_ah)->ah_set11nAggrLast)((_ah), (_ds)))

#define	ath_hal_set11nburstduration(_ah, _ds, _dur) \
	((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
#define	ath_hal_clr11n_aggr(_ah, _ds) \
	((*(_ah)->ah_clr11nAggr)((_ah), (_ds)))
#define	ath_hal_set11n_virtmorefrag(_ah, _ds, _v) \
	((*(_ah)->ah_set11nVirtMoreFrag)((_ah), (_ds), (_v)))

/* GPIO pin configuration and access. */
#define	ath_hal_gpioCfgOutput(_ah, _gpio, _type) \
	((*(_ah)->ah_gpioCfgOutput)((_ah), (_gpio), (_type)))
#define	ath_hal_gpioset(_ah, _gpio, _b) \
	((*(_ah)->ah_gpioSet)((_ah), (_gpio), (_b)))
#define	ath_hal_gpioget(_ah, _gpio) \
	((*(_ah)->ah_gpioGet)((_ah), (_gpio)))
#define	ath_hal_gpiosetintr(_ah, _gpio, _b) \
	((*(_ah)->ah_gpioSetIntr)((_ah), (_gpio), (_b)))
1472 
1473 /*
1474  * PCIe suspend/resume/poweron/poweroff related macros
1475  */
1476 #define	ath_hal_enablepcie(_ah, _restore, _poweroff) \
1477 	((*(_ah)->ah_configPCIE)((_ah), (_restore), (_poweroff)))
1478 #define	ath_hal_disablepcie(_ah) \
1479 	((*(_ah)->ah_disablePCIE)((_ah)))
1480 
1481 /*
1482  * This is badly-named; you need to set the correct parameters
1483  * to begin to receive useful radar events; and even then
1484  * it doesn't "enable" DFS. See the ath_dfs/null/ module for
1485  * more information.
1486  */
1487 #define	ath_hal_enabledfs(_ah, _param) \
1488 	((*(_ah)->ah_enableDfs)((_ah), (_param)))
1489 #define	ath_hal_getdfsthresh(_ah, _param) \
1490 	((*(_ah)->ah_getDfsThresh)((_ah), (_param)))
1491 #define	ath_hal_getdfsdefaultthresh(_ah, _param) \
1492 	((*(_ah)->ah_getDfsDefaultThresh)((_ah), (_param)))
1493 #define	ath_hal_procradarevent(_ah, _rxs, _fulltsf, _buf, _event) \
1494 	((*(_ah)->ah_procRadarEvent)((_ah), (_rxs), (_fulltsf), \
1495 	(_buf), (_event)))
1496 #define	ath_hal_is_fast_clock_enabled(_ah) \
1497 	((*(_ah)->ah_isFastClockEnabled)((_ah)))
1498 #define	ath_hal_radar_wait(_ah, _chan) \
1499 	((*(_ah)->ah_radarWait)((_ah), (_chan)))
1500 #define	ath_hal_get_mib_cycle_counts(_ah, _sample) \
1501 	((*(_ah)->ah_getMibCycleCounts)((_ah), (_sample)))
1502 #define	ath_hal_get_chan_ext_busy(_ah) \
1503 	((*(_ah)->ah_get11nExtBusy)((_ah)))
1504 #define	ath_hal_setchainmasks(_ah, _txchainmask, _rxchainmask) \
1505 	((*(_ah)->ah_setChainMasks)((_ah), (_txchainmask), (_rxchainmask)))
1506 
/*
 * Spectral scan support: capability probe plus get/set of the
 * spectral configuration and start/stop control, dispatched to the
 * HAL's ah_spectral* methods.
 */
#define	ath_hal_spectral_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_SPECTRAL_SCAN, 0, NULL) == HAL_OK)
#define	ath_hal_spectral_get_config(_ah, _p) \
	((*(_ah)->ah_spectralGetConfig)((_ah), (_p)))
#define	ath_hal_spectral_configure(_ah, _p) \
	((*(_ah)->ah_spectralConfigure)((_ah), (_p)))
#define	ath_hal_spectral_start(_ah) \
	((*(_ah)->ah_spectralStart)((_ah)))
#define	ath_hal_spectral_stop(_ah) \
	((*(_ah)->ah_spectralStop)((_ah)))
1517 
/*
 * Bluetooth coexistence support: capability probe plus dispatch
 * wrappers around the HAL's ah_btCoex* methods.
 *
 * Note: a duplicate, token-identical definition of
 * ath_hal_btcoex_set_weights was removed here; such a redefinition
 * is benign per C11 6.10.3 but served no purpose.
 */
#define	ath_hal_btcoex_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BT_COEX, 0, NULL) == HAL_OK)
#define	ath_hal_btcoex_set_info(_ah, _info) \
	((*(_ah)->ah_btCoexSetInfo)((_ah), (_info)))
#define	ath_hal_btcoex_set_config(_ah, _cfg) \
	((*(_ah)->ah_btCoexSetConfig)((_ah), (_cfg)))
#define	ath_hal_btcoex_set_qcu_thresh(_ah, _qcuid) \
	((*(_ah)->ah_btCoexSetQcuThresh)((_ah), (_qcuid)))
#define	ath_hal_btcoex_set_weights(_ah, _weight) \
	((*(_ah)->ah_btCoexSetWeights)((_ah), (_weight)))
#define	ath_hal_btcoex_set_bmiss_thresh(_ah, _thr) \
	((*(_ah)->ah_btCoexSetBmissThresh)((_ah), (_thr)))
#define	ath_hal_btcoex_set_parameter(_ah, _attrib, _val) \
	((*(_ah)->ah_btCoexSetParameter)((_ah), (_attrib), (_val)))
#define	ath_hal_btcoex_enable(_ah) \
	((*(_ah)->ah_btCoexEnable)((_ah)))
#define	ath_hal_btcoex_disable(_ah) \
	((*(_ah)->ah_btCoexDisable)((_ah)))
1538 
/*
 * Antenna diversity / LNA combining configuration: get and set the
 * current configuration via the HAL's ah_divLnaConfGet/Set methods.
 */
#define	ath_hal_div_comb_conf_get(_ah, _conf) \
	((*(_ah)->ah_divLnaConfGet)((_ah), (_conf)))
#define	ath_hal_div_comb_conf_set(_ah, _conf) \
	((*(_ah)->ah_divLnaConfSet)((_ah), (_conf)))
1543 
1544 #endif /* _DEV_ATH_ATHVAR_H */
1545