xref: /dragonfly/sys/dev/netif/ath/ath/if_ath_tx.c (revision 5f39c7e7)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14  *    redistribution must be conditioned upon including a substantially
15  *    similar Disclaimer requirement for further binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGES.
29  */
30 
31 #include <sys/cdefs.h>
32 
33 /*
34  * Driver for the Atheros Wireless LAN controller.
35  *
36  * This software is derived from work of Atsushi Onoe; his contribution
37  * is greatly appreciated.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 #include "opt_wlan.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/errno.h>
55 #include <sys/callout.h>
56 #include <sys/bus.h>
57 #include <sys/endian.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/priv.h>
61 #include <sys/ktr.h>
62 
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_arp.h>
69 #include <net/ethernet.h>
70 #include <net/if_llc.h>
71 
72 #include <netproto/802_11/ieee80211_var.h>
73 #include <netproto/802_11/ieee80211_regdomain.h>
74 #ifdef IEEE80211_SUPPORT_SUPERG
75 #include <netproto/802_11/ieee80211_superg.h>
76 #endif
77 #ifdef IEEE80211_SUPPORT_TDMA
78 #include <netproto/802_11/ieee80211_tdma.h>
79 #endif
80 #include <netproto/802_11/ieee80211_ht.h>
81 
82 #include <net/bpf.h>
83 
84 #ifdef INET
85 #include <netinet/in.h>
86 #include <netinet/if_ether.h>
87 #endif
88 
89 #include <dev/netif/ath/ath/if_athvar.h>
90 #include <dev/netif/ath/ath_hal/ah_devid.h>		/* XXX for softled */
91 #include <dev/netif/ath/ath_hal/ah_diagcodes.h>
92 
93 #include <dev/netif/ath/ath/if_ath_debug.h>
94 
95 #ifdef ATH_TX99_DIAG
96 #include <dev/netif/ath/ath_tx99/ath_tx99.h>
97 #endif
98 
99 #include <dev/netif/ath/ath/if_ath_misc.h>
100 #include <dev/netif/ath/ath/if_ath_tx.h>
101 #include <dev/netif/ath/ath/if_ath_tx_ht.h>
102 
103 #ifdef	ATH_DEBUG_ALQ
104 #include <dev/netif/ath/ath/if_ath_alq.h>
105 #endif
106 
107 extern  const char* ath_hal_ether_sprintf(const uint8_t *mac);
108 
109 /*
110  * How many retries to perform in software
111  */
112 #define	SWMAX_RETRIES		10
113 
114 /*
115  * What queue to throw the non-QoS TID traffic into
116  */
117 #define	ATH_NONQOS_TID_AC	WME_AC_VO
118 
119 #if 0
120 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
121 #endif
122 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
123     int tid);
124 static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
125     int tid);
126 static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
127     struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
128 static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
129     struct ieee80211_node *ni, struct mbuf *m0, int *tid);
130 static struct ath_buf *
131 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
132     struct ath_tid *tid, struct ath_buf *bf);
133 
134 #ifdef	ATH_DEBUG_ALQ
135 void
136 ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
137 {
138 	struct ath_buf *bf;
139 	int i, n;
140 	const char *ds;
141 
142 	/* XXX we should skip out early if debugging isn't enabled! */
143 	bf = bf_first;
144 
145 	while (bf != NULL) {
146 		/* XXX should ensure bf_nseg > 0! */
147 		if (bf->bf_nseg == 0)
148 			break;
149 		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
150 		for (i = 0, ds = (const char *) bf->bf_desc;
151 		    i < n;
152 		    i++, ds += sc->sc_tx_desclen) {
153 			if_ath_alq_post(&sc->sc_alq,
154 			    ATH_ALQ_EDMA_TXDESC,
155 			    sc->sc_tx_desclen,
156 			    ds);
157 		}
158 		bf = bf->bf_next;
159 	}
160 }
161 #endif /* ATH_DEBUG_ALQ */
162 
163 /*
164  * Whether to use the 11n rate scenario functions or not
165  */
166 static inline int
167 ath_tx_is_11n(struct ath_softc *sc)
168 {
169 	return ((sc->sc_ah->ah_magic == 0x20065416) ||
170 		    (sc->sc_ah->ah_magic == 0x19741014));
171 }
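/*
 * NB: the magic values checked above are believed to be the HAL
 * instance magics for the AR5416 family (0x20065416) and the AR9300
 * family (0x19741014) - i.e. the 11n-capable generations - but treat
 * that mapping as informational rather than authoritative.
 */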
172 
173 /*
174  * Obtain the current TID from the given frame.
175  *
176  * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
177  * This has implications for which AC/priority the packet is placed
178  * in.
179  */
180 static int
181 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
182 {
183 	const struct ieee80211_frame *wh;
184 	int pri = M_WME_GETAC(m0);
185 
186 	wh = mtod(m0, const struct ieee80211_frame *);
187 	if (! IEEE80211_QOS_HAS_SEQ(wh))
188 		return IEEE80211_NONQOS_TID;
189 	else
190 		return WME_AC_TO_TID(pri);
191 }
192 
193 static void
194 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
195 {
196 	struct ieee80211_frame *wh;
197 
198 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
199 	/* Only update/resync if needed */
200 	if (bf->bf_state.bfs_isretried == 0) {
201 		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
202 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
203 		    BUS_DMASYNC_PREWRITE);
204 	}
205 	bf->bf_state.bfs_isretried = 1;
206 	bf->bf_state.bfs_retries ++;
207 }
208 
209 /*
210  * Determine what the correct AC queue for the given frame
211  * should be.
212  *
213  * This code assumes that the TIDs map consistently to
214  * the underlying hardware (or software) ath_txq.
215  * Since the sender may try to set an AC which is
216  * arbitrary, non-QoS TIDs may end up being put on
217  * completely different ACs. There's no way to put a
218  * TID into multiple ath_txq's for scheduling, so
219  * for now we override the AC/TXQ selection and send non-QoS
220  * TID frames to the ATH_NONQOS_TID_AC queue (currently WME_AC_VO).
221  *
222  * This may be completely incorrect - specifically,
223  * some management frames may end up out of order
224  * compared to the QoS traffic they're controlling.
225  * I'll look into this later.
226  */
227 static int
228 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
229 {
230 	const struct ieee80211_frame *wh;
231 	int pri = M_WME_GETAC(m0);
232 	wh = mtod(m0, const struct ieee80211_frame *);
233 	if (IEEE80211_QOS_HAS_SEQ(wh))
234 		return pri;
235 
236 	return ATH_NONQOS_TID_AC;
237 }
238 
239 void
240 ath_txfrag_cleanup(struct ath_softc *sc,
241 	ath_bufhead *frags, struct ieee80211_node *ni)
242 {
243 	struct ath_buf *bf;
244 	struct ath_buf *next;
245 
246 	ATH_TXBUF_LOCK_ASSERT(sc);
247 
248 	next = TAILQ_FIRST(frags);
249 	while ((bf = next) != NULL) {
250 		next = TAILQ_NEXT(bf, bf_list);
251 		/* NB: bf assumed clean */
252 		TAILQ_REMOVE(frags, bf, bf_list);
253 		ath_returnbuf_head(sc, bf);
254 		ieee80211_node_decref(ni);
255 	}
256 }
257 
258 /*
259  * Setup xmit of a fragmented frame.  Allocate a buffer
260  * for each frag and bump the node reference count to
261  * reflect the held reference to be setup by ath_tx_start.
262  */
263 int
264 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
265 	struct mbuf *m0, struct ieee80211_node *ni)
266 {
267 	struct mbuf *m;
268 	struct ath_buf *bf;
269 
270 	ATH_TXBUF_LOCK(sc);
271 	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
272 		/* XXX non-management? */
273 		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
274 		if (bf == NULL) {	/* out of buffers, cleanup */
275 			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
276 			    __func__);
277 			ath_txfrag_cleanup(sc, frags, ni);
278 			break;
279 		}
280 		ieee80211_node_incref(ni);
281 		TAILQ_INSERT_TAIL(frags, bf, bf_list);
282 	}
283 	ATH_TXBUF_UNLOCK(sc);
284 
285 	return !TAILQ_EMPTY(frags);
286 }
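/*
 * Illustrative sketch (hedged, not part of the driver): the expected
 * caller pattern for ath_txfrag_setup()/ath_txfrag_cleanup() above,
 * assuming "sc", "ni" and a fragment chain "m0" (linked via m_nextpkt)
 * are already set up.  It mirrors the shape of the ath_tx_start() path.
 */
#if 0
static int
ath_txfrag_example(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0)
{
	ath_bufhead frags;

	TAILQ_INIT(&frags);
	if ((m0->m_flags & M_FRAG) &&
	    !ath_txfrag_setup(sc, &frags, m0, ni)) {
		/* Out of ath_bufs: free the whole chain and bail. */
		ath_freetx(m0);
		return (ENOBUFS);
	}
	/*
	 * ... transmit m0, then each fragment using a bf from "frags";
	 * on a later error, ath_txfrag_cleanup() (with ATH_TXBUF_LOCK
	 * held) undoes the buffer and node references.
	 */
	return (0);
}
#endif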
287 
288 /*
289  * Reclaim mbuf resources.  For fragmented frames we
290  * need to claim each frag chained with m_nextpkt.
291  */
292 void
293 ath_freetx(struct mbuf *m)
294 {
295 	struct mbuf *next;
296 
297 	do {
298 		next = m->m_nextpkt;
299 		m->m_nextpkt = NULL;
300 		m_freem(m);
301 	} while ((m = next) != NULL);
302 }
303 
304 static int
305 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
306 {
307 	int error;
308 
309 	/*
310 	 * Load the DMA map so any coalescing is done.  This
311 	 * also calculates the number of descriptors we need.
312 	 */
313 	error = bus_dmamap_load_mbuf_defrag(sc->sc_dmat, bf->bf_dmamap, &m0,
314 				     bf->bf_segs, ATH_TXDESC, &bf->bf_nseg,
315 				     BUS_DMA_NOWAIT);
316 	if (error != 0) {
317 		sc->sc_stats.ast_tx_busdma++;
318 		ath_freetx(m0);
319 		return error;
320 	}
321 
322 	/*
323 	 * Discard null packets.
324 	 */
325 	if (bf->bf_nseg == 0) {
326 		sc->sc_stats.ast_tx_nodata++;
327 		ath_freetx(m0);
328 		return EIO;
329 	}
330 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
331 		__func__, m0, m0->m_pkthdr.len);
332 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
333 	bf->bf_m = m0;
334 
335 	return 0;
336 }
337 
338 /*
339  * Chain together segments+descriptors for a frame - 11n or otherwise.
340  *
341  * For aggregates, this is called on each frame in the aggregate.
342  */
343 static void
344 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
345     struct ath_buf *bf, int is_aggr, int is_first_subframe,
346     int is_last_subframe)
347 {
348 	struct ath_hal *ah = sc->sc_ah;
349 	char *ds;
350 	int i, bp, dsp;
351 	HAL_DMA_ADDR bufAddrList[4];
352 	uint32_t segLenList[4];
353 	int numTxMaps = 1;
354 	int isFirstDesc = 1;
355 
356 	/*
357 	 * XXX There's txdma and txdma_mgmt; the descriptor
358 	 * sizes must match.
359 	 */
360 	struct ath_descdma *dd = &sc->sc_txdma;
361 
362 	/*
363 	 * Fill in the remainder of the descriptor info.
364 	 */
365 
366 	/*
367 	 * We need the number of TX data pointers in each descriptor.
368 	 * EDMA and later chips support 4 TX buffers per descriptor;
369 	 * previous chips just support one.
370 	 */
371 	numTxMaps = sc->sc_tx_nmaps;
372 
373 	/*
374 	 * For EDMA and later chips ensure the TX map is fully populated
375 	 * before advancing to the next descriptor.
376 	 */
377 	ds = (char *) bf->bf_desc;
378 	bp = dsp = 0;
379 	bzero(bufAddrList, sizeof(bufAddrList));
380 	bzero(segLenList, sizeof(segLenList));
381 	for (i = 0; i < bf->bf_nseg; i++) {
382 		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
383 		segLenList[bp] = bf->bf_segs[i].ds_len;
384 		bp++;
385 
386 		/*
387 		 * Go to the next segment if this isn't the last segment
388 		 * and there's space in the current TX map.
389 		 */
390 		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
391 			continue;
392 
393 		/*
394 		 * Last segment or we're out of buffer pointers.
395 		 */
396 		bp = 0;
397 
398 		if (i == bf->bf_nseg - 1)
399 			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
400 		else
401 			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
402 			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));
403 
404 		/*
405 		 * XXX This assumes that bfs_txq is the actual destination
406 		 * hardware queue at this point.  It may not have been
407 		 * assigned, it may actually be pointing to the multicast
408 		 * software TXQ id.  These must be fixed!
409 		 */
410 		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
411 			, bufAddrList
412 			, segLenList
413 			, bf->bf_descid		/* XXX desc id */
414 			, bf->bf_state.bfs_tx_queue
415 			, isFirstDesc		/* first segment */
416 			, i == bf->bf_nseg - 1	/* last segment */
417 			, (struct ath_desc *) ds0	/* first descriptor */
418 		);
419 
420 		/*
421 		 * Make sure the 11n aggregate fields are cleared.
422 		 *
423 		 * XXX TODO: this doesn't need to be called for
424 		 * aggregate frames, yet it's currently called on all
425 		 * sub-frames.  Since the descriptors are in
426 		 * non-cacheable memory, this leads to some
427 		 * rather slow writes on MIPS/ARM platforms.
428 		 */
429 		if (ath_tx_is_11n(sc))
430 			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
431 
432 		/*
433 		 * If 11n is enabled, set it up as if it's an aggregate
434 		 * frame.
435 		 */
436 		if (is_last_subframe) {
437 			ath_hal_set11n_aggr_last(sc->sc_ah,
438 			    (struct ath_desc *) ds);
439 		} else if (is_aggr) {
440 			/*
441 			 * This clears the aggrlen field; so
442 			 * the caller needs to call set_aggr_first()!
443 			 *
444 			 * XXX TODO: don't call this for the first
445 			 * descriptor in the first frame in an
446 			 * aggregate!
447 			 */
448 			ath_hal_set11n_aggr_middle(sc->sc_ah,
449 			    (struct ath_desc *) ds,
450 			    bf->bf_state.bfs_ndelim);
451 		}
452 		isFirstDesc = 0;
453 		bf->bf_lastds = (struct ath_desc *) ds;
454 
455 		/*
456 		 * Don't forget to skip to the next descriptor.
457 		 */
458 		ds += sc->sc_tx_desclen;
459 		dsp++;
460 
461 		/*
462 		 * .. and don't forget to blank these out!
463 		 */
464 		bzero(bufAddrList, sizeof(bufAddrList));
465 		bzero(segLenList, sizeof(segLenList));
466 	}
467 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
468 }
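/*
 * Worked example for the segment loop above (illustrative): with
 * bf_nseg = 6 on an EDMA chip (numTxMaps = 4), segments 0..3 land in
 * the first descriptor and segments 4..5 in the second, matching
 * n = ((6 - 1) / 4) + 1 = 2 as computed in ath_tx_alq_post().  On a
 * legacy chip (numTxMaps = 1) the same frame consumes six descriptors,
 * one segment each.
 */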
469 
470 /*
471  * Set the rate control fields in the given descriptor based on
472  * the bf_state fields and node state.
473  *
474  * The bfs fields should already be set with the relevant rate
475  * control information, including whether MRR is to be enabled.
476  *
477  * Since the FreeBSD HAL currently sets up the first TX rate
478  * in ath_hal_setuptxdesc(), this will setup the MRR
479  * conditionally for the pre-11n chips, and call ath_buf_set_rate
480  * unconditionally for 11n chips. These require the 11n rate
481  * scenario to be set if MCS rates are enabled, so it's easier
482  * to just always call it. The caller can then only set rates 2, 3
483  * and 4 if multi-rate retry is needed.
484  */
485 static void
486 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
487     struct ath_buf *bf)
488 {
489 	struct ath_rc_series *rc = bf->bf_state.bfs_rc;
490 
491 	/* If mrr is disabled, blank tries 1, 2, 3 */
492 	if (! bf->bf_state.bfs_ismrr)
493 		rc[1].tries = rc[2].tries = rc[3].tries = 0;
494 
495 #if 0
496 	/*
497 	 * If NOACK is set, just set ntries=1.
498 	 */
499 	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
500 		rc[1].tries = rc[2].tries = rc[3].tries = 0;
501 		rc[0].tries = 1;
502 	}
503 #endif
504 
505 	/*
506 	 * Always call - that way a retried descriptor will
507 	 * have the MRR fields overwritten.
508 	 *
509 	 * XXX TODO: see if this is really needed - setting up
510 	 * the first descriptor should set the MRR fields to 0
511 	 * for us anyway.
512 	 */
513 	if (ath_tx_is_11n(sc)) {
514 		ath_buf_set_rate(sc, ni, bf);
515 	} else {
516 		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
517 			, rc[1].ratecode, rc[1].tries
518 			, rc[2].ratecode, rc[2].tries
519 			, rc[3].ratecode, rc[3].tries
520 		);
521 	}
522 }
523 
524 /*
525  * Setup segments+descriptors for an 11n aggregate.
526  * bf_first is the first buffer in the aggregate.
527  * The descriptor list must already have been linked together using
528  * bf->bf_next.
529  */
530 static void
531 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
532 {
533 	struct ath_buf *bf, *bf_prev = NULL;
534 	struct ath_desc *ds0 = bf_first->bf_desc;
535 
536 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
537 	    __func__, bf_first->bf_state.bfs_nframes,
538 	    bf_first->bf_state.bfs_al);
539 
540 	bf = bf_first;
541 
542 	if (bf->bf_state.bfs_txrate0 == 0)
543 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
544 		    __func__, bf, 0);
545 	if (bf->bf_state.bfs_rc[0].ratecode == 0)
546 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
547 		    __func__, bf, 0);
548 
549 	/*
550 	 * Setup all descriptors of all subframes - this will
551 	 * call ath_hal_set11n_aggr_middle() on every frame.
552 	 */
553 	while (bf != NULL) {
554 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
555 		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
556 		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
557 		    SEQNO(bf->bf_state.bfs_seqno));
558 
559 		/*
560 		 * Setup the initial fields for the first descriptor - all
561 		 * the non-11n specific stuff.
562 		 */
563 		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
564 			, bf->bf_state.bfs_pktlen	/* packet length */
565 			, bf->bf_state.bfs_hdrlen	/* header length */
566 			, bf->bf_state.bfs_atype	/* Atheros packet type */
567 			, bf->bf_state.bfs_txpower	/* txpower */
568 			, bf->bf_state.bfs_txrate0
569 			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
570 			, bf->bf_state.bfs_keyix	/* key cache index */
571 			, bf->bf_state.bfs_txantenna	/* antenna mode */
572 			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
573 			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
574 			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
575 		);
576 
577 		/*
578 		 * First descriptor? Setup the rate control and initial
579 		 * aggregate header information.
580 		 */
581 		if (bf == bf_first) {
582 			/*
583 			 * setup first desc with rate and aggr info
584 			 */
585 			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
586 		}
587 
588 		/*
589 		 * Setup the descriptors for a multi-descriptor frame.
590 		 * This is both aggregate and non-aggregate aware.
591 		 */
592 		ath_tx_chaindesclist(sc, ds0, bf,
593 		    1, /* is_aggr */
594 		    !! (bf == bf_first), /* is_first_subframe */
595 		    !! (bf->bf_next == NULL) /* is_last_subframe */
596 		    );
597 
598 		if (bf == bf_first) {
599 			/*
600 			 * Initialise the first 11n aggregate with the
601 			 * aggregate length and aggregate enable bits.
602 			 */
603 			ath_hal_set11n_aggr_first(sc->sc_ah,
604 			    ds0,
605 			    bf->bf_state.bfs_al,
606 			    bf->bf_state.bfs_ndelim);
607 		}
608 
609 		/*
610 		 * Link the last descriptor of the previous frame
611 		 * to the beginning descriptor of this frame.
612 		 */
613 		if (bf_prev != NULL)
614 			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
615 			    bf->bf_daddr);
616 
617 		/* Save a copy so we can link the next descriptor in */
618 		bf_prev = bf;
619 		bf = bf->bf_next;
620 	}
621 
622 	/*
623 	 * Set the first descriptor bf_lastds field to point to
624 	 * the last descriptor in the last subframe; that's where
625 	 * the status update will occur.
626 	 */
627 	bf_first->bf_lastds = bf_prev->bf_lastds;
628 
629 	/*
630 	 * And bf_last in the first descriptor points to the end of
631 	 * the aggregate list.
632 	 */
633 	bf_first->bf_last = bf_prev;
634 
635 	/*
636 	 * Non-AR9300 NICs require the rate control info in the
637 	 * final descriptor, so let's set that up now.
638 	 *
639 	 * This is because the filltxdesc() HAL call doesn't
640 	 * populate the last segment with rate control information
641 	 * if firstSeg is also true.  For non-aggregate frames
642 	 * that is fine, as the first frame already has rate control
643 	 * info.  But if the last frame in an aggregate has one
644 	 * descriptor, both firstseg and lastseg will be true and
645 	 * the rate info isn't copied.
646 	 *
647 	 * This is inefficient on MIPS/ARM platforms that have
648 	 * non-cacheable memory for TX descriptors, but we'll just
649 	 * make do for now.
650 	 *
651 	 * As to why the rate table is stashed in the last descriptor
652 	 * rather than the first descriptor?  Because proctxdesc()
653 	 * is called on the final descriptor in an MPDU or A-MPDU -
654 	 * ie, the one that gets updated by the hardware upon
655 	 * completion.  That way proctxdesc() doesn't need to know
656 	 * about the first _and_ last TX descriptor.
657 	 */
658 	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
659 
660 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
661 }
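/*
 * Illustrative layout of a three-frame aggregate after the above
 * (a sketch, not authoritative):
 *
 *   bf_first --bf_next--> bf1 --bf_next--> bf2 --bf_next--> NULL
 *      |                   |                |
 *   desc(s) ----link---> desc(s) ---link--> desc(s)
 *
 *   bf_first->bf_last   = bf2
 *   bf_first->bf_lastds = bf2->bf_lastds  (where TX status appears)
 */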
662 
663 /*
664  * Hand-off a frame to the multicast TX queue.
665  *
666  * This is a software TXQ which will be appended to the CAB queue
667  * during the beacon setup code.
668  *
669  * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
670  * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
671  * with the actual hardware txq, or all of this will fall apart.
672  *
673  * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
674  * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
675  * correctly.
676  */
677 static void
678 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
679     struct ath_buf *bf)
680 {
681 	ATH_TX_LOCK_ASSERT(sc);
682 
683 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
684 	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
685 
686 	/*
687 	 * Ensure that the tx queue is the cabq, so things get
688 	 * mapped correctly.
689 	 */
690 	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
691 		DPRINTF(sc, ATH_DEBUG_XMIT,
692 		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
693 		    __func__, bf, bf->bf_state.bfs_tx_queue,
694 		    txq->axq_qnum);
695 	}
696 
697 	ATH_TXQ_LOCK(txq);
698 	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
699 		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
700 		struct ieee80211_frame *wh;
701 
702 		/* mark previous frame */
703 		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
704 		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
705 		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
706 		    BUS_DMASYNC_PREWRITE);
707 
708 		/* link descriptor */
709 		ath_hal_settxdesclink(sc->sc_ah,
710 		    bf_last->bf_lastds,
711 		    bf->bf_daddr);
712 	}
713 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
714 	ATH_TXQ_UNLOCK(txq);
715 }
716 
717 /*
718  * Hand-off packet to a hardware queue.
719  */
720 static void
721 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
722     struct ath_buf *bf)
723 {
724 	struct ath_hal *ah = sc->sc_ah;
725 	struct ath_buf *bf_first;
726 
727 	/*
728 	 * Insert the frame on the outbound list and pass it on
729 	 * to the hardware.  Multicast frames buffered for power
730 	 * save stations and transmitted from the CAB queue are
731 	 * stored on a s/w-only queue and loaded onto the CAB queue
732 	 * in the SWBA handler, since frames only go out on DTIM and
733 	 * to avoid possible races.
734 	 */
735 	ATH_TX_LOCK_ASSERT(sc);
736 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
737 	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
738 	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
739 	     ("ath_tx_handoff_hw called for mcast queue"));
740 
741 	/*
742 	 * XXX We should instead just verify that sc_txstart_cnt
743 	 * or sc_txproc_cnt > 0.  That would mean that
744 	 * the reset is going to be waiting for us to complete.
745 	 */
746 	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
747 		device_printf(sc->sc_dev,
748 		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
749 		    __func__);
750 	}
751 
752 	/*
753 	 * XXX .. this is going to cause the hardware to get upset;
754 	 * so we really should find some way to drop or queue
755 	 * things.
756 	 */
757 
758 	ATH_TXQ_LOCK(txq);
759 
760 	/*
761 	 * XXX TODO: if there's a holdingbf, then
762 	 * ATH_TXQ_PUTRUNNING should be clear.
763 	 *
764 	 * If there is a holdingbf and the list is empty,
765 	 * then axq_link should be pointing to the holdingbf.
766 	 *
767 	 * Otherwise it should point to the last descriptor
768 	 * in the last ath_buf.
769 	 *
770 	 * In any case, we should really ensure that we
771 	 * update the previous descriptor link pointer to
772 	 * this descriptor, regardless of all of the above state.
773 	 *
774 	 * For now this is captured by having axq_link point
775 	 * to either the holdingbf (if the TXQ list is empty)
776 	 * or the end of the list (if the TXQ list isn't empty.)
777 	 * I'd rather just kill axq_link here and do it as above.
778 	 */
779 
780 	/*
781 	 * Append the frame to the TX queue.
782 	 */
783 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
784 	ATH_KTR(sc, ATH_KTR_TX, 3,
785 	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
786 	    "depth=%d",
787 	    txq->axq_qnum,
788 	    bf,
789 	    txq->axq_depth);
790 
791 	/*
792 	 * If there's a link pointer, update it.
793 	 *
794 	 * XXX we should replace this with the above logic, just
795 	 * to kill axq_link with fire.
796 	 */
797 	if (txq->axq_link != NULL) {
798 		*txq->axq_link = bf->bf_daddr;
799 		DPRINTF(sc, ATH_DEBUG_XMIT,
800 		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
801 		    txq->axq_qnum, txq->axq_link,
802 		    (caddr_t)bf->bf_daddr, bf->bf_desc,
803 		    txq->axq_depth);
804 		ATH_KTR(sc, ATH_KTR_TX, 5,
805 		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
806 		    "lastds=%d",
807 		    txq->axq_qnum, txq->axq_link,
808 		    (caddr_t)bf->bf_daddr, bf->bf_desc,
809 		    bf->bf_lastds);
810 	}
811 
812 	/*
813 	 * If we've not pushed anything into the hardware yet,
814 	 * push the head of the queue into the TxDP.
815 	 *
816 	 * Once we've started DMA, there's no guarantee that
817 	 * updating the TxDP with a new value will actually work.
818 	 * So we just don't do that - if we hit the end of the list,
819 	 * we keep that buffer around (the "holding buffer") and
820 	 * restart DMA by updating the link pointer of _that_
821 	 * descriptor and then kicking TxE.
822 	 */
823 	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
824 		bf_first = TAILQ_FIRST(&txq->axq_q);
825 		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
826 		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
827 		DPRINTF(sc, ATH_DEBUG_XMIT,
828 		    "%s: TXDP[%u] = %p (%p) depth %d\n",
829 		    __func__, txq->axq_qnum,
830 		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
831 		    txq->axq_depth);
832 		ATH_KTR(sc, ATH_KTR_TX, 5,
833 		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
834 		    "lastds=%p depth %d",
835 		    txq->axq_qnum,
836 		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
837 		    bf_first->bf_lastds,
838 		    txq->axq_depth);
839 	}
840 
841 	/*
842 	 * Ensure that the bf TXQ matches this TXQ, so later
843 	 * checking and holding buffer manipulation is sane.
844 	 */
845 	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
846 		DPRINTF(sc, ATH_DEBUG_XMIT,
847 		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
848 		    __func__, bf, bf->bf_state.bfs_tx_queue,
849 		    txq->axq_qnum);
850 	}
851 
852 	/*
853 	 * Track aggregate queue depth.
854 	 */
855 	if (bf->bf_state.bfs_aggr)
856 		txq->axq_aggr_depth++;
857 
858 	/*
859 	 * Update the link pointer.
860 	 */
861 	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
862 
863 	/*
864 	 * Start DMA.
865 	 *
866 	 * If we wrote a TxDP above, DMA will start from here.
867 	 *
868 	 * If DMA is running, it'll do nothing.
869 	 *
870 	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
871 	 * or VEOL) then it stops at the last transmitted descriptor.
872 	 * We then append a new frame by updating the link pointer
873 	 * in that descriptor and then kick TxE here; it will re-read
874 	 * that last descriptor and find the new descriptor to transmit.
875 	 *
876 	 * This is why we keep the holding descriptor around.
877 	 */
878 	ath_hal_txstart(ah, txq->axq_qnum);
879 	ATH_TXQ_UNLOCK(txq);
880 	ATH_KTR(sc, ATH_KTR_TX, 1,
881 	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
882 }
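/*
 * Illustrative summary of the append path above (a sketch):
 *
 *   1. if axq_link is valid, the previous chain's link field is
 *      patched to point at the new buffer's daddr;
 *   2. if PUTRUNNING is clear, TxDP is written with the head of the
 *      queue and PUTRUNNING is set;
 *   3. axq_link is advanced to the new buffer's last-descriptor link
 *      field and TxE is kicked via ath_hal_txstart().
 */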
883 
884 /*
885  * Restart TX DMA for the given TXQ.
886  *
887  * This must be called whether the queue is empty or not.
888  */
889 static void
890 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
891 {
892 	struct ath_buf *bf, *bf_last;
893 
894 	ATH_TXQ_LOCK_ASSERT(txq);
895 
896 	/* XXX make this ATH_TXQ_FIRST */
897 	bf = TAILQ_FIRST(&txq->axq_q);
898 	bf_last = ATH_TXQ_LAST(txq, axq_q_s);
899 
900 	if (bf == NULL)
901 		return;
902 
903 	DPRINTF(sc, ATH_DEBUG_RESET,
904 	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
905 	    __func__,
906 	    txq->axq_qnum,
907 	    bf,
908 	    bf_last,
909 	    (uint32_t) bf->bf_daddr);
910 
911 #ifdef	ATH_DEBUG
912 	if (sc->sc_debug & ATH_DEBUG_RESET)
913 		ath_tx_dump(sc, txq);
914 #endif
915 
916 	/*
917 	 * This is called from a restart, so DMA is known to be
918 	 * completely stopped.
919 	 */
920 	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
921 	    ("%s: Q%d: called with PUTRUNNING=1\n",
922 	    __func__,
923 	    txq->axq_qnum));
924 
925 	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
926 	txq->axq_flags |= ATH_TXQ_PUTRUNNING;
927 
928 	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
929 	    &txq->axq_link);
930 	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
931 }
932 
933 /*
934  * Hand off a packet to the hardware (or mcast queue.)
935  *
936  * The relevant hardware txq should be locked.
937  */
938 static void
939 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
940     struct ath_buf *bf)
941 {
942 	ATH_TX_LOCK_ASSERT(sc);
943 
944 #ifdef	ATH_DEBUG_ALQ
945 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
946 		ath_tx_alq_post(sc, bf);
947 #endif
948 
949 	if (txq->axq_qnum == ATH_TXQ_SWQ)
950 		ath_tx_handoff_mcast(sc, txq, bf);
951 	else
952 		ath_tx_handoff_hw(sc, txq, bf);
953 }
954 
955 static int
956 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
957     struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
958     int *keyix)
959 {
960 	DPRINTF(sc, ATH_DEBUG_XMIT,
961 	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
962 	    __func__,
963 	    *hdrlen,
964 	    *pktlen,
965 	    isfrag,
966 	    iswep,
967 	    m0);
968 
969 	if (iswep) {
970 		const struct ieee80211_cipher *cip;
971 		struct ieee80211_key *k;
972 
973 		/*
974 		 * Construct the 802.11 header+trailer for an encrypted
975 		 * frame. The only reason this can fail is because of an
976 		 * unknown or unsupported cipher/key type.
977 		 */
978 		k = ieee80211_crypto_encap(ni, m0);
979 		if (k == NULL) {
980 			/*
981 			 * This can happen when the key is yanked after the
982 			 * frame was queued.  Just discard the frame; the
983 			 * 802.11 layer counts failures and provides
984 			 * debugging/diagnostics.
985 			 */
986 			return (0);
987 		}
988 		/*
989 		 * Adjust the packet + header lengths for the crypto
990 		 * additions and calculate the h/w key index.  When
991 		 * a s/w mic is done the frame will have had any mic
992 		 * added to it prior to entry so m0->m_pkthdr.len will
993 		 * account for it. Otherwise we need to add it to the
994 		 * packet length.
995 		 */
996 		cip = k->wk_cipher;
997 		(*hdrlen) += cip->ic_header;
998 		(*pktlen) += cip->ic_header + cip->ic_trailer;
999 		/* NB: frags always have any TKIP MIC done in s/w */
1000 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1001 			(*pktlen) += cip->ic_miclen;
1002 		(*keyix) = k->wk_keyix;
1003 	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1004 		/*
1005 		 * Use station key cache slot, if assigned.
1006 		 */
1007 		(*keyix) = ni->ni_ucastkey.wk_keyix;
1008 		if ((*keyix) == IEEE80211_KEYIX_NONE)
1009 			(*keyix) = HAL_TXKEYIX_INVALID;
1010 	} else
1011 		(*keyix) = HAL_TXKEYIX_INVALID;
1012 
1013 	return (1);
1014 }
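/*
 * Example of the length adjustment above (hedged - the exact values
 * come from the net80211 cipher definitions): for TKIP, ic_header is
 * 8 (IV + extended IV), ic_trailer is 4 (ICV) and ic_miclen is 8
 * (Michael MIC).  A non-fragment frame with a hardware MIC thus grows
 * hdrlen by 8 and pktlen by 8 + 4 + 8 = 20; a fragment, or a frame
 * with a software MIC already appended, grows pktlen by 8 + 4 = 12.
 */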
1015 
1016 /*
1017  * Calculate whether interoperability protection is required for
1018  * this frame.
1019  *
1020  * This requires the rate control information be filled in,
1021  * as the protection requirement depends upon the current
1022  * operating mode / PHY.
1023  */
1024 static void
1025 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1026 {
1027 	struct ieee80211_frame *wh;
1028 	uint8_t rix;
1029 	uint16_t flags;
1030 	int shortPreamble;
1031 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1032 	struct ifnet *ifp = sc->sc_ifp;
1033 	struct ieee80211com *ic = ifp->if_l2com;
1034 
1035 	flags = bf->bf_state.bfs_txflags;
1036 	rix = bf->bf_state.bfs_rc[0].rix;
1037 	shortPreamble = bf->bf_state.bfs_shpream;
1038 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1039 
1040 	/*
1041 	 * If 802.11g protection is enabled, determine whether
1042 	 * to use RTS/CTS or just CTS.  Note that this is only
1043 	 * done for OFDM unicast frames.
1044 	 */
1045 	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1046 	    rt->info[rix].phy == IEEE80211_T_OFDM &&
1047 	    (flags & HAL_TXDESC_NOACK) == 0) {
1048 		bf->bf_state.bfs_doprot = 1;
1049 		/* XXX fragments must use CCK rates w/ protection */
1050 		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1051 			flags |= HAL_TXDESC_RTSENA;
1052 		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1053 			flags |= HAL_TXDESC_CTSENA;
1054 		}
1055 		/*
1056 		 * For frags it would be desirable to use the
1057 		 * highest CCK rate for RTS/CTS.  But stations
1058 		 * farther away may detect it at a lower CCK rate
1059 		 * so use the configured protection rate instead
1060 		 * (for now).
1061 		 */
1062 		sc->sc_stats.ast_tx_protect++;
1063 	}
1064 
1065 	/*
1066 	 * If 11n protection is enabled and it's a HT frame,
1067 	 * enable RTS.
1068 	 *
1069 	 * XXX ic_htprotmode or ic_curhtprotmode?
1070 	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
1071 	 * XXX indicates it's not a HT pure environment?
1072 	 */
1073 	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1074 	    rt->info[rix].phy == IEEE80211_T_HT &&
1075 	    (flags & HAL_TXDESC_NOACK) == 0) {
1076 		flags |= HAL_TXDESC_RTSENA;
1077 		sc->sc_stats.ast_tx_htprotect++;
1078 	}
1079 	bf->bf_state.bfs_txflags = flags;
1080 }
1081 
1082 /*
1083  * Update the frame duration given the currently selected rate.
1084  *
1085  * Since this writes the duration value into the 802.11 header
1086  * itself, the frame will require a DMA flush afterwards.
1087  */
1088 static void
1089 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1090 {
1091 	struct ieee80211_frame *wh;
1092 	uint8_t rix;
1093 	uint16_t flags;
1094 	int shortPreamble;
1095 	struct ath_hal *ah = sc->sc_ah;
1096 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1097 	int isfrag = bf->bf_m->m_flags & M_FRAG;
1098 
1099 	flags = bf->bf_state.bfs_txflags;
1100 	rix = bf->bf_state.bfs_rc[0].rix;
1101 	shortPreamble = bf->bf_state.bfs_shpream;
1102 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1103 
1104 	/*
1105 	 * Calculate duration.  This logically belongs in the 802.11
1106 	 * layer but it lacks sufficient information to calculate it.
1107 	 */
1108 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
1109 	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
1110 		u_int16_t dur;
1111 		if (shortPreamble)
1112 			dur = rt->info[rix].spAckDuration;
1113 		else
1114 			dur = rt->info[rix].lpAckDuration;
1115 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1116 			dur += dur;		/* additional SIFS+ACK */
1117 			/*
1118 			 * Include the size of next fragment so NAV is
1119 			 * updated properly.  The last fragment uses only
1120 			 * the ACK duration
1121 			 *
1122 			 * XXX TODO: ensure that the rate lookup for each
1123 			 * fragment is the same as the rate used by the
1124 			 * first fragment!
1125 			 */
1126 			dur += ath_hal_computetxtime(ah,
1127 			    rt,
1128 			    bf->bf_nextfraglen,
1129 			    rix, shortPreamble);
1130 		}
1131 		if (isfrag) {
1132 			/*
1133 			 * Force hardware to use computed duration for next
1134 			 * fragment by disabling multi-rate retry which updates
1135 			 * duration based on the multi-rate duration table.
1136 			 */
1137 			bf->bf_state.bfs_ismrr = 0;
1138 			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1139 			/* XXX update bfs_rc[0].try? */
1140 		}
1141 
1142 		/* Update the duration field itself */
1143 		*(u_int16_t *)wh->i_dur = htole16(dur);
1144 	}
1145 }
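/*
 * Illustrative summary of the duration math above: for an ACKed,
 * non-control frame, dur = ACK time at the chosen rate; if more
 * fragments follow, dur = 2 * ACK time (the extra SIFS + ACK) plus
 * txtime(bf_nextfraglen), so the NAV also covers the next fragment.
 */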
1146 
1147 static uint8_t
1148 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1149     int cix, int shortPreamble)
1150 {
1151 	uint8_t ctsrate;
1152 
1153 	/*
1154 	 * CTS transmit rate is derived from the transmit rate
1155 	 * by looking in the h/w rate table.  We must also factor
1156 	 * in whether or not a short preamble is to be used.
1157 	 */
1158 	/* NB: cix is set above where RTS/CTS is enabled */
1159 	KASSERT(cix != 0xff, ("cix not setup"));
1160 	ctsrate = rt->info[cix].rateCode;
1161 
1162 	/* XXX this should only matter for legacy rates */
1163 	if (shortPreamble)
1164 		ctsrate |= rt->info[cix].shortPreamble;
1165 
1166 	return (ctsrate);
1167 }
1168 
1169 /*
1170  * Calculate the RTS/CTS duration for legacy frames.
1171  */
1172 static int
1173 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1174     int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1175     int flags)
1176 {
1177 	int ctsduration = 0;
1178 
1179 	/* This mustn't be called for HT modes */
1180 	if (rt->info[cix].phy == IEEE80211_T_HT) {
1181 		kprintf("%s: HT rate where it shouldn't be (0x%x)\n",
1182 		    __func__, rt->info[cix].rateCode);
1183 		return (-1);
1184 	}
1185 
1186 	/*
1187 	 * Compute the transmit duration based on the frame
1188 	 * size and the size of an ACK frame.  We call into the
1189 	 * HAL to do the computation since it depends on the
1190 	 * characteristics of the actual PHY being used.
1191 	 *
1192 	 * NB: CTS is assumed the same size as an ACK so we can
1193 	 *     use the precalculated ACK durations.
1194 	 */
1195 	if (shortPreamble) {
1196 		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1197 			ctsduration += rt->info[cix].spAckDuration;
1198 		ctsduration += ath_hal_computetxtime(ah,
1199 			rt, pktlen, rix, AH_TRUE);
1200 		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1201 			ctsduration += rt->info[rix].spAckDuration;
1202 	} else {
1203 		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1204 			ctsduration += rt->info[cix].lpAckDuration;
1205 		ctsduration += ath_hal_computetxtime(ah,
1206 			rt, pktlen, rix, AH_FALSE);
1207 		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1208 			ctsduration += rt->info[rix].lpAckDuration;
1209 	}
1210 
1211 	return (ctsduration);
1212 }
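/*
 * In other words (illustrative summary), the value computed above is:
 *
 *   [SIFS + CTS time at cix, if RTS is enabled]
 *     + txtime(pktlen at rix)
 *     + [SIFS + ACK time at rix, unless NOACK is set]
 *
 * using short- or long-preamble ACK durations as appropriate, with
 * CTS assumed to be ACK-sized per the NB above.
 */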
1213 
1214 /*
1215  * Update the given ath_buf with updated rts/cts setup and duration
1216  * values.
1217  *
1218  * To support rate lookups for each software retry, the rts/cts rate
1219  * and cts duration must be re-calculated.
1220  *
1221  * This function assumes the RTS/CTS flags have been set as needed;
1222  * mrr has been disabled; and the rate control lookup has been done.
1223  *
1224  * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1225  * XXX The 11n NICs support per-rate RTS/CTS configuration.
1226  */
1227 static void
1228 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1229 {
1230 	uint16_t ctsduration = 0;
1231 	uint8_t ctsrate = 0;
1232 	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1233 	uint8_t cix = 0;
1234 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1235 
1236 	/*
1237 	 * No RTS/CTS enabled? Don't bother.
1238 	 */
1239 	if ((bf->bf_state.bfs_txflags &
1240 	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1241 		/* XXX is this really needed? */
1242 		bf->bf_state.bfs_ctsrate = 0;
1243 		bf->bf_state.bfs_ctsduration = 0;
1244 		return;
1245 	}
1246 
1247 	/*
1248 	 * If protection is enabled, use the protection rix control
1249 	 * rate. Otherwise use the rate0 control rate.
1250 	 */
1251 	if (bf->bf_state.bfs_doprot)
1252 		rix = sc->sc_protrix;
1253 	else
1254 		rix = bf->bf_state.bfs_rc[0].rix;
1255 
1256 	/*
1257 	 * If the raw path has hard-coded ctsrate0 to something,
1258 	 * use it.
1259 	 */
1260 	if (bf->bf_state.bfs_ctsrate0 != 0)
1261 		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1262 	else
1263 		/* Control rate from above */
1264 		cix = rt->info[rix].controlRate;
1265 
1266 	/* Calculate the rtscts rate for the given cix */
1267 	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1268 	    bf->bf_state.bfs_shpream);
1269 
1270 	/* The 11n chipsets do ctsduration calculations for you */
1271 	if (! ath_tx_is_11n(sc))
1272 		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1273 		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1274 		    rt, bf->bf_state.bfs_txflags);
1275 
1276 	/* Squirrel away in ath_buf */
1277 	bf->bf_state.bfs_ctsrate = ctsrate;
1278 	bf->bf_state.bfs_ctsduration = ctsduration;
1279 
1280 	/*
1281 	 * Must disable multi-rate retry when using RTS/CTS.
1282 	 */
1283 	if (!sc->sc_mrrprot) {
1284 		bf->bf_state.bfs_ismrr = 0;
1285 		bf->bf_state.bfs_try0 =
1286 		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1287 	}
1288 }
1289 
1290 /*
1291  * Setup the descriptor chain for a normal frame or a
1292  * fast frame.
1293  *
1294  * XXX TODO: extend to include the destination hardware QCU ID.
1295  * Make sure that is correct.  Make sure that when being added
1296  * to the mcastq, the CABQ QCUID is set or things will get a bit
1297  * odd.
1298  */
1299 static void
1300 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1301 {
1302 	struct ath_desc *ds = bf->bf_desc;
1303 	struct ath_hal *ah = sc->sc_ah;
1304 
1305 	if (bf->bf_state.bfs_txrate0 == 0)
1306 		DPRINTF(sc, ATH_DEBUG_XMIT,
1307 		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1308 
1309 	ath_hal_setuptxdesc(ah, ds
1310 		, bf->bf_state.bfs_pktlen	/* packet length */
1311 		, bf->bf_state.bfs_hdrlen	/* header length */
1312 		, bf->bf_state.bfs_atype	/* Atheros packet type */
1313 		, bf->bf_state.bfs_txpower	/* txpower */
1314 		, bf->bf_state.bfs_txrate0
1315 		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
1316 		, bf->bf_state.bfs_keyix	/* key cache index */
1317 		, bf->bf_state.bfs_txantenna	/* antenna mode */
1318 		, bf->bf_state.bfs_txflags	/* flags */
1319 		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
1320 		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
1321 	);
1322 
1323 	/*
1324 	 * This will be overridden when the descriptor chain is written.
1325 	 */
1326 	bf->bf_lastds = ds;
1327 	bf->bf_last = bf;
1328 
1329 	/* Set rate control and descriptor chain for this frame */
1330 	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1331 	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1332 }
1333 
1334 /*
1335  * Do a rate lookup.
1336  *
1337  * This performs a rate lookup for the given ath_buf only if it's required.
1338  * Non-data frames and raw frames don't require it.
1339  *
1340  * This populates the primary and MRR entries; MRR values are
1341  * then disabled later on if something requires it (eg RTS/CTS on
1342  * pre-11n chipsets.)
1343  *
1344  * This needs to be done before the RTS/CTS fields are calculated
1345  * as they may depend upon the rate chosen.
1346  */
1347 static void
1348 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
1349 {
1350 	uint8_t rate, rix;
1351 	int try0;
1352 
1353 	if (! bf->bf_state.bfs_doratelookup)
1354 		return;
1355 
1356 	/* Get rid of any previous state */
1357 	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1358 
1359 	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1360 	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1361 	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
1362 
1363 	/* In case MRR is disabled, make sure rc[0] is setup correctly */
1364 	bf->bf_state.bfs_rc[0].rix = rix;
1365 	bf->bf_state.bfs_rc[0].ratecode = rate;
1366 	bf->bf_state.bfs_rc[0].tries = try0;
1367 
1368 	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1369 		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1370 		    bf->bf_state.bfs_rc);
1371 	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1372 
1373 	sc->sc_txrix = rix;	/* for LED blinking */
1374 	sc->sc_lastdatarix = rix;	/* for fast frames */
1375 	bf->bf_state.bfs_try0 = try0;
1376 	bf->bf_state.bfs_txrate0 = rate;
1377 }
1378 
1379 /*
1380  * Update the CLRDMASK bit in the ath_buf if it needs to be set.
1381  */
1382 static void
1383 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1384     struct ath_buf *bf)
1385 {
1386 	struct ath_node *an = ATH_NODE(bf->bf_node);
1387 
1388 	ATH_TX_LOCK_ASSERT(sc);
1389 
1390 	if (an->clrdmask == 1) {
1391 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1392 		an->clrdmask = 0;
1393 	}
1394 }
1395 
1396 /*
1397  * Return whether this frame should be software queued or
1398  * direct dispatched.
1399  *
1400  * When doing powersave, BAR frames should be queued but other management
1401  * frames should be directly sent.
1402  *
1403  * When not doing powersave, stick BAR frames into the hardware queue
1404  * so they go out even though the queue is paused.
1405  *
1406  * For now, management frames are also software queued by default.
1407  */
1408 static int
1409 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1410     struct mbuf *m0, int *queue_to_head)
1411 {
1412 	struct ieee80211_node *ni = &an->an_node;
1413 	struct ieee80211_frame *wh;
1414 	uint8_t type, subtype;
1415 
1416 	wh = mtod(m0, struct ieee80211_frame *);
1417 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1418 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1419 
1420 	(*queue_to_head) = 0;
1421 
1422 	/* If it's not in powersave - direct-dispatch BAR */
1423 	if ((ATH_NODE(ni)->an_is_powersave == 0)
1424 	    && type == IEEE80211_FC0_TYPE_CTL &&
1425 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1426 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1427 		    "%s: BAR: TX'ing direct\n", __func__);
1428 		return (0);
1429 	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1430 	    && type == IEEE80211_FC0_TYPE_CTL &&
1431 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1432 		/* BAR TX whilst asleep; queue */
1433 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1434 		    "%s: swq: TX'ing\n", __func__);
1435 		(*queue_to_head) = 1;
1436 		return (1);
1437 	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1438 	    && (type == IEEE80211_FC0_TYPE_MGT ||
1439 	        type == IEEE80211_FC0_TYPE_CTL)) {
1440 		/*
1441 		 * Other control/mgmt frame; bypass software queuing
1442 		 * for now!
1443 		 */
1444 		DPRINTF(sc, ATH_DEBUG_XMIT,
1445 		    "%s: %s: Node is asleep; sending mgmt "
1446 		    "(type=%d, subtype=%d)\n",
1447 		    __func__, ath_hal_ether_sprintf(ni->ni_macaddr),
1448 		    type, subtype);
1449 		return (0);
1450 	} else {
1451 		return (1);
1452 	}
1453 }
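/*
 * Decision summary for the above (illustrative):
 *
 *   powersave?  frame                result
 *   ----------  -------------------  ------------------------------
 *   no          BAR (ctl)            direct dispatch (returns 0)
 *   yes         BAR (ctl)            software queue, head (returns 1)
 *   yes         other mgmt/ctl       direct dispatch (returns 0)
 *   any         everything else      software queue (returns 1)
 */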
1454 
1455 
1456 /*
1457  * Transmit the given frame to the hardware.
1458  *
1459  * The frame must already be setup; rate control must already have
1460  * been done.
1461  *
1462  * XXX since the TXQ lock is being held here (and I dislike holding
1463  * it for this long when not doing software aggregation), later on
1464  * break this function into "setup_normal" and "xmit_normal". The
1465  * lock only needs to be held for the ath_tx_handoff call.
1466  *
1467  * XXX we don't update the leak count here - if we're doing
1468  * direct frame dispatch, we need to be able to do it without
1469  * decrementing the leak count (eg multicast queue frames.)
1470  */
1471 static void
1472 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1473     struct ath_buf *bf)
1474 {
1475 	struct ath_node *an = ATH_NODE(bf->bf_node);
1476 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1477 
1478 	ATH_TX_LOCK_ASSERT(sc);
1479 
1480 	/*
1481 	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1482 	 * set a completion handler, but it doesn't (yet) properly
1483 	 * handle the strict ordering requirements needed for normal,
1484 	 * non-aggregate session frames.
1485 	 *
1486 	 * Once this is implemented, only set CLRDMASK like this for
1487 	 * frames that must go out - eg management/raw frames.
1488 	 */
1489 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1490 
1491 	/* Setup the descriptor before handoff */
1492 	ath_tx_do_ratelookup(sc, bf);
1493 	ath_tx_calc_duration(sc, bf);
1494 	ath_tx_calc_protection(sc, bf);
1495 	ath_tx_set_rtscts(sc, bf);
1496 	ath_tx_rate_fill_rcflags(sc, bf);
1497 	ath_tx_setds(sc, bf);
1498 
1499 	/* Track per-TID hardware queue depth correctly */
1500 	tid->hwq_depth++;
1501 
1502 	/* Assign the completion handler */
1503 	bf->bf_comp = ath_tx_normal_comp;
1504 
1505 	/* Hand off to hardware */
1506 	ath_tx_handoff(sc, txq, bf);
1507 }
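/*
 * NB (illustrative): the setup sequence above is order-dependent -
 * the rate lookup must run first, since the duration, protection and
 * RTS/CTS calculations may depend on the chosen rate, and
 * ath_tx_setds() must run last as it writes the completed bf_state
 * into the descriptors.
 */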
1508 
1509 /*
1510  * Do the basic frame setup stuff that's required before the frame
1511  * is added to a software queue.
1512  *
1513  * All frames get mostly the same treatment and it's done once.
1514  * Retransmits fiddle with things like the rate control setup,
1515  * setting the retransmit bit in the packet, doing the relevant
1516  * DMA/bus syncing and relinking it (back) into the hardware TX queue.
1517  *
1518  * Note that this may cause the mbuf to be reallocated, so
1519  * m0 may not be valid.
1520  */
1521 static int
1522 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1523     struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1524 {
1525 	struct ieee80211vap *vap = ni->ni_vap;
1526 	struct ath_hal *ah = sc->sc_ah;
1527 	struct ifnet *ifp = sc->sc_ifp;
1528 	struct ieee80211com *ic = ifp->if_l2com;
1529 	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
1530 	int error, iswep, ismcast, isfrag, ismrr;
1531 	int keyix, hdrlen, pktlen, try0 = 0;
1532 	u_int8_t rix = 0, txrate = 0;
1533 	struct ath_desc *ds;
1534 	struct ieee80211_frame *wh;
1535 	u_int subtype, flags;
1536 	HAL_PKT_TYPE atype;
1537 	const HAL_RATE_TABLE *rt;
1538 	HAL_BOOL shortPreamble;
1539 	struct ath_node *an;
1540 	u_int pri;
1541 
1542 	/*
1543 	 * To ensure that both sequence numbering and CCMP PN handling
1544 	 * are "correct", make sure that the relevant TID queue is locked.
1545 	 * Otherwise the CCMP PN and seqno may appear out of order, causing
1546 	 * re-ordered frames to have out of order CCMP PN's, resulting
1547 	 * in many, many frame drops.
1548 	 */
1549 	ATH_TX_LOCK_ASSERT(sc);
1550 
1551 	wh = mtod(m0, struct ieee80211_frame *);
1552 	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
1553 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1554 	isfrag = m0->m_flags & M_FRAG;
1555 	hdrlen = ieee80211_anyhdrsize(wh);
1556 	/*
1557 	 * Packet length must not include any
1558 	 * pad bytes; deduct them here.
1559 	 */
1560 	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1561 
1562 	/* Handle encryption twiddling if needed */
1563 	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1564 	    &pktlen, &keyix)) {
1565 		ath_freetx(m0);
1566 		return EIO;
1567 	}
1568 
1569 	/* packet header may have moved, reset our local pointer */
1570 	wh = mtod(m0, struct ieee80211_frame *);
1571 
1572 	pktlen += IEEE80211_CRC_LEN;
1573 
1574 	/*
1575 	 * Load the DMA map so any coalescing is done.  This
1576 	 * also calculates the number of descriptors we need.
1577 	 */
1578 	error = ath_tx_dmasetup(sc, bf, m0);
1579 	if (error != 0)
1580 		return error;
1581 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1582 	bf->bf_node = ni;			/* NB: held reference */
1583 	m0 = bf->bf_m;				/* NB: may have changed */
1584 	wh = mtod(m0, struct ieee80211_frame *);
1585 
1586 	/* setup descriptors */
1587 	ds = bf->bf_desc;
1588 	rt = sc->sc_currates;
1589 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1590 
1591 	/*
1592 	 * NB: the 802.11 layer marks whether or not we should
1593 	 * use short preamble based on the current mode and
1594 	 * negotiated parameters.
1595 	 */
1596 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1597 	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1598 		shortPreamble = AH_TRUE;
1599 		sc->sc_stats.ast_tx_shortpre++;
1600 	} else {
1601 		shortPreamble = AH_FALSE;
1602 	}
1603 
1604 	an = ATH_NODE(ni);
1605 	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
1606 	flags = 0;
1607 	ismrr = 0;				/* default no multi-rate retry*/
1608 	pri = M_WME_GETAC(m0);			/* honor classification */
1609 	/* XXX use txparams instead of fixed values */
1610 	/*
1611 	 * Calculate Atheros packet type from IEEE80211 packet header,
1612 	 * setup for rate calculations, and select h/w transmit queue.
1613 	 */
1614 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1615 	case IEEE80211_FC0_TYPE_MGT:
1616 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1617 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1618 			atype = HAL_PKT_TYPE_BEACON;
1619 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1620 			atype = HAL_PKT_TYPE_PROBE_RESP;
1621 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1622 			atype = HAL_PKT_TYPE_ATIM;
1623 		else
1624 			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
1625 		rix = an->an_mgmtrix;
1626 		txrate = rt->info[rix].rateCode;
1627 		if (shortPreamble)
1628 			txrate |= rt->info[rix].shortPreamble;
1629 		try0 = ATH_TXMGTTRY;
1630 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1631 		break;
1632 	case IEEE80211_FC0_TYPE_CTL:
1633 		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
1634 		rix = an->an_mgmtrix;
1635 		txrate = rt->info[rix].rateCode;
1636 		if (shortPreamble)
1637 			txrate |= rt->info[rix].shortPreamble;
1638 		try0 = ATH_TXMGTTRY;
1639 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1640 		break;
1641 	case IEEE80211_FC0_TYPE_DATA:
1642 		atype = HAL_PKT_TYPE_NORMAL;		/* default */
1643 		/*
1644 		 * Data frames: multicast frames go out at a fixed rate,
1645 		 * EAPOL frames use the mgmt frame rate; otherwise consult
1646 		 * the rate control module for the rate to use.
1647 		 */
1648 		if (ismcast) {
1649 			rix = an->an_mcastrix;
1650 			txrate = rt->info[rix].rateCode;
1651 			if (shortPreamble)
1652 				txrate |= rt->info[rix].shortPreamble;
1653 			try0 = 1;
1654 		} else if (m0->m_flags & M_EAPOL) {
1655 			/* XXX? maybe always use long preamble? */
1656 			rix = an->an_mgmtrix;
1657 			txrate = rt->info[rix].rateCode;
1658 			if (shortPreamble)
1659 				txrate |= rt->info[rix].shortPreamble;
1660 			try0 = ATH_TXMAXTRY;	/* XXX? too many? */
1661 		} else {
1662 			/*
1663 			 * Do rate lookup on each TX, rather than using
1664 			 * the hard-coded TX information decided here.
1665 			 */
1666 			ismrr = 1;
1667 			bf->bf_state.bfs_doratelookup = 1;
1668 		}
1669 		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
1670 			flags |= HAL_TXDESC_NOACK;
1671 		break;
1672 	default:
1673 		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
1674 			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1675 		/* XXX statistic */
1676 		/* XXX free tx dmamap */
1677 		ath_freetx(m0);
1678 		return EIO;
1679 	}
1680 
1681 	/*
1682 	 * There are two known scenarios where the frame AC doesn't match
1683 	 * what the destination TXQ is.
1684 	 *
1685 	 * + non-QoS frames (eg management?) that the net80211 stack has
1686 	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
1687 	 *   being thrown into TID 16, which gets ATH_NONQOS_TID_AC (VO).
1688 	 *   It's quite possible that management frames should just be
1689 	 *   direct dispatched to hardware rather than go via the software
1690 	 *   queue; that should be investigated in the future.  There are
1691 	 *   some specific scenarios where this doesn't make sense, mostly
1692 	 *   surrounding ADDBA request/response - hence why that is special
1693 	 *   cased.
1694 	 *
1695 	 * + Multicast frames going into the VAP mcast queue.  That shows up
1696 	 *   as "TXQ 11".
1697 	 *
1698 	 * This driver should eventually support separate TID and TXQ locking,
1699 	 * allowing for arbitrary AC frames to appear on arbitrary software
1700 	 * queues, being queued to the "correct" hardware queue when needed.
1701 	 */
1702 #if 0
1703 	if (txq != sc->sc_ac2q[pri]) {
1704 		DPRINTF(sc, ATH_DEBUG_XMIT,
1705 		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1706 		    __func__,
1707 		    txq,
1708 		    txq->axq_qnum,
1709 		    pri,
1710 		    sc->sc_ac2q[pri],
1711 		    sc->sc_ac2q[pri]->axq_qnum);
1712 	}
1713 #endif
1714 
1715 	/*
1716 	 * Calculate miscellaneous flags.
1717 	 */
1718 	if (ismcast) {
1719 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
1720 	} else if (pktlen > vap->iv_rtsthreshold &&
1721 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1722 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
1723 		sc->sc_stats.ast_tx_rts++;
1724 	}
1725 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
1726 		sc->sc_stats.ast_tx_noack++;
1727 #ifdef IEEE80211_SUPPORT_TDMA
1728 	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1729 		DPRINTF(sc, ATH_DEBUG_TDMA,
1730 		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
1731 		sc->sc_stats.ast_tdma_ack++;
1732 		/* XXX free tx dmamap */
1733 		ath_freetx(m0);
1734 		return EIO;
1735 	}
1736 #endif
1737 
1738 	/*
1739 	 * Determine if a tx interrupt should be generated for
1740 	 * this descriptor.  We take a tx interrupt to reap
1741 	 * descriptors when the h/w hits an EOL condition or
1742 	 * when the descriptor is specifically marked to generate
1743 	 * an interrupt.  We periodically mark descriptors in this
1744 	 * way to ensure timely replenishing of the supply needed
1745 	 * for sending frames.  Deferring interrupts reduces system
1746 	 * load and potentially allows more concurrent work to be
1747 	 * done, but if done too aggressively it can cause senders
1748 	 * to back up.
1749 	 *
1750 	 * NB: use >= to deal with sc_txintrperiod changing
1751 	 *     dynamically through sysctl.
1752 	 */
1753 	if (flags & HAL_TXDESC_INTREQ) {
1754 		txq->axq_intrcnt = 0;
1755 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1756 		flags |= HAL_TXDESC_INTREQ;
1757 		txq->axq_intrcnt = 0;
1758 	}
1759 
1760 	/* This point forward is actual TX bits */
1761 
1762 	/*
1763 	 * At this point we are committed to sending the frame
1764 	 * and we don't need to look at m_nextpkt; clear it in
1765 	 * case this frame is part of a frag chain.
1766 	 */
1767 	m0->m_nextpkt = NULL;
1768 
1769 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1770 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1771 		    sc->sc_hwmap[rix].ieeerate, -1);
1772 
1773 	if (ieee80211_radiotap_active_vap(vap)) {
1774 		u_int64_t tsf = ath_hal_gettsf64(ah);
1775 
1776 		sc->sc_tx_th.wt_tsf = htole64(tsf);
1777 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1778 		if (iswep)
1779 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1780 		if (isfrag)
1781 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1782 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1783 		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1784 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1785 
1786 		ieee80211_radiotap_tx(vap, m0);
1787 	}
1788 
1789 	/* Blank the legacy rate array */
1790 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1791 
1792 	/*
1793 	 * ath_buf_set_rate needs at least one rate/try to setup
1794 	 * the rate scenario.
1795 	 */
1796 	bf->bf_state.bfs_rc[0].rix = rix;
1797 	bf->bf_state.bfs_rc[0].tries = try0;
1798 	bf->bf_state.bfs_rc[0].ratecode = txrate;
1799 
1800 	/* Store the decided rate index values away */
1801 	bf->bf_state.bfs_pktlen = pktlen;
1802 	bf->bf_state.bfs_hdrlen = hdrlen;
1803 	bf->bf_state.bfs_atype = atype;
1804 	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1805 	bf->bf_state.bfs_txrate0 = txrate;
1806 	bf->bf_state.bfs_try0 = try0;
1807 	bf->bf_state.bfs_keyix = keyix;
1808 	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1809 	bf->bf_state.bfs_txflags = flags;
1810 	bf->bf_state.bfs_shpream = shortPreamble;
1811 
1812 	/* XXX this should be done in ath_tx_setrate() */
1813 	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1814 	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1815 	bf->bf_state.bfs_ctsduration = 0;
1816 	bf->bf_state.bfs_ismrr = ismrr;
1817 
1818 	return 0;
1819 }
1820 
1821 /*
1822  * Queue a frame to the hardware or software queue.
1823  *
1824  * This can be called by the net80211 code.
1825  *
1826  * XXX what about locking? Or, push the seqno assign into the
1827  * XXX aggregate scheduler so it's serialised?
1828  *
1829  * XXX When sending management frames via ath_raw_xmit(),
1830  *     should CLRDMASK be set unconditionally?
1831  */
1832 int
1833 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1834     struct ath_buf *bf, struct mbuf *m0)
1835 {
1836 	struct ieee80211vap *vap = ni->ni_vap;
1837 	struct ath_vap *avp = ATH_VAP(vap);
1838 	int r = 0;
1839 	u_int pri;
1840 	int tid;
1841 	struct ath_txq *txq;
1842 	int ismcast;
1843 	const struct ieee80211_frame *wh;
1844 	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1845 	ieee80211_seq seqno;
1846 	uint8_t type, subtype;
1847 	int queue_to_head;
1848 
1849 	ATH_TX_LOCK_ASSERT(sc);
1850 
1851 	/*
1852 	 * Determine the target hardware queue.
1853 	 *
1854 	 * For multicast frames, the txq gets overridden appropriately
1855 	 * depending upon the state of PS.
1856 	 *
1857 	 * For any other frame, we do a TID/QoS lookup inside the frame
1858 	 * to see what the TID should be. If it's a non-QoS frame, the
1859 	 * AC and TID are overridden. The TID/TXQ code assumes the
1860 	 * TID is on a predictable hardware TXQ, so we don't support
1861 	 * having a node TID queued to multiple hardware TXQs.
1862 	 * This may change in the future but would require some locking
1863 	 * fudgery.
1864 	 */
1865 	pri = ath_tx_getac(sc, m0);
1866 	tid = ath_tx_gettid(sc, m0);
1867 
1868 	txq = sc->sc_ac2q[pri];
1869 	wh = mtod(m0, struct ieee80211_frame *);
1870 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1871 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1872 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1873 
1874 	/*
1875 	 * Enforce how deep the multicast queue can grow.
1876 	 *
1877 	 * XXX duplicated in ath_raw_xmit().
1878 	 */
1879 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1880 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1881 		    > sc->sc_txq_mcastq_maxdepth) {
1882 			sc->sc_stats.ast_tx_mcastq_overflow++;
1883 			m_freem(m0);
1884 			return (ENOBUFS);
1885 		}
1886 	}
1887 
1888 	/*
1889 	 * Enforce how deep the unicast queue can grow.
1890 	 *
1891 	 * If the node is in power save then we don't want
1892 	 * the software queue to grow too deep, or a node may
1893 	 * end up consuming all of the ath_buf entries.
1894 	 *
1895 	 * For now, only do this for DATA frames.
1896 	 *
1897 	 * We will want to cap how many management/control
1898 	 * frames get punted to the software queue so it doesn't
1899 	 * fill up.  But the correct solution isn't yet obvious.
1900 	 * In any case, this check should at least let frames pass
1901 	 * that we are direct-dispatching.
1902 	 *
1903 	 * XXX TODO: duplicate this to the raw xmit path!
1904 	 */
1905 	if (type == IEEE80211_FC0_TYPE_DATA &&
1906 	    ATH_NODE(ni)->an_is_powersave &&
1907 	    ATH_NODE(ni)->an_swq_depth >
1908 	     sc->sc_txq_node_psq_maxdepth) {
1909 		sc->sc_stats.ast_tx_node_psq_overflow++;
1910 		m_freem(m0);
1911 		return (ENOBUFS);
1912 	}
1913 
1914 	/* A-MPDU TX */
1915 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1916 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1917 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1918 
1919 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1920 	    __func__, tid, pri, is_ampdu);
1921 
1922 	/* Set local packet state, used to queue packets to hardware */
1923 	bf->bf_state.bfs_tid = tid;
1924 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1925 	bf->bf_state.bfs_pri = pri;
1926 
1927 #if 1
1928 	/*
1929 	 * When servicing one or more stations in power-save mode,
1930 	 * or if there is some mcast data waiting on the mcast
1931 	 * queue (to prevent out of order delivery), multicast
1932 	 * frames must be buffered until after the beacon.
1933 	 *
1934 	 * TODO: we should lock the mcastq before we check the length.
1935 	 */
1936 	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1937 		txq = &avp->av_mcastq;
1938 		/*
1939 		 * Mark the frame as eventually belonging on the CAB
1940 		 * queue, so the descriptor setup functions will
1941 		 * correctly initialise the descriptor 'qcuId' field.
1942 		 */
1943 		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1944 	}
1945 #endif
1946 
1947 	/* Do the generic frame setup */
1948 	/* XXX should just bzero the bf_state? */
1949 	bf->bf_state.bfs_dobaw = 0;
1950 
1951 	/* A-MPDU TX? Manually set sequence number */
1952 	/*
1953 	 * Don't do it whilst pending; the net80211 layer still
1954 	 * assigns them.
1955 	 */
1956 	if (is_ampdu_tx) {
1957 		/*
1958 		 * Always call; this function will
1959 		 * handle making sure that null data frames
1960 		 * don't get a sequence number from the current
1961 		 * TID and thus mess with the BAW.
1962 		 */
1963 		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1964 
1965 		/*
1966 		 * Don't add QoS NULL frames to the BAW.
1967 		 */
1968 		if (IEEE80211_QOS_HAS_SEQ(wh) &&
1969 		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
1970 			bf->bf_state.bfs_dobaw = 1;
1971 		}
1972 	}
1973 
1974 	/*
1975 	 * If needed, the sequence number has been assigned.
1976 	 * Squirrel it away somewhere easy to get to.
1977 	 */
1978 	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
1979 
1980 	/* Is ampdu pending? fetch the seqno and print it out */
1981 	if (is_ampdu_pending)
1982 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1983 		    "%s: tid %d: ampdu pending, seqno %d\n",
1984 		    __func__, tid, M_SEQNO_GET(m0));
1985 
1986 	/* This also sets up the DMA map */
1987 	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
1988 
1989 	if (r != 0)
1990 		goto done;
1991 
1992 	/* At this point m0 could have changed! */
1993 	m0 = bf->bf_m;
1994 
1995 #if 1
1996 	/*
1997 	 * If it's a multicast frame, do a direct-dispatch to the
1998 	 * destination hardware queue. Don't bother software
1999 	 * queuing it.
2000 	 */
2001 	/*
2002 	 * If it's a BAR frame, do a direct dispatch to the
2003 	 * destination hardware queue. Don't bother software
2004 	 * queuing it, as the TID will now be paused.
2005 	 * Sending a BAR frame can occur from the net80211 txa timer
2006 	 * (ie, retries) or from the ath txtask (completion call.)
2007 	 * It queues directly to hardware because the TID is paused
2008 	 * at this point (and won't be unpaused until the BAR has
2009 	 * either been TXed successfully or max retries has been
2010 	 * reached.)
2011 	 */
2012 	/*
2013 	 * Until things are better debugged - if this node is asleep
2014 	 * and we're sending it a non-BAR frame, direct dispatch it.
2015 	 * Why? Because we need to figure out what's actually being
2016 	 * sent - eg, during reassociation/reauthentication after
2017 	 * the node (last) disappeared whilst asleep, the driver should
2018 	 * have unpaused/unsleep'ed the node.  So until that is
2019 	 * sorted out, use this workaround.
2020 	 */
2021 	if (txq == &avp->av_mcastq) {
2022 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2023 		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2024 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2025 		ath_tx_xmit_normal(sc, txq, bf);
2026 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2027 	    &queue_to_head)) {
2028 		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2029 	} else {
2030 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2031 		ath_tx_xmit_normal(sc, txq, bf);
2032 	}
2033 #else
2034 	/*
2035 	 * For now, since there's no software queue,
2036 	 * direct-dispatch to the hardware.
2037 	 */
2038 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2039 	/*
2040 	 * Update the current leak count if
2041 	 * we're leaking frames; and set the
2042 	 * MORE flag as appropriate.
2043 	 */
2044 	ath_tx_leak_count_update(sc, tid, bf);
2045 	ath_tx_xmit_normal(sc, txq, bf);
2046 #endif
2047 done:
2048 	return 0;
2049 }
2050 
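/*
 * Set up and queue a raw (caller-parameterised) frame.
 *
 * This path is used by ath_raw_xmit() when the caller supplies an
 * explicit ieee80211_bpf_params describing the rates, tries, power
 * and flags to use, rather than having the driver interpret the
 * frame contents itself.
 */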
2051 static int
2052 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2053 	struct ath_buf *bf, struct mbuf *m0,
2054 	const struct ieee80211_bpf_params *params)
2055 {
2056 	struct ifnet *ifp = sc->sc_ifp;
2057 	struct ieee80211com *ic = ifp->if_l2com;
2058 	struct ath_hal *ah = sc->sc_ah;
2059 	struct ieee80211vap *vap = ni->ni_vap;
2060 	int error, ismcast, ismrr;
2061 	int keyix, hdrlen, pktlen, try0, txantenna;
2062 	u_int8_t rix, txrate;
2063 	struct ieee80211_frame *wh;
2064 	u_int flags;
2065 	HAL_PKT_TYPE atype;
2066 	const HAL_RATE_TABLE *rt;
2067 	struct ath_desc *ds;
2068 	u_int pri;
2069 	int o_tid = -1;
2070 	int do_override;
2071 	uint8_t type, subtype;
2072 	int queue_to_head;
2073 	struct ath_node *an = ATH_NODE(ni);
2074 
2075 	ATH_TX_LOCK_ASSERT(sc);
2076 
2077 	wh = mtod(m0, struct ieee80211_frame *);
2078 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2079 	hdrlen = ieee80211_anyhdrsize(wh);
2080 	/*
2081 	 * Packet length must not include any
2082 	 * pad bytes; deduct them here.
2083 	 */
2084 	/* XXX honor IEEE80211_BPF_DATAPAD */
2085 	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
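	/*
	 * NB: this assumes the 802.11 header was padded to a 32-bit
	 * boundary, in which case (hdrlen & 3) above is the pad byte
	 * count - eg a 26 byte QoS header carries 2 pad bytes that
	 * aren't transmitted over the air.
	 */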
2086 
2087 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2088 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2089 
2090 	ATH_KTR(sc, ATH_KTR_TX, 2,
2091 	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2092 
2093 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2094 	    __func__, ismcast);
2095 
2096 	pri = params->ibp_pri & 3;
2097 	/* Override pri if the frame isn't a QoS one */
2098 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2099 		pri = ath_tx_getac(sc, m0);
2100 
2101 	/* XXX If it's an ADDBA, override to the correct queue */
2102 	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2103 
2104 	/* Map ADDBA to the correct priority */
2105 	if (do_override) {
2106 #if 0
2107 		DPRINTF(sc, ATH_DEBUG_XMIT,
2108 		    "%s: overriding tid %d pri %d -> %d\n",
2109 		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2110 #endif
2111 		pri = TID_TO_WME_AC(o_tid);
2112 	}
2113 
2114 	/* Handle encryption twiddling if needed */
2115 	if (! ath_tx_tag_crypto(sc, ni,
2116 	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2117 	    &hdrlen, &pktlen, &keyix)) {
2118 		ath_freetx(m0);
2119 		return EIO;
2120 	}
2121 	/* packet header may have moved, reset our local pointer */
2122 	wh = mtod(m0, struct ieee80211_frame *);
2123 
2124 	/* Do the generic frame setup */
2125 	/* XXX should just bzero the bf_state? */
2126 	bf->bf_state.bfs_dobaw = 0;
2127 
2128 	error = ath_tx_dmasetup(sc, bf, m0);
2129 	if (error != 0)
2130 		return error;
2131 	m0 = bf->bf_m;				/* NB: may have changed */
2132 	wh = mtod(m0, struct ieee80211_frame *);
2133 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2134 	bf->bf_node = ni;			/* NB: held reference */
2135 
2136 	/* Always enable CLRDMASK for raw frames for now.. */
2137 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2138 	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2139 	if (params->ibp_flags & IEEE80211_BPF_RTS)
2140 		flags |= HAL_TXDESC_RTSENA;
2141 	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2142 		/* XXX assume 11g/11n protection? */
2143 		bf->bf_state.bfs_doprot = 1;
2144 		flags |= HAL_TXDESC_CTSENA;
2145 	}
2146 	/* XXX leave ismcast to injector? */
2147 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2148 		flags |= HAL_TXDESC_NOACK;
2149 
2150 	rt = sc->sc_currates;
2151 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2152 
2153 	/* Fetch first rate information */
2154 	rix = ath_tx_findrix(sc, params->ibp_rate0);
2155 	try0 = params->ibp_try0;
2156 
2157 	/*
2158 	 * Override EAPOL rate as appropriate.
2159 	 */
2160 	if (m0->m_flags & M_EAPOL) {
2161 		/* XXX? maybe always use long preamble? */
2162 		rix = an->an_mgmtrix;
2163 		try0 = ATH_TXMAXTRY;	/* XXX too many? */
2164 	}
2165 
2166 	txrate = rt->info[rix].rateCode;
2167 	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2168 		txrate |= rt->info[rix].shortPreamble;
2169 	sc->sc_txrix = rix;
2170 	ismrr = (params->ibp_try1 != 0);
2171 	txantenna = params->ibp_pri >> 2;
2172 	if (txantenna == 0)			/* XXX? */
2173 		txantenna = sc->sc_txantenna;
2174 
2175 	/*
2176 	 * Since ctsrate is fixed, store it away for later
2177 	 * use when the descriptor fields are being set.
2178 	 */
2179 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2180 		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2181 
2182 	/*
2183 	 * NB: we mark all packets as type PSPOLL so the h/w won't
2184 	 * set the sequence number, duration, etc.
2185 	 */
2186 	atype = HAL_PKT_TYPE_PSPOLL;
2187 
2188 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2189 		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2190 		    sc->sc_hwmap[rix].ieeerate, -1);
2191 
2192 	if (ieee80211_radiotap_active_vap(vap)) {
2193 		u_int64_t tsf = ath_hal_gettsf64(ah);
2194 
2195 		sc->sc_tx_th.wt_tsf = htole64(tsf);
2196 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2197 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2198 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2199 		if (m0->m_flags & M_FRAG)
2200 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2201 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2202 		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2203 		    ieee80211_get_node_txpower(ni));
2204 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2205 
2206 		ieee80211_radiotap_tx(vap, m0);
2207 	}
2208 
2209 	/*
2210 	 * Formulate first tx descriptor with tx controls.
2211 	 */
2212 	ds = bf->bf_desc;
2213 	/* XXX check return value? */
2214 
2215 	/* Store the decided rate index values away */
2216 	bf->bf_state.bfs_pktlen = pktlen;
2217 	bf->bf_state.bfs_hdrlen = hdrlen;
2218 	bf->bf_state.bfs_atype = atype;
2219 	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2220 	    ieee80211_get_node_txpower(ni));
2221 	bf->bf_state.bfs_txrate0 = txrate;
2222 	bf->bf_state.bfs_try0 = try0;
2223 	bf->bf_state.bfs_keyix = keyix;
2224 	bf->bf_state.bfs_txantenna = txantenna;
2225 	bf->bf_state.bfs_txflags = flags;
2226 	bf->bf_state.bfs_shpream =
2227 	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2228 
2229 	/* Set local packet state, used to queue packets to hardware */
2230 	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2231 	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2232 	bf->bf_state.bfs_pri = pri;
2233 
2234 	/* XXX this should be done in ath_tx_setrate() */
2235 	bf->bf_state.bfs_ctsrate = 0;
2236 	bf->bf_state.bfs_ctsduration = 0;
2237 	bf->bf_state.bfs_ismrr = ismrr;
2238 
2239 	/* Blank the legacy rate array */
2240 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2241 
2242 	bf->bf_state.bfs_rc[0].rix = rix;
2243 	bf->bf_state.bfs_rc[0].tries = try0;
2244 	bf->bf_state.bfs_rc[0].ratecode = txrate;
2245 
2246 	if (ismrr) {
2247 		int rix;
2248 
2249 		rix = ath_tx_findrix(sc, params->ibp_rate1);
2250 		bf->bf_state.bfs_rc[1].rix = rix;
2251 		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2252 
2253 		rix = ath_tx_findrix(sc, params->ibp_rate2);
2254 		bf->bf_state.bfs_rc[2].rix = rix;
2255 		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2256 
2257 		rix = ath_tx_findrix(sc, params->ibp_rate3);
2258 		bf->bf_state.bfs_rc[3].rix = rix;
2259 		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2260 	}
2261 	/*
2262 	 * All the required rate control decisions have been made;
2263 	 * fill in the rc flags.
2264 	 */
2265 	ath_tx_rate_fill_rcflags(sc, bf);
2266 
2267 	/* NB: no buffered multicast in power save support */
2268 
2269 	/*
2270 	 * If we're overriding the ADDBA destination, dump the frame
2271 	 * directly into the hardware queue, so it goes out after any
2272 	 * frames already pending to that node.
2273 	 */
2274 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2275 	    __func__, do_override);
2276 
2277 #if 1
2278 	/*
2279 	 * Put addba frames in the right place in the right TID/HWQ.
2280 	 */
2281 	if (do_override) {
2282 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2283 		/*
2284 		 * XXX if it's addba frames, should we be leaking
2285 		 * them out via the frame leak method?
2286 		 * XXX for now let's not risk it; but we may wish
2287 		 * to investigate this later.
2288 		 */
2289 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2290 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2291 	    &queue_to_head)) {
2292 		/* Queue to software queue */
2293 		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2294 	} else {
2295 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2296 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2297 	}
2298 #else
2299 	/* Direct-dispatch to the hardware */
2300 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2301 	/*
2302 	 * Update the current leak count if
2303 	 * we're leaking frames; and set the
2304 	 * MORE flag as appropriate.
2305 	 */
2306 	ath_tx_leak_count_update(sc, tid, bf);
2307 	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2308 #endif
2309 	return 0;
2310 }
2311 
2312 /*
2313  * Send a raw frame.
2314  *
2315  * This can be called by net80211.
2316  */
2317 int
2318 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2319 	const struct ieee80211_bpf_params *params)
2320 {
2321 	struct ieee80211com *ic = ni->ni_ic;
2322 	struct ifnet *ifp = ic->ic_ifp;
2323 	struct ath_softc *sc = ifp->if_softc;
2324 	struct ath_buf *bf;
2325 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2326 	int error = 0;
2327 
2328 	ATH_PCU_LOCK(sc);
2329 	if (sc->sc_inreset_cnt > 0) {
2330 		DPRINTF(sc, ATH_DEBUG_XMIT,
2331 		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2332 		error = EIO;
2333 		ATH_PCU_UNLOCK(sc);
2334 		goto badbad;
2335 	}
2336 	sc->sc_txstart_cnt++;
2337 	ATH_PCU_UNLOCK(sc);
2338 
2339 	/* Wake the hardware up already */
2340 	ath_power_set_power_state(sc, HAL_PM_AWAKE);
2341 
2342 	ATH_TX_LOCK(sc);
2343 
2344 	if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
2345 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s\n", __func__,
2346 		    (ifp->if_flags & IFF_RUNNING) == 0 ?
2347 			"!running" : "invalid");
2348 		m_freem(m);
2349 		error = ENETDOWN;
2350 		goto bad;
2351 	}
2352 
2353 	/*
2354 	 * Enforce how deep the multicast queue can grow.
2355 	 *
2356 	 * XXX duplicated in ath_tx_start().
2357 	 */
2358 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2359 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2360 		    > sc->sc_txq_mcastq_maxdepth) {
2361 			sc->sc_stats.ast_tx_mcastq_overflow++;
2362 			error = ENOBUFS;
2363 		}
2364 
2365 		if (error != 0) {
2366 			m_freem(m);
2367 			goto bad;
2368 		}
2369 	}
2370 
2371 	/*
2372 	 * Grab a TX buffer and associated resources.
2373 	 */
2374 	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2375 	if (bf == NULL) {
2376 		sc->sc_stats.ast_tx_nobuf++;
2377 		m_freem(m);
2378 		error = ENOBUFS;
2379 		goto bad;
2380 	}
2381 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2382 	    m, params,  bf);
2383 
2384 	if (params == NULL) {
2385 		/*
2386 		 * Legacy path; interpret frame contents to decide
2387 		 * precisely how to send the frame.
2388 		 */
2389 		if (ath_tx_start(sc, ni, bf, m)) {
2390 			error = EIO;		/* XXX */
2391 			goto bad2;
2392 		}
2393 	} else {
2394 		/*
2395 		 * Caller supplied explicit parameters to use in
2396 		 * sending the frame.
2397 		 */
2398 		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2399 			error = EIO;		/* XXX */
2400 			goto bad2;
2401 		}
2402 	}
2403 	sc->sc_wd_timer = 5;
2404 	ifp->if_opackets++;
2405 	sc->sc_stats.ast_tx_raw++;
2406 
2407 	/*
2408 	 * Update the TIM - if there's anything queued to the
2409 	 * software queue and power save is enabled, we should
2410 	 * set the TIM.
2411 	 */
2412 	ath_tx_update_tim(sc, ni, 1);
2413 
2414 	ATH_TX_UNLOCK(sc);
2415 
2416 	ATH_PCU_LOCK(sc);
2417 	sc->sc_txstart_cnt--;
2418 	ATH_PCU_UNLOCK(sc);
2419 
2421 	/* Put the hardware back to sleep if required */
2422 	ath_power_restore_power_state(sc);
2423 
2424 	return 0;
2425 
2426 bad2:
2427 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2428 	    "bf=%p",
2429 	    m,
2430 	    params,
2431 	    bf);
2432 	ATH_TXBUF_LOCK(sc);
2433 	ath_returnbuf_head(sc, bf);
2434 	ATH_TXBUF_UNLOCK(sc);
2435 
2436 bad:
2437 	ATH_TX_UNLOCK(sc);
2438 
2439 	ATH_PCU_LOCK(sc);
2440 	sc->sc_txstart_cnt--;
2441 	ATH_PCU_UNLOCK(sc);
2442 
2443 	/* Put the hardware back to sleep if required */
2444 	ath_power_restore_power_state(sc);
2445 
2446 badbad:
2447 	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2448 	    m, params);
2449 	ifp->if_oerrors++;
2450 	sc->sc_stats.ast_tx_raw_fail++;
2451 	ieee80211_free_node(ni);
2452 
2453 	return error;
2454 }
2455 
2456 /* Some helper functions */
2457 
2458 /*
2459  * ADDBA (and potentially others) need to be placed in the same
2460  * hardware queue as the TID/node it relates to. This is so
2461  * it goes out after any pending non-aggregate frames to the
2462  * same node/TID.
2463  *
2464  * If this isn't done, the ADDBA can go out before the frames
2465  * queued in hardware. Even though those frames have sequence
2466  * numbers -earlier- than the ADDBA (and no frames with later
2467  * sequence numbers should be queued yet!), they'd arrive after
2468  * the ADDBA - and the receiving end would simply drop them as
2469  * being outside the BAW.
2470  *
2471  * The frames can't be appended to the TID software queue - they'd
2472  * never be sent out. So these frames have to be directly
2473  * dispatched to the hardware, rather than queued in software.
2474  * So if this function returns true, the TXQ has to be
2475  * overridden and it has to be directly dispatched.
2476  *
2477  * It's a dirty hack, but someone's gotta do it.
2478  */
2479 
2480 /*
2481  * XXX doesn't belong here!
2482  */
2483 static int
2484 ieee80211_is_action(struct ieee80211_frame *wh)
2485 {
2486 	/* Type: Management frame? */
2487 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2488 	    IEEE80211_FC0_TYPE_MGT)
2489 		return 0;
2490 
2491 	/* Subtype: Action frame? */
2492 	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2493 	    IEEE80211_FC0_SUBTYPE_ACTION)
2494 		return 0;
2495 
2496 	return 1;
2497 }
2498 
2499 #define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
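/*
 * eg MS(baparamset, IEEE80211_BAPS_TID) pulls the TID field out of
 * the ADDBA parameter set, using the net80211 mask/shift (_S) pair.
 */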
2500 /*
2501  * Return an alternate TID for ADDBA request frames.
2502  *
2503  * Yes, this likely should be done in the net80211 layer.
2504  */
2505 static int
2506 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2507     struct ieee80211_node *ni,
2508     struct mbuf *m0, int *tid)
2509 {
2510 	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2511 	struct ieee80211_action_ba_addbarequest *ia;
2512 	uint8_t *frm;
2513 	uint16_t baparamset;
2514 
2515 	/* Not action frame? Bail */
2516 	if (! ieee80211_is_action(wh))
2517 		return 0;
2518 
2519 	/* XXX Not needed for frames we send? */
2520 #if 0
2521 	/* Correct length? */
2522 	if (! ieee80211_parse_action(ni, m))
2523 		return 0;
2524 #endif
2525 
2526 	/* Extract out action frame */
2527 	frm = (u_int8_t *)&wh[1];
2528 	ia = (struct ieee80211_action_ba_addbarequest *) frm;
2529 
2530 	/* Not ADDBA? Bail */
2531 	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2532 		return 0;
2533 	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2534 		return 0;
2535 
2536 	/* Extract TID, return it */
2537 	baparamset = le16toh(ia->rq_baparamset);
2538 	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2539 
2540 	return 1;
2541 }
2542 #undef	MS
2543 
2544 /* Per-node software queue operations */
2545 
2546 /*
2547  * Add the current packet to the given BAW.
2548  * It is assumed that the current packet
2549  *
2550  * + fits inside the BAW;
2551  * + already has had a sequence number allocated.
2552  *
2553  * Since the BAW status may be modified by both the ath task and
2554  * the net80211/ifnet contexts, the TID must be locked.
2555  */
2556 void
2557 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2558     struct ath_tid *tid, struct ath_buf *bf)
2559 {
2560 	int index, cindex;
2561 	struct ieee80211_tx_ampdu *tap;
2562 
2563 	ATH_TX_LOCK_ASSERT(sc);
2564 
2565 	if (bf->bf_state.bfs_isretried)
2566 		return;
2567 
2568 	tap = ath_tx_get_tx_tid(an, tid->tid);
2569 
2570 	if (! bf->bf_state.bfs_dobaw) {
2571 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2572 		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2573 		    __func__, SEQNO(bf->bf_state.bfs_seqno),
2574 		    tap->txa_start, tap->txa_wnd);
2575 	}
2576 
2577 	if (bf->bf_state.bfs_addedbaw)
2578 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2579 		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2580 		    "baw head=%d tail=%d\n",
2581 		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2582 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2583 		    tid->baw_tail);
2584 
2585 	/*
2586 	 * Verify that the given sequence number is not outside of the
2587 	 * BAW.  Complain loudly if that's the case.
2588 	 */
2589 	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2590 	    SEQNO(bf->bf_state.bfs_seqno))) {
2591 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2592 		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2593 		    "baw head=%d tail=%d\n",
2594 		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2595 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2596 		    tid->baw_tail);
2597 	}
2598 
2599 	/*
2600 	 * ni->ni_txseqs[] is the currently allocated seqno.
2601 	 * The txa state contains the current BAW start.
2602 	 */
2603 	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2604 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
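	/*
	 * Illustrative only: with txa_start=100, seqno=103 and
	 * baw_head=10, index is 3 and cindex is (10 + 3) masked
	 * into the tx_buf ring; both computations wrap modulo
	 * their respective power-of-two ranges.
	 */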
2605 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2606 	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2607 	    "baw head=%d tail=%d\n",
2608 	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2609 	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2610 	    tid->baw_tail);
2611 
2613 #if 0
2614 	assert(tid->tx_buf[cindex] == NULL);
2615 #endif
2616 	if (tid->tx_buf[cindex] != NULL) {
2617 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2618 		    "%s: ba packet dup (index=%d, cindex=%d, "
2619 		    "head=%d, tail=%d)\n",
2620 		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2621 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2622 		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2623 		    __func__,
2624 		    tid->tx_buf[cindex],
2625 		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2626 		    bf,
2627 		    SEQNO(bf->bf_state.bfs_seqno)
2628 		);
2629 	}
2630 	tid->tx_buf[cindex] = bf;
2631 
2632 	if (index >= ((tid->baw_tail - tid->baw_head) &
2633 	    (ATH_TID_MAX_BUFS - 1))) {
2634 		tid->baw_tail = cindex;
2635 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2636 	}
2637 }
2638 
2639 /*
2640  * Flip the BAW buffer entry over from the existing one to the new one.
2641  *
2642  * When software retransmitting a (sub-)frame, it is entirely possible that
2643  * the frame ath_buf is marked as BUSY and can't be immediately reused.
2644  * In that instance the buffer is cloned and the new buffer is used for
2645  * retransmit. We thus need to update the ath_buf slot in the BAW buf
2646  * tracking array to maintain consistency.
2647  */
2648 static void
2649 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2650     struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2651 {
2652 	int index, cindex;
2653 	struct ieee80211_tx_ampdu *tap;
2654 	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2655 
2656 	ATH_TX_LOCK_ASSERT(sc);
2657 
2658 	tap = ath_tx_get_tx_tid(an, tid->tid);
2659 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2660 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2661 
2662 	/*
2663 	 * Just warn for now; if it happens then we should find out
2664 	 * about it. It's highly likely the aggregation session will
2665 	 * soon hang.
2666 	 */
2667 	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2668 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2669 		    "%s: retransmitted buffer"
2670 		    " has mismatching seqno's, BA session may hang.\n",
2671 		    __func__);
2672 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2673 		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
2674 		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2675 	}
2676 
2677 	if (tid->tx_buf[cindex] != old_bf) {
2678 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2679 		    "%s: ath_buf pointer incorrect; "
2680 		    " has m BA session may hang.\n", __func__);
2681 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2682 		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2683 	}
2684 
2685 	tid->tx_buf[cindex] = new_bf;
2686 }
2687 
2688 /*
2689  * seq_start - left edge of BAW
2690  * seq_next - current/next sequence number to allocate
2691  *
2692  * Since the BAW status may be modified by both the ath task and
2693  * the net80211/ifnet contexts, the TID must be locked.
2694  */
2695 static void
2696 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2697     struct ath_tid *tid, const struct ath_buf *bf)
2698 {
2699 	int index, cindex;
2700 	struct ieee80211_tx_ampdu *tap;
2701 	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2702 
2703 	ATH_TX_LOCK_ASSERT(sc);
2704 
2705 	tap = ath_tx_get_tx_tid(an, tid->tid);
2706 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2707 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2708 
2709 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2710 	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2711 	    "baw head=%d, tail=%d\n",
2712 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2713 	    cindex, tid->baw_head, tid->baw_tail);
2714 
2715 	/*
2716 	 * If this occurs then we have a big problem - something else
2717 	 * has slid tap->txa_start along without updating the BAW
2718 	 * tracking start/end pointers. Thus the TX BAW state is now
2719 	 * completely busted.
2720 	 *
2721 	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2722 	 * it's quite possible that a cloned buffer is making its way
2723 	 * here and causing it to fire off. Disable TDMA for now.
2724 	 */
2725 	if (tid->tx_buf[cindex] != bf) {
2726 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2727 		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2728 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2729 		    tid->tx_buf[cindex],
2730 		    (tid->tx_buf[cindex] != NULL) ?
2731 		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2732 	}
2733 
2734 	tid->tx_buf[cindex] = NULL;
2735 
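	/*
	 * Walk the BAW left edge forward over any contiguous
	 * completed slots, advancing txa_start and baw_head in
	 * lock-step.
	 */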
2736 	while (tid->baw_head != tid->baw_tail &&
2737 	    !tid->tx_buf[tid->baw_head]) {
2738 		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2739 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2740 	}
2741 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2742 	    "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2743 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2744 }
2745 
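/*
 * If we're leaking frames to a station in power save, update the
 * MORE_DATA bit on the outbound frame to reflect whether anything
 * is still queued for it (in the net80211 power save queue or our
 * software queue), re-sync the DMA map, and consume a leak credit.
 */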
2746 static void
2747 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2748     struct ath_buf *bf)
2749 {
2750 	struct ieee80211_frame *wh;
2751 
2752 	ATH_TX_LOCK_ASSERT(sc);
2753 
2754 	if (tid->an->an_leak_count > 0) {
2755 		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2756 
2757 		/*
2758 		 * Update MORE based on the software/net80211 queue states.
2759 		 */
2760 		if ((tid->an->an_stack_psq > 0)
2761 		    || (tid->an->an_swq_depth > 0))
2762 			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2763 		else
2764 			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2765 
2766 		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2767 		    "%s: %s: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2768 		    __func__,
2769 		    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
2770 		    tid->an->an_leak_count,
2771 		    tid->an->an_stack_psq,
2772 		    tid->an->an_swq_depth,
2773 		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2774 
2775 		/*
2776 		 * Re-sync the underlying buffer.
2777 		 */
2778 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2779 		    BUS_DMASYNC_PREWRITE);
2780 
2781 		tid->an->an_leak_count--;
2782 	}
2783 }
2784 
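/*
 * Return whether this TID may transmit or be scheduled: a non-zero
 * leak count (ie, a PS-POLL response is owed) overrides the paused
 * state.
 */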
2785 static int
2786 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2787 {
2788 
2789 	ATH_TX_LOCK_ASSERT(sc);
2790 
2791 	if (tid->an->an_leak_count > 0) {
2792 		return (1);
2793 	}
2794 	if (tid->paused)
2795 		return (0);
2796 	return (1);
2797 }
2798 
2799 /*
2800  * Mark the current node/TID as ready to TX.
2801  *
2802  * This is done to make it easy for the software scheduler to
2803  * find which nodes have data to send.
2804  *
2805  * The TXQ lock must be held.
2806  */
2807 void
2808 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2809 {
2810 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2811 
2812 	ATH_TX_LOCK_ASSERT(sc);
2813 
2814 	/*
2815 	 * If we are leaking out a frame to this destination
2816 	 * for PS-POLL, ensure that we allow scheduling to
2817 	 * occur.
2818 	 */
2819 	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2820 		return;		/* paused, can't schedule yet */
2821 
2822 	if (tid->sched)
2823 		return;		/* already scheduled */
2824 
2825 	tid->sched = 1;
2826 
2827 #if 0
2828 	/*
2829 	 * If this is a sleeping node we're leaking to, give
2830 	 * it a higher priority.  This is so bad for QoS it hurts.
2831 	 */
2832 	if (tid->an->an_leak_count) {
2833 		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2834 	} else {
2835 		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2836 	}
2837 #endif
2838 
2839 	/*
2840 	 * We can't do the above - it'll confuse the TXQ software
2841 	 * scheduler which will keep checking the _head_ TID
2842 	 * in the list to see if it has traffic.  If we queue
2843 	 * a TID to the head of the list and it doesn't transmit,
2844 	 * we'll check it again.
2845 	 *
2846 	 * So, get the rest of this frame-leaking support working
2847 	 * reliably first and _then_ optimise it so frames are
2848 	 * pushed out in front of any other pending software
2849 	 * queued nodes.
2850 	 */
2851 	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2852 }
2853 
2854 /*
2855  * Mark the current node as no longer needing to be polled for
2856  * TX packets.
2857  *
2858  * The TXQ lock must be held.
2859  */
2860 static void
2861 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2862 {
2863 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2864 
2865 	ATH_TX_LOCK_ASSERT(sc);
2866 
2867 	if (tid->sched == 0)
2868 		return;
2869 
2870 	tid->sched = 0;
2871 	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2872 }
2873 
2874 /*
2875  * Assign a sequence number manually to the given frame.
2876  *
2877  * This should only be called for A-MPDU TX frames.
2878  */
2879 static ieee80211_seq
2880 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2881     struct ath_buf *bf, struct mbuf *m0)
2882 {
2883 	struct ieee80211_frame *wh;
2884 	int tid, pri;
2885 	ieee80211_seq seqno;
2886 	uint8_t subtype;
2887 
2888 	/* TID lookup */
2889 	wh = mtod(m0, struct ieee80211_frame *);
2890 	pri = M_WME_GETAC(m0);			/* honor classification */
2891 	tid = WME_AC_TO_TID(pri);
2892 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2893 	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2894 
2895 	/* XXX Is it a control frame? Ignore */
2896 
2897 	/* Does the packet require a sequence number? */
2898 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2899 		return -1;
2900 
2901 	ATH_TX_LOCK_ASSERT(sc);
2902 
2903 	/*
2904 	 * Is it a QoS NULL data frame? Give it a sequence number from
2905 	 * the default TID (IEEE80211_NONQOS_TID.)
2906 	 *
2907 	 * The RX path of everything I've looked at doesn't include the NULL
2908 	 * data frame sequence number in the aggregation state updates, so
2909 	 * assigning it a sequence number there will cause a BAW hole on the
2910 	 * RX side.
2911 	 */
2912 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2913 	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2914 		/* XXX no locking for this TID? This is a bit of a problem. */
2915 		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2916 		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2917 	} else {
2918 		/* Manually assign sequence number */
2919 		seqno = ni->ni_txseqs[tid];
2920 		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2921 	}
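	/*
	 * Write the sequence number into the sequence control field;
	 * the low four bits (the fragment number) stay zero here.
	 */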
2922 	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2923 	M_SEQNO_SET(m0, seqno);
2924 
2925 	/* Return so caller can do something with it if needed */
2926 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s:  -> seqno=%d\n", __func__, seqno);
2927 	return seqno;
2928 }
2929 
2930 /*
2931  * Attempt to direct dispatch an aggregate frame to hardware.
2932  * If the frame is out of BAW, queue.
2933  * Otherwise, schedule it as a single frame.
2934  */
2935 static void
2936 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2937     struct ath_txq *txq, struct ath_buf *bf)
2938 {
2939 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2940 	struct ieee80211_tx_ampdu *tap;
2941 
2942 	ATH_TX_LOCK_ASSERT(sc);
2943 
2944 	tap = ath_tx_get_tx_tid(an, tid->tid);
2945 
2946 	/* paused? queue */
2947 	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2948 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2949 		/* XXX don't sched - we're paused! */
2950 		return;
2951 	}
2952 
2953 	/* outside baw? queue */
2954 	if (bf->bf_state.bfs_dobaw &&
2955 	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2956 	    SEQNO(bf->bf_state.bfs_seqno)))) {
2957 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2958 		ath_tx_tid_sched(sc, tid);
2959 		return;
2960 	}
2961 
2962 	/*
2963 	 * This is a temporary check and should be removed once
2964 	 * all the relevant code paths have been fixed.
2965 	 *
2966 	 * During aggregate retries, it's possible that the head
2967 	 * frame will fail (which has the bfs_aggr and bfs_nframes
2968 	 * fields set for said aggregate) and will be retried as
2969 	 * a single frame.  In this instance, the values should
2970 	 * be reset or the completion code will get upset with you.
2971 	 */
2972 	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2973 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
2974 		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
2975 		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
2976 		bf->bf_state.bfs_aggr = 0;
2977 		bf->bf_state.bfs_nframes = 1;
2978 	}
2979 
2980 	/* Update CLRDMASK just before this frame is queued */
2981 	ath_tx_update_clrdmask(sc, tid, bf);
2982 
2983 	/* Direct dispatch to hardware */
2984 	ath_tx_do_ratelookup(sc, bf);
2985 	ath_tx_calc_duration(sc, bf);
2986 	ath_tx_calc_protection(sc, bf);
2987 	ath_tx_set_rtscts(sc, bf);
2988 	ath_tx_rate_fill_rcflags(sc, bf);
2989 	ath_tx_setds(sc, bf);
2990 
2991 	/* Statistics */
2992 	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
2993 
2994 	/* Track per-TID hardware queue depth correctly */
2995 	tid->hwq_depth++;
2996 
2997 	/* Add to BAW */
2998 	if (bf->bf_state.bfs_dobaw) {
2999 		ath_tx_addto_baw(sc, an, tid, bf);
3000 		bf->bf_state.bfs_addedbaw = 1;
3001 	}
3002 
3003 	/* Set completion handler, multi-frame aggregate or not */
3004 	bf->bf_comp = ath_tx_aggr_comp;
3005 
3006 	/*
3007 	 * Update the current leak count if
3008 	 * we're leaking frames; and set the
3009 	 * MORE flag as appropriate.
3010 	 */
3011 	ath_tx_leak_count_update(sc, tid, bf);
3012 
3013 	/* Hand off to hardware */
3014 	ath_tx_handoff(sc, txq, bf);
3015 }
3016 
3017 /*
3018  * Attempt to send the packet.
3019  * If the queue isn't busy, direct-dispatch.
3020  * If the queue is busy enough, queue the given packet on the
3021  *  relevant software queue.
3022  */
3023 void
3024 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3025     struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3026 {
3027 	struct ath_node *an = ATH_NODE(ni);
3028 	struct ieee80211_frame *wh;
3029 	struct ath_tid *atid;
3030 	int pri, tid;
3031 	struct mbuf *m0 = bf->bf_m;
3032 
3033 	ATH_TX_LOCK_ASSERT(sc);
3034 
3035 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3036 	wh = mtod(m0, struct ieee80211_frame *);
3037 	pri = ath_tx_getac(sc, m0);
3038 	tid = ath_tx_gettid(sc, m0);
3039 	atid = &an->an_tid[tid];
3040 
3041 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3042 	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3043 
3044 	/* Set local packet state, used to queue packets to hardware */
3045 	/* XXX potentially duplicate info, re-check */
3046 	bf->bf_state.bfs_tid = tid;
3047 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3048 	bf->bf_state.bfs_pri = pri;
3049 
3050 	/*
3051 	 * If the hardware queue isn't busy, direct-dispatch it.
3052 	 * If the hardware queue is busy, software queue it.
3053 	 * If the TID is paused or the traffic is outside the BAW,
3054 	 * software queue it.
3055 	 *
3056 	 * If the node is in power-save and we're leaking a frame,
3057 	 * leak a single frame.
3058 	 */
3059 	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3060 		/* TID is paused, queue */
3061 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3062 		/*
3063 		 * If the caller requested that it be sent at a high
3064 		 * priority, queue it at the head of the list.
3065 		 */
3066 		if (queue_to_head)
3067 			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3068 		else
3069 			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3070 	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3071 		/* AMPDU pending; queue */
3072 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3073 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3074 		/* XXX sched? */
3075 	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3076 		/* AMPDU running, attempt direct dispatch if possible */
3077 
3078 		/*
3079 		 * Always queue the frame to the tail of the list.
3080 		 */
3081 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3082 
3083 		/*
3084 		 * If the hardware queue isn't busy, direct dispatch
3085 		 * the head frame in the list.  Don't schedule the
3086 		 * TID - let it build some more frames first?
3087 		 *
3088 		 * When running A-MPDU, always just check the hardware
3089 		 * queue depth against the aggregate frame limit.
3090 		 * We don't want to burst a large number of single frames
3091 		 * out to the hardware; we want to aggressively hold back.
3092 		 *
3093 		 * Otherwise, schedule the TID.
3094 		 */
3095 		/* XXX TXQ locking */
3096 		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3097 			bf = ATH_TID_FIRST(atid);
3098 			ATH_TID_REMOVE(atid, bf, bf_list);
3099 
3100 			/*
3101 			 * Ensure it's definitely treated as a non-AMPDU
3102 			 * frame - this information may have been left
3103 			 * over from a previous attempt.
3104 			 */
3105 			bf->bf_state.bfs_aggr = 0;
3106 			bf->bf_state.bfs_nframes = 1;
3107 
3108 			/* Queue to the hardware */
3109 			ath_tx_xmit_aggr(sc, an, txq, bf);
3110 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3111 			    "%s: xmit_aggr\n",
3112 			    __func__);
3113 		} else {
3114 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3115 			    "%s: ampdu; swq'ing\n",
3116 			    __func__);
3117 
3118 			ath_tx_tid_sched(sc, atid);
3119 		}
3120 	/*
3121 	 * If we're not doing A-MPDU, be prepared to direct dispatch
3122 	 * up to both limits if possible.  This particular corner
3123 	 * case may end up with packet starvation between aggregate
3124 	 * traffic and non-aggregate traffic: we want to ensure
3125 	 * that non-aggregate stations get a few frames queued to the
3126 	 * hardware before the aggregate station(s) get their chance.
3127 	 *
3128 	 * So if you only ever see a couple of frames direct dispatched
3129 	 * to the hardware from a non-AMPDU client, check both here
3130 	 * and in the software queue dispatcher to ensure that those
3131 	 * non-AMPDU stations get a fair chance to transmit.
3132 	 */
3133 	/* XXX TXQ locking */
3134 	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3135 		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3136 		/* AMPDU not running, attempt direct dispatch */
3137 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3138 		/* See if clrdmask needs to be set */
3139 		ath_tx_update_clrdmask(sc, atid, bf);
3140 
3141 		/*
3142 		 * Update the current leak count if
3143 		 * we're leaking frames; and set the
3144 		 * MORE flag as appropriate.
3145 		 */
3146 		ath_tx_leak_count_update(sc, atid, bf);
3147 
3148 		/*
3149 		 * Dispatch the frame.
3150 		 */
3151 		ath_tx_xmit_normal(sc, txq, bf);
3152 	} else {
3153 		/* Busy; queue */
3154 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3155 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3156 		ath_tx_tid_sched(sc, atid);
3157 	}
3158 }
3159 
3160 /*
3161  * Only set the clrdmask bit if none of the node's TIDs are
3162  * currently filtered.
3163  *
3164  * XXX TODO: go through all the callers and check to see
3165  * which are being called in the context of looping over all
3166  * TIDs (eg, if all tids are being paused, resumed, etc.)
3167  * That'll avoid O(n^2) complexity here.
3168  */
3169 static void
3170 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3171 {
3172 	int i;
3173 
3174 	ATH_TX_LOCK_ASSERT(sc);
3175 
3176 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3177 		if (an->an_tid[i].isfiltered == 1)
3178 			return;
3179 	}
3180 	an->clrdmask = 1;
3181 }
3182 
3183 /*
3184  * Configure the per-TID node state.
3185  *
3186  * This likely belongs in if_ath_node.c but I can't think of anywhere
3187  * else to put it just yet.
3188  *
3189  * This sets up the TAILQs and the per-TID state as appropriate.
3190  */
3191 void
3192 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3193 {
3194 	int i, j;
3195 	struct ath_tid *atid;
3196 
3197 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3198 		atid = &an->an_tid[i];
3199 
3200 		/* XXX now with this bzero(), is the field 0'ing needed? */
3201 		bzero(atid, sizeof(*atid));
3202 
3203 		TAILQ_INIT(&atid->tid_q);
3204 		TAILQ_INIT(&atid->filtq.tid_q);
3205 		atid->tid = i;
3206 		atid->an = an;
3207 		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3208 			atid->tx_buf[j] = NULL;
3209 		atid->baw_head = atid->baw_tail = 0;
3210 		atid->paused = 0;
3211 		atid->sched = 0;
3212 		atid->hwq_depth = 0;
3213 		atid->cleanup_inprogress = 0;
3214 		if (i == IEEE80211_NONQOS_TID)
3215 			atid->ac = ATH_NONQOS_TID_AC;
3216 		else
3217 			atid->ac = TID_TO_WME_AC(i);
3218 	}
3219 	an->clrdmask = 1;	/* Always start by setting this bit */
3220 }
3221 
3222 /*
3223  * Pause the current TID. This stops packets from being transmitted
3224  * on it.
3225  *
3226  * Since this is called from upper layers as well as the driver,
3227  * it will get the TID lock.
3228  */
3229 static void
3230 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3231 {
3232 
3233 	ATH_TX_LOCK_ASSERT(sc);
3234 	tid->paused++;
3235 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%s]: tid=%d, paused = %d\n",
3236 	    __func__,
3237 	    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3238 	    tid->tid,
3239 	    tid->paused);
3240 }
3241 
3242 /*
3243  * Unpause the current TID, and schedule it if needed.
3244  */
3245 static void
3246 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3247 {
3248 	ATH_TX_LOCK_ASSERT(sc);
3249 
3250 	/*
3251 	 * There are some odd places where ath_tx_tid_resume() is called
3252 	 * when it shouldn't be; this works around that particular issue
3253 	 * until it's actually resolved.
3254 	 */
3255 	if (tid->paused == 0) {
3256 		device_printf(sc->sc_dev,
3257 			      "%s: [%s]: tid=%d, paused=0?\n",
3258 			      __func__,
3259 			      ath_hal_ether_sprintf(
3260 						tid->an->an_node.ni_macaddr),
3261 			      tid->tid);
3262 	} else {
3263 		tid->paused--;
3264 	}
3265 
3266 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3267 	    "%s: [%s]: tid=%d, unpaused = %d\n",
3268 	    __func__,
3269 	    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3270 	    tid->tid,
3271 	    tid->paused);
3272 
3273 	if (tid->paused)
3274 		return;
3275 
3276 	/*
3277 	 * Override the clrdmask configuration for the next frame
3278 	 * from this TID, just to get the ball rolling.
3279 	 */
3280 	ath_tx_set_clrdmask(sc, tid->an);
3281 
3282 	if (tid->axq_depth == 0)
3283 		return;
3284 
3285 	/* XXX isfiltered should never be 1 at this point */
3286 	if (tid->isfiltered == 1) {
3287 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3288 		    __func__);
3289 		return;
3290 	}
3291 
3292 	ath_tx_tid_sched(sc, tid);
3293 
3294 	/*
3295 	 * Queue the software TX scheduler.
3296 	 */
3297 	ath_tx_swq_kick(sc);
3298 }
3299 
3300 /*
3301  * Add the given ath_buf to the TID filtered frame list.
3302  * This requires the TID be filtered.
3303  */
3304 static void
3305 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3306     struct ath_buf *bf)
3307 {
3308 
3309 	ATH_TX_LOCK_ASSERT(sc);
3310 
3311 	if (!tid->isfiltered)
3312 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3313 		    __func__);
3314 
3315 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3316 
3317 	/* Set the retry bit and bump the retry counter */
3318 	ath_tx_set_retry(sc, bf);
3319 	sc->sc_stats.ast_tx_swfiltered++;
3320 
3321 	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3322 }
3323 
3324 /*
3325  * Handle a completed filtered frame from the given TID.
3326  * This just enables/pauses the filtered frame state if required
3327  * and appends the filtered frame to the filtered queue.
3328  */
3329 static void
3330 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3331     struct ath_buf *bf)
3332 {
3333 
3334 	ATH_TX_LOCK_ASSERT(sc);
3335 
3336 	if (! tid->isfiltered) {
3337 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3338 		    __func__, tid->tid);
3339 		tid->isfiltered = 1;
3340 		ath_tx_tid_pause(sc, tid);
3341 	}
3342 
3343 	/* Add the frame to the filter queue */
3344 	ath_tx_tid_filt_addbuf(sc, tid, bf);
3345 }
3346 
3347 /*
3348  * Complete the filtered frame TX completion.
3349  *
3350  * If there are no more frames in the hardware queue, unpause/unfilter
3351  * the TID if applicable.  Otherwise we will wait for a node PS transition
3352  * to unfilter.
3353  */
3354 static void
3355 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3356 {
3357 	struct ath_buf *bf;
3358 	int do_resume = 0;
3359 
3360 	ATH_TX_LOCK_ASSERT(sc);
3361 
3362 	if (tid->hwq_depth != 0)
3363 		return;
3364 
3365 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3366 	    __func__, tid->tid);
3367 	if (tid->isfiltered == 1) {
3368 		tid->isfiltered = 0;
3369 		do_resume = 1;
3370 	}
3371 
3372 	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3373 	ath_tx_set_clrdmask(sc, tid->an);
3374 
3375 	/* XXX this is really quite inefficient */
3376 	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3377 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3378 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3379 	}
3380 
3381 	/* And only resume if we had paused before */
3382 	if (do_resume)
3383 		ath_tx_tid_resume(sc, tid);
3384 }
3385 
3386 /*
3387  * Called when a single (aggregate or otherwise) frame is completed.
3388  *
3389  * Returns 0 if the buffer could be added to the filtered list
3390  * (cloned or otherwise), 1 if the buffer couldn't be added to the
3391  * filtered list (failed clone; expired retry) and the caller should
3392  * free it and handle it like a failure (eg by sending a BAR.)
3393  *
3394  * Since the buffer may be cloned, bf must not be touched after this
3395  * call if the return value is 0.
3396  */
3397 static int
3398 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3399     struct ath_buf *bf)
3400 {
3401 	struct ath_buf *nbf;
3402 	int retval;
3403 
3404 	ATH_TX_LOCK_ASSERT(sc);
3405 
3406 	/*
3407 	 * Don't allow a filtered frame to live forever.
3408 	 */
3409 	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3410 		sc->sc_stats.ast_tx_swretrymax++;
3411 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3412 		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3413 		    __func__,
3414 		    bf,
3415 		    SEQNO(bf->bf_state.bfs_seqno));
3416 		retval = 1; /* error */
3417 		goto finish;
3418 	}
3419 
3420 	/*
3421 	 * A busy buffer can't be added to the retry list.
3422 	 * It needs to be cloned.
3423 	 */
3424 	if (bf->bf_flags & ATH_BUF_BUSY) {
3425 		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3426 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3427 		    "%s: busy buffer clone: %p -> %p\n",
3428 		    __func__, bf, nbf);
3429 	} else {
3430 		nbf = bf;
3431 	}
3432 
3433 	if (nbf == NULL) {
3434 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3435 		    "%s: busy buffer couldn't be cloned (%p)!\n",
3436 		    __func__, bf);
3437 		retval = 1; /* error */
3438 	} else {
3439 		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3440 		retval = 0; /* ok */
3441 	}
3442 finish:
3443 	ath_tx_tid_filt_comp_complete(sc, tid);
3444 
3445 	return (retval);
3446 }
3447 
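/*
 * Handle completion of a filtered aggregate: walk the chain of
 * subframes, moving each onto the TID filtered frame list (cloning
 * any that are still busy); subframes that can't be cloned or have
 * exceeded the software retry cap are handed back on bf_q for the
 * caller to fail out (eg by sending a BAR.)
 */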
3448 static void
3449 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3450     struct ath_buf *bf_first, ath_bufhead *bf_q)
3451 {
3452 	struct ath_buf *bf, *bf_next, *nbf;
3453 
3454 	ATH_TX_LOCK_ASSERT(sc);
3455 
3456 	bf = bf_first;
3457 	while (bf) {
3458 		bf_next = bf->bf_next;
3459 		bf->bf_next = NULL;	/* Remove it from the aggr list */
3460 
3461 		/*
3462 		 * Don't allow a filtered frame to live forever.
3463 		 */
3464 		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3465 			sc->sc_stats.ast_tx_swretrymax++;
3466 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3467 			    "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3468 			    __func__,
3469 			    tid->tid,
3470 			    bf,
3471 			    SEQNO(bf->bf_state.bfs_seqno));
3472 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3473 			goto next;
3474 		}
3475 
3476 		if (bf->bf_flags & ATH_BUF_BUSY) {
3477 			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3478 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3479 			    "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3480 			    __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3481 		} else {
3482 			nbf = bf;
3483 		}
3484 
3485 		/*
3486 		 * If the buffer couldn't be cloned, add it to bf_q;
3487 		 * the caller will free the buffer(s) as required.
3488 		 */
3489 		if (nbf == NULL) {
3490 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3491 			    "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3492 			    __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3493 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3494 		} else {
3495 			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3496 		}
3497 next:
3498 		bf = bf_next;
3499 	}
3500 
3501 	ath_tx_tid_filt_comp_complete(sc, tid);
3502 }
3503 
3504 /*
3505  * Suspend the queue because we need to TX a BAR.
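 *
 * (The BAR state machine, as sketched by the surrounding routines:
 * bar_wait is set and the TID paused here; once the hardware queue
 * drains, ath_tx_tid_bar_tx() sets bar_tx and sends the BAR; the
 * completion path then calls ath_tx_tid_bar_unsuspend() to clear
 * both flags and resume the TID.)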
3506  */
3507 static void
3508 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3509 {
3510 
3511 	ATH_TX_LOCK_ASSERT(sc);
3512 
3513 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3514 	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3515 	    __func__,
3516 	    tid->tid,
3517 	    tid->bar_wait,
3518 	    tid->bar_tx);
3519 
3520 	/* We shouldn't be called when bar_tx is 1 */
3521 	if (tid->bar_tx) {
3522 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3523 		    "%s: bar_tx is 1?!\n", __func__);
3524 	}
3525 
3526 	/* If we've already been called, just be patient. */
3527 	if (tid->bar_wait)
3528 		return;
3529 
3530 	/* Wait! */
3531 	tid->bar_wait = 1;
3532 
3533 	/* Only one pause, no matter how many frames fail */
3534 	ath_tx_tid_pause(sc, tid);
3535 }
3536 
3537 /*
3538  * We've finished with BAR handling - either we succeeded or
3539  * failed. Either way, unsuspend TX.
3540  */
3541 static void
3542 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3543 {
3544 
3545 	ATH_TX_LOCK_ASSERT(sc);
3546 
3547 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3548 	    "%s: %s: TID=%d, called\n",
3549 	    __func__,
3550 	    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3551 	    tid->tid);
3552 
3553 	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3554 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3555 		    "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3556 		    __func__,
3557 		    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3558 		    tid->tid, tid->bar_tx, tid->bar_wait);
3559 	}
3560 
3561 	tid->bar_tx = tid->bar_wait = 0;
3562 	ath_tx_tid_resume(sc, tid);
3563 }
3564 
3565 /*
3566  * Return whether we're ready to TX a BAR frame.
3567  *
3568  * Requires the TID lock be held.
3569  */
3570 static int
3571 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3572 {
3573 
3574 	ATH_TX_LOCK_ASSERT(sc);
3575 
3576 	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3577 		return (0);
3578 
3579 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3580 	    "%s: %s: TID=%d, bar ready\n",
3581 	    __func__,
3582 	    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3583 	    tid->tid);
3584 
3585 	return (1);
3586 }
3587 
3588 /*
3589  * Check whether the current TID is ready to have a BAR
3590  * TXed and if so, do the TX.
3591  *
3592  * Since the TID/TXQ lock can't be held during a call to
3593  * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3594  * sending the BAR and locking it again.
3595  *
3596  * Eventually, the code to send the BAR should be broken out
3597  * from this routine so the lock doesn't have to be reacquired
3598  * just to be immediately dropped by the caller.
3599  */
3600 static void
3601 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3602 {
3603 	struct ieee80211_tx_ampdu *tap;
3604 
3605 	ATH_TX_LOCK_ASSERT(sc);
3606 
3607 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3608 	    "%s: %s: TID=%d, called\n",
3609 	    __func__,
3610 	    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3611 	    tid->tid);
3612 
3613 	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3614 
3615 	/*
3616 	 * This is an error condition!
3617 	 */
3618 	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3619 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3620 		    "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3621 		    __func__,
3622 		    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3623 		    tid->tid, tid->bar_tx, tid->bar_wait);
3624 		return;
3625 	}
3626 
3627 	/* Don't do anything if we still have pending frames */
3628 	if (tid->hwq_depth > 0) {
3629 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3630 		    "%s: %s: TID=%d, hwq_depth=%d, waiting\n",
3631 		    __func__,
3632 		    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3633 		    tid->tid,
3634 		    tid->hwq_depth);
3635 		return;
3636 	}
3637 
3638 	/* We're now about to TX */
3639 	tid->bar_tx = 1;
3640 
3641 	/*
3642 	 * Override the clrdmask configuration for the next frame,
3643 	 * just to get the ball rolling.
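	 *
	 * (CLRDMASK clears the hardware's per-destination filter bit,
	 * so frames to this station stop being filtered after an
	 * error.)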
3644 	 */
3645 	ath_tx_set_clrdmask(sc, tid->an);
3646 
3647 	/*
3648 	 * Calculate new BAW left edge, now that all frames have either
3649 	 * succeeded or failed.
3650 	 *
3651 	 * XXX verify this is _actually_ the valid value to begin at!
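	 *
	 * (The BAR below is sent with tap->txa_start as the new starting
	 * sequence number, asking the receiver to slide its block-ack
	 * window forward to match.)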
3652 	 */
3653 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3654 	    "%s: %s: TID=%d, new BAW left edge=%d\n",
3655 	    __func__,
3656 	    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3657 	    tid->tid,
3658 	    tap->txa_start);
3659 
3660 	/* Try sending the BAR frame */
3661 	/* We can't hold the lock here! */
3662 
3663 	ATH_TX_UNLOCK(sc);
3664 	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3665 		/* Success? Now we wait for notification that it's done */
3666 		ATH_TX_LOCK(sc);
3667 		return;
3668 	}
3669 
3670 	/* Failure? For now, warn loudly and continue */
3671 	ATH_TX_LOCK(sc);
3672 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3673 	    "%s: %s: TID=%d, failed to TX BAR, continue!\n",
3674 	    __func__, ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3675 	    tid->tid);
3676 	ath_tx_tid_bar_unsuspend(sc, tid);
3677 }
3678 
3679 static void
3680 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3681     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3682 {
3683 
3684 	ATH_TX_LOCK_ASSERT(sc);
3685 
3686 	/*
3687 	 * If the current TID is running AMPDU, update
3688 	 * the BAW.
3689 	 */
3690 	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3691 	    bf->bf_state.bfs_dobaw) {
3692 		/*
3693 		 * Only remove the frame from the BAW if it's
3694 		 * been transmitted at least once; this means
3695 		 * the frame was in the BAW to begin with.
3696 		 */
3697 		if (bf->bf_state.bfs_retries > 0) {
3698 			ath_tx_update_baw(sc, an, tid, bf);
3699 			bf->bf_state.bfs_dobaw = 0;
3700 		}
3701 #if 0
3702 		/*
3703 		 * This has become a non-fatal error now
3704 		 */
3705 		if (! bf->bf_state.bfs_addedbaw)
3706 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3707 			    "%s: wasn't added: seqno %d\n",
3708 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3709 #endif
3710 	}
3711 
3712 	/* Strip it out of an aggregate list if it was in one */
3713 	bf->bf_next = NULL;
3714 
3715 	/* Insert on the free queue to be freed by the caller */
3716 	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3717 }
3718 
3719 static void
3720 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3721     const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3722 {
3723 	struct ieee80211_node *ni = &an->an_node;
3724 	struct ath_txq *txq;
3725 	struct ieee80211_tx_ampdu *tap;
3726 
3727 	txq = sc->sc_ac2q[tid->ac];
3728 	tap = ath_tx_get_tx_tid(an, tid->tid);
3729 
3730 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3731 	    "%s: %s: %s: bf=%p: addbaw=%d, dobaw=%d, "
3732 	    "seqno=%d, retry=%d\n",
3733 	    __func__,
3734 	    pfx,
3735 	    ath_hal_ether_sprintf(ni->ni_macaddr),
3736 	    bf,
3737 	    bf->bf_state.bfs_addedbaw,
3738 	    bf->bf_state.bfs_dobaw,
3739 	    SEQNO(bf->bf_state.bfs_seqno),
3740 	    bf->bf_state.bfs_retries);
3741 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3742 	    "%s: %s: %s: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3743 	    __func__,
3744 	    pfx,
3745 	    ath_hal_ether_sprintf(ni->ni_macaddr),
3746 	    bf,
3747 	    txq->axq_qnum,
3748 	    txq->axq_depth,
3749 	    txq->axq_aggr_depth);
3750 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3751 	    "%s: %s: %s: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3752 	      "isfiltered=%d\n",
3753 	    __func__,
3754 	    pfx,
3755 	    ath_hal_ether_sprintf(ni->ni_macaddr),
3756 	    bf,
3757 	    tid->axq_depth,
3758 	    tid->hwq_depth,
3759 	    tid->bar_wait,
3760 	    tid->isfiltered);
3761 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3762 	    "%s: %s: %s: tid %d: "
3763 	    "sched=%d, paused=%d, "
3764 	    "incomp=%d, baw_head=%d, "
3765 	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3766 	     __func__,
3767 	     pfx,
3768 	     ath_hal_ether_sprintf(ni->ni_macaddr),
3769 	     tid->tid,
3770 	     tid->sched, tid->paused,
3771 	     tid->incomp, tid->baw_head,
3772 	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3773 	     ni->ni_txseqs[tid->tid]);
3774 
3775 	/* XXX Dump the frame, see what it is? */
3776 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3777 		ieee80211_dump_pkt(ni->ni_ic,
3778 		    mtod(bf->bf_m, const uint8_t *),
3779 		    bf->bf_m->m_len, 0, -1);
3780 }
3781 
3782 /*
3783  * Free any packets currently pending in the software TX queue.
3784  *
3785  * This will be called when a node is being deleted.
3786  *
3787  * It can also be called on an active node during an interface
3788  * reset or state transition.
3789  *
3790  * (From Linux/reference):
3791  *
3792  * TODO: For frame(s) that are in the retry state, we will reuse the
3793  * sequence number(s) without setting the retry bit. The
3794  * alternative is to give up on these and BAR the receiver's window
3795  * forward.
3796  */
3797 static void
3798 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3799     struct ath_tid *tid, ath_bufhead *bf_cq)
3800 {
3801 	struct ath_buf *bf;
3802 	struct ieee80211_tx_ampdu *tap;
3803 	struct ieee80211_node *ni = &an->an_node;
3804 	int t;
3805 
3806 	tap = ath_tx_get_tx_tid(an, tid->tid);
3807 
3808 	ATH_TX_LOCK_ASSERT(sc);
3809 
3810 	/* Walk the queue, free frames */
3811 	t = 0;
3812 	for (;;) {
3813 		bf = ATH_TID_FIRST(tid);
3814 		if (bf == NULL) {
3815 			break;
3816 		}
3817 
3818 		if (t == 0) {
3819 			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3820 //			t = 1;
3821 		}
3822 
3823 		ATH_TID_REMOVE(tid, bf, bf_list);
3824 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3825 	}
3826 
3827 	/* And now, drain the filtered frame queue */
3828 	t = 0;
3829 	for (;;) {
3830 		bf = ATH_TID_FILT_FIRST(tid);
3831 		if (bf == NULL)
3832 			break;
3833 
3834 		if (t == 0) {
3835 			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3836 //			t = 1;
3837 		}
3838 
3839 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3840 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3841 	}
3842 
3843 	/*
3844 	 * Override the clrdmask configuration for the next frame
3845 	 * in case there is some future transmission, just to get
3846 	 * the ball rolling.
3847 	 *
3848 	 * This won't hurt things if the TID is about to be freed.
3849 	 */
3850 	ath_tx_set_clrdmask(sc, tid->an);
3851 
3852 	/*
3853 	 * Now that it's completed, grab the TID lock and update
3854 	 * the sequence number and BAW window.
3855 	 * Because sequence numbers have been assigned to frames
3856 	 * that haven't been sent yet, it's entirely possible
3857 	 * we'll be called with some pending frames that have not
3858 	 * been transmitted.
3859 	 *
3860 	 * The cleaner solution is to do the sequence number allocation
3861 	 * when the packet is first transmitted - and thus the "retries"
3862 	 * check above would be enough to update the BAW/seqno.
3863 	 */
3864 
3865 	/* But don't do it for non-QoS TIDs */
3866 	if (tap) {
3867 #if 1
3868 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3869 		    "%s: %s: node %p: TID %d: sliding BAW left edge to %d\n",
3870 		    __func__,
3871 		    ath_hal_ether_sprintf(ni->ni_macaddr),
3872 		    an,
3873 		    tid->tid,
3874 		    tap->txa_start);
3875 #endif
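		/*
		 * (Concretely: rewinding ni_txseqs[] to tap->txa_start
		 * numbers the next frames from the BAW left edge, and
		 * baw_tail = baw_head empties the software BAW tracking.)
		 */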
3876 		ni->ni_txseqs[tid->tid] = tap->txa_start;
3877 		tid->baw_tail = tid->baw_head;
3878 	}
3879 }
3880 
3881 /*
3882  * Reset the TID state.  This must only be called once the node has
3883  * had its frames flushed from this TID, to ensure that no other
3884  * pause / unpause logic can kick in.
3885  */
3886 static void
3887 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3888 {
3889 
3890 #if 0
3891 	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3892 	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3893 	tid->incomp = tid->cleanup_inprogress = 0;
3894 #endif
3895 
3896 	/*
3897 	 * If we have a bar_wait set, we need to unpause the TID
3898 	 * here.  Otherwise once cleanup has finished, the TID won't
3899 	 * have the right paused counter.
3900 	 *
3901 	 * XXX I'm not going through resume here - I don't want the
3902 	 * node to be rescheduled just yet.  This however should be
3903 	 * methodized!
3904 	 */
3905 	if (tid->bar_wait) {
3906 		if (tid->paused > 0) {
3907 			tid->paused--;
3908 		}
3909 	}
3910 
3911 	/*
3912 	 * XXX same with a currently filtered TID.
3913 	 *
3914 	 * Since this is being called during a flush, we assume that
3915 	 * the filtered frame list is actually empty.
3916 	 *
3917 	 * XXX TODO: add in a check to ensure that the filtered queue
3918 	 * depth is actually 0!
3919 	 */
3920 	if (tid->isfiltered) {
3921 		if (tid->paused > 0) {
3922 			tid->paused--;
3923 		}
3924 	}
3925 
3926 	/*
3927 	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3928 	 * The TID may be going through cleanup from the last association
3929 	 * where things in the BAW are still in the hardware queue.
3930 	 */
3931 	tid->bar_wait = 0;
3932 	tid->bar_tx = 0;
3933 	tid->isfiltered = 0;
3934 	tid->sched = 0;
3935 	tid->addba_tx_pending = 0;
3936 
3937 	/*
3938 	 * XXX TODO: it may just be enough to walk the HWQs and mark
3939 	 * frames for that node as non-aggregate; or mark the ath_node
3940 	 * with something that indicates that aggregation is no longer
3941 	 * occurring.  Then we can just toss the BAW complaints and
3942 	 * do a complete hard reset of state here - no pause, no
3943 	 * complete counter, etc.
3944 	 */
3945 
3946 }
3947 
3948 /*
3949  * Flush all software queued packets for the given node.
3950  *
3951  * This occurs when a completion handler frees the last buffer
3952  * for a node, and the node is thus freed. This causes the node
3953  * to be cleaned up, which ends up calling ath_tx_node_flush.
3954  */
3955 void
3956 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3957 {
3958 	int tid;
3959 	ath_bufhead bf_cq;
3960 	struct ath_buf *bf;
3961 
3962 	TAILQ_INIT(&bf_cq);
3963 
3964 	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3965 	    &an->an_node);
3966 
3967 	ATH_TX_LOCK(sc);
3968 	DPRINTF(sc, ATH_DEBUG_NODE,
3969 	    "%s: %s: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3970 	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3971 	    __func__,
3972 	    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
3973 	    an->an_is_powersave,
3974 	    an->an_stack_psq,
3975 	    an->an_tim_set,
3976 	    an->an_swq_depth,
3977 	    an->clrdmask,
3978 	    an->an_leak_count);
3979 
3980 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3981 		struct ath_tid *atid = &an->an_tid[tid];
3982 
3983 		/* Free packets */
3984 		ath_tx_tid_drain(sc, an, atid, &bf_cq);
3985 
3986 		/* Remove this tid from the list of active tids */
3987 		ath_tx_tid_unsched(sc, atid);
3988 
3989 		/* Reset the per-TID pause, BAR, etc state */
3990 		ath_tx_tid_reset(sc, atid);
3991 	}
3992 
3993 	/*
3994 	 * Clear global leak count
3995 	 */
3996 	an->an_leak_count = 0;
3997 	ATH_TX_UNLOCK(sc);
3998 
3999 	/* Handle completed frames */
4000 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4001 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4002 		ath_tx_default_comp(sc, bf, 0);
4003 	}
4004 }
4005 
4006 /*
4007  * Drain all the software TXQs currently with traffic queued.
4008  */
4009 void
4010 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4011 {
4012 	struct ath_tid *tid;
4013 	ath_bufhead bf_cq;
4014 	struct ath_buf *bf;
4015 
4016 	TAILQ_INIT(&bf_cq);
4017 	ATH_TX_LOCK(sc);
4018 
4019 	/*
4020 	 * Iterate over all active tids for the given txq,
4021 	 * flushing and unsched'ing them
4022 	 */
4023 	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4024 		tid = TAILQ_FIRST(&txq->axq_tidq);
4025 		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4026 		ath_tx_tid_unsched(sc, tid);
4027 	}
4028 
4029 	ATH_TX_UNLOCK(sc);
4030 
4031 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4032 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4033 		ath_tx_default_comp(sc, bf, 0);
4034 	}
4035 }
4036 
4037 /*
4038  * Handle completion of non-aggregate session frames.
4039  *
4040  * This (currently) doesn't implement software retransmission of
4041  * non-aggregate frames!
4042  *
4043  * Software retransmission of non-aggregate frames needs to obey
4044  * the strict sequence number ordering, and drop any frames that
4045  * will fail this.
4046  *
4047  * For now, filtered frames and frame retransmission will cause
4048  * all kinds of issues.  So we don't support them.
4049  *
4050  * So anyone queuing frames via ath_tx_normal_xmit() or
4051  * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4052  */
4053 void
4054 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4055 {
4056 	struct ieee80211_node *ni = bf->bf_node;
4057 	struct ath_node *an = ATH_NODE(ni);
4058 	int tid = bf->bf_state.bfs_tid;
4059 	struct ath_tid *atid = &an->an_tid[tid];
4060 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4061 
4062 	/* The TID state is protected behind the TXQ lock */
4063 	ATH_TX_LOCK(sc);
4064 
4065 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4066 	    __func__, bf, fail, atid->hwq_depth - 1);
4067 
4068 	atid->hwq_depth--;
4069 
4070 #if 0
4071 	/*
4072 	 * If the frame was filtered, stick it on the filter frame
4073 	 * queue and complain about it.  It shouldn't happen!
4074 	 */
4075 	if ((ts->ts_status & HAL_TXERR_FILT) ||
4076 	    (ts->ts_status != 0 && atid->isfiltered)) {
4077 		DPRINTF(sc, ATH_DEBUG_SW_TX,
4078 		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4079 		    __func__,
4080 		    atid->isfiltered,
4081 		    ts->ts_status);
4082 		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4083 	}
4084 #endif
4085 	if (atid->isfiltered)
4086 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4087 	if (atid->hwq_depth < 0)
4088 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4089 		    __func__, atid->hwq_depth);
4090 
4091 	/* If the TID is being cleaned up, track things */
4092 	/* XXX refactor! */
4093 	if (atid->cleanup_inprogress) {
4094 		atid->incomp--;
4095 		if (atid->incomp == 0) {
4096 			DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4097 			    "%s: TID %d: cleaned up! resume!\n",
4098 			    __func__, tid);
4099 			atid->cleanup_inprogress = 0;
4100 			ath_tx_tid_resume(sc, atid);
4101 		}
4102 	}
4103 
4104 	/*
4105 	 * If the queue is filtered, potentially mark it as complete
4106 	 * and reschedule it as needed.
4107 	 *
4108 	 * This is required as there may be a subsequent TX descriptor
4109 	 * for this end-node that has CLRDMASK set, so it's quite possible
4110 	 * that a filtered frame will be followed by a non-filtered
4111 	 * (complete or otherwise) frame.
4112 	 *
4113 	 * XXX should we do this before we complete the frame?
4114 	 */
4115 	if (atid->isfiltered)
4116 		ath_tx_tid_filt_comp_complete(sc, atid);
4117 	ATH_TX_UNLOCK(sc);
4118 
4119 	/*
4120 	 * Punt to rate control if we're not being cleaned up
4121 	 * during a hw queue drain and the frame wanted an ACK.
4122 	 */
4123 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4124 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4125 		    ts, bf->bf_state.bfs_pktlen,
4126 		    1, (ts->ts_status == 0) ? 0 : 1);
4127 
4128 	ath_tx_default_comp(sc, bf, fail);
4129 }
4130 
4131 /*
4132  * Handle cleanup of aggregate session packets that aren't
4133  * an A-MPDU.
4134  *
4135  * There's no need to update the BAW here - the session is being
4136  * torn down.
4137  */
4138 static void
4139 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4140 {
4141 	struct ieee80211_node *ni = bf->bf_node;
4142 	struct ath_node *an = ATH_NODE(ni);
4143 	int tid = bf->bf_state.bfs_tid;
4144 	struct ath_tid *atid = &an->an_tid[tid];
4145 
4146 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4147 	    __func__, tid, atid->incomp);
4148 
4149 	ATH_TX_LOCK(sc);
4150 	atid->incomp--;
4151 
4152 	/* XXX refactor! */
4153 	if (bf->bf_state.bfs_dobaw) {
4154 		ath_tx_update_baw(sc, an, atid, bf);
4155 		if (!bf->bf_state.bfs_addedbaw)
4156 			DPRINTF(sc, ATH_DEBUG_SW_TX,
4157 			    "%s: wasn't added: seqno %d\n",
4158 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4159 	}
4160 
4161 	if (atid->incomp == 0) {
4162 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4163 		    "%s: TID %d: cleaned up! resume!\n",
4164 		    __func__, tid);
4165 		atid->cleanup_inprogress = 0;
4166 		ath_tx_tid_resume(sc, atid);
4167 	}
4168 	ATH_TX_UNLOCK(sc);
4169 
4170 	ath_tx_default_comp(sc, bf, 0);
4171 }
4172 
4173 
4174 /*
4175  * This, as it currently stands, is a bit dumb.  Ideally we'd just
4176  * fail the frame the normal way and have it permanently fail
4177  * via the normal aggregate completion path.
4178  */
4179 static void
4180 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4181     int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4182 {
4183 	struct ath_tid *atid = &an->an_tid[tid];
4184 	struct ath_buf *bf, *bf_next;
4185 
4186 	ATH_TX_LOCK_ASSERT(sc);
4187 
4188 	/*
4189 	 * Remove this frame from the queue.
4190 	 */
4191 	ATH_TID_REMOVE(atid, bf_head, bf_list);
4192 
4193 	/*
4194 	 * Loop over all the frames in the aggregate.
4195 	 */
4196 	bf = bf_head;
4197 	while (bf != NULL) {
4198 		bf_next = bf->bf_next;	/* next aggregate frame, or NULL */
4199 
4200 		/*
4201 		 * If it's been added to the BAW we need to kick
4202 		 * it out of the BAW before we continue.
4203 		 *
4204 		 * XXX if it's an aggregate, assert that it's in the
4205 		 * BAW - we shouldn't have it be in an aggregate
4206 		 * otherwise!
4207 		 */
4208 		if (bf->bf_state.bfs_addedbaw) {
4209 			ath_tx_update_baw(sc, an, atid, bf);
4210 			bf->bf_state.bfs_dobaw = 0;
4211 		}
4212 
4213 		/*
4214 		 * Give it the default completion handler.
4215 		 */
4216 		bf->bf_comp = ath_tx_normal_comp;
4217 		bf->bf_next = NULL;
4218 
4219 		/*
4220 		 * Add it to the list to free.
4221 		 */
4222 		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4223 
4224 		/*
4225 		 * Now advance to the next frame in the aggregate.
4226 		 */
4227 		bf = bf_next;
4228 	}
4229 }
4230 
4231 /*
4232  * Performs transmit side cleanup when TID changes from aggregated to
4233  * unaggregated and during reassociation.
4234  *
4235  * For now, this just tosses everything from the TID software queue
4236  * whether or not it has been retried and marks the TID as
4237  * pending completion if there's anything for this TID queued to
4238  * the hardware.
4239  *
4240  * The caller is responsible for pausing the TID and unpausing the
4241  * TID if no cleanup was required. Otherwise the cleanup path will
4242  * unpause the TID once the last hardware queued frame is completed.
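 *
 * (A sketch of the expected caller pattern: pause the TID, call this
 * routine, then resume the TID only if cleanup_inprogress is still 0
 * afterwards.)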
4243  */
4244 static void
4245 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4246     ath_bufhead *bf_cq)
4247 {
4248 	struct ath_tid *atid = &an->an_tid[tid];
4249 	struct ath_buf *bf, *bf_next;
4250 
4251 	ATH_TX_LOCK_ASSERT(sc);
4252 
4253 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4254 	    "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4255 	    atid->cleanup_inprogress);
4256 
4257 	/*
4258 	 * Move the filtered frames to the TX queue, before
4259 	 * we run off and discard/process things.
4260 	 */
4261 
4262 	/* XXX this is really quite inefficient */
4263 	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4264 		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4265 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4266 	}
4267 
4268 	/*
4269 	 * Update the frames in the software TX queue:
4270 	 *
4271 	 * + Discard retry frames in the queue
4272 	 * + Fix the completion function to be non-aggregate
4273 	 */
4274 	bf = ATH_TID_FIRST(atid);
4275 	while (bf) {
4276 		/*
4277 		 * Grab the next frame in the list, we may
4278 		 * be fiddling with the list.
4279 		 */
4280 		bf_next = TAILQ_NEXT(bf, bf_list);
4281 
4282 		/*
4283 		 * Free the frame and all subframes.
4284 		 */
4285 		ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4286 
4287 		/*
4288 		 * Next frame!
4289 		 */
4290 		bf = bf_next;
4291 	}
4292 
4293 	/*
4294 	 * If there's anything in the hardware queue we wait
4295 	 * for the TID HWQ to empty.
4296 	 */
4297 	if (atid->hwq_depth > 0) {
4298 		/*
4299 		 * XXX how about we kill atid->incomp, and instead
4300 		 * replace it with a macro that checks that atid->hwq_depth
4301 		 * is 0?
4302 		 */
4303 		atid->incomp = atid->hwq_depth;
4304 		atid->cleanup_inprogress = 1;
4305 	}
4306 
4307 	if (atid->cleanup_inprogress)
4308 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4309 		    "%s: TID %d: cleanup needed: %d packets\n",
4310 		    __func__, tid, atid->incomp);
4311 
4312 	/* Owner now must free completed frames */
4313 }
4314 
4315 static struct ath_buf *
4316 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4317     struct ath_tid *tid, struct ath_buf *bf)
4318 {
4319 	struct ath_buf *nbf;
4320 	int error;
4321 
4322 	/*
4323 	 * Clone the buffer.  This will handle the dma unmap and
4324 	 * copy the node reference to the new buffer.  If this
4325 	 * works out, 'bf' will have no DMA mapping, no mbuf
4326 	 * pointer and no node reference.
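	 *
	 * (ATH_BUF_BUSY indicates the hardware may still reference the
	 * old descriptor, so the buffer can't be reused in place;
	 * cloning lets the contents be retried while the old ath_buf
	 * stays on the busy list until it is safe to recycle.)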
4327 	 */
4328 	nbf = ath_buf_clone(sc, bf);
4329 
4330 #if 0
4331 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4332 	    __func__);
4333 #endif
4334 
4335 	if (nbf == NULL) {
4336 		/* Failed to clone */
4337 		DPRINTF(sc, ATH_DEBUG_XMIT,
4338 		    "%s: failed to clone a busy buffer\n",
4339 		    __func__);
4340 		return NULL;
4341 	}
4342 
4343 	/* Setup the dma for the new buffer */
4344 	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4345 	if (error != 0) {
4346 		DPRINTF(sc, ATH_DEBUG_XMIT,
4347 		    "%s: failed to setup dma for clone\n",
4348 		    __func__);
4349 		/*
4350 		 * Put this at the head of the list, not tail;
4351 		 * that way it doesn't interfere with the
4352 		 * busy buffer logic (which uses the tail of
4353 		 * the list.)
4354 		 */
4355 		ATH_TXBUF_LOCK(sc);
4356 		ath_returnbuf_head(sc, nbf);
4357 		ATH_TXBUF_UNLOCK(sc);
4358 		return NULL;
4359 	}
4360 
4361 	/* Update BAW if required, before we free the original buf */
4362 	if (bf->bf_state.bfs_dobaw)
4363 		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4364 
4365 	/* Free original buffer; return new buffer */
4366 	ath_freebuf(sc, bf);
4367 
4368 	return nbf;
4369 }
4370 
4371 /*
4372  * Handle retrying an unaggregate frame in an aggregate
4373  * session.
4374  *
4375  * If too many retries occur, pause the TID, wait for
4376  * any further retransmits (as there's no reason why
4377  * non-aggregate frames in an aggregate session are
4378  * transmitted in-order; they just have to be in-BAW)
4379  * and then queue a BAR.
4380  */
4381 static void
4382 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4383 {
4384 	struct ieee80211_node *ni = bf->bf_node;
4385 	struct ath_node *an = ATH_NODE(ni);
4386 	int tid = bf->bf_state.bfs_tid;
4387 	struct ath_tid *atid = &an->an_tid[tid];
4388 	struct ieee80211_tx_ampdu *tap;
4389 
4390 	ATH_TX_LOCK(sc);
4391 
4392 	tap = ath_tx_get_tx_tid(an, tid);
4393 
4394 	/*
4395 	 * If the buffer is marked as busy, we can't directly
4396 	 * reuse it. Instead, try to clone the buffer.
4397 	 * If the clone is successful, recycle the old buffer.
4398 	 * If the clone is unsuccessful, set bfs_retries to max
4399 	 * to force the next bit of code to free the buffer
4400 	 * for us.
4401 	 */
4402 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4403 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4404 		struct ath_buf *nbf;
4405 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4406 		if (nbf)
4407 			/* bf has been freed at this point */
4408 			bf = nbf;
4409 		else
4410 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4411 	}
4412 
4413 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4414 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4415 		    "%s: exceeded retries; seqno %d\n",
4416 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4417 		sc->sc_stats.ast_tx_swretrymax++;
4418 
4419 		/* Update BAW anyway */
4420 		if (bf->bf_state.bfs_dobaw) {
4421 			ath_tx_update_baw(sc, an, atid, bf);
4422 			if (! bf->bf_state.bfs_addedbaw)
4423 				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4424 				    "%s: wasn't added: seqno %d\n",
4425 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4426 		}
4427 		bf->bf_state.bfs_dobaw = 0;
4428 
4429 		/* Suspend the TX queue and get ready to send the BAR */
4430 		ath_tx_tid_bar_suspend(sc, atid);
4431 
4432 		/* Send the BAR if there are no other frames waiting */
4433 		if (ath_tx_tid_bar_tx_ready(sc, atid))
4434 			ath_tx_tid_bar_tx(sc, atid);
4435 
4436 		ATH_TX_UNLOCK(sc);
4437 
4438 		/* Free buffer, bf is free after this call */
4439 		ath_tx_default_comp(sc, bf, 0);
4440 		return;
4441 	}
4442 
4443 	/*
4444 	 * This increments the retry counter as well as
4445 	 * setting the retry flag in the ath_buf and packet
4446 	 * body.
4447 	 */
4448 	ath_tx_set_retry(sc, bf);
4449 	sc->sc_stats.ast_tx_swretries++;
4450 
4451 	/*
4452 	 * Insert this at the head of the queue, so it's
4453 	 * retried before any current/subsequent frames.
4454 	 */
4455 	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4456 	ath_tx_tid_sched(sc, atid);
4457 	/* Send the BAR if there are no other frames waiting */
4458 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4459 		ath_tx_tid_bar_tx(sc, atid);
4460 
4461 	ATH_TX_UNLOCK(sc);
4462 }
4463 
4464 /*
4465  * Common code for aggregate excessive retry/subframe retry.
4466  * If retrying, queues buffers to bf_q. If not, frees the
4467  * buffers.
4468  *
4469  * XXX should unify this with ath_tx_aggr_retry_unaggr()
4470  */
4471 static int
4472 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4473     ath_bufhead *bf_q)
4474 {
4475 	struct ieee80211_node *ni = bf->bf_node;
4476 	struct ath_node *an = ATH_NODE(ni);
4477 	int tid = bf->bf_state.bfs_tid;
4478 	struct ath_tid *atid = &an->an_tid[tid];
4479 
4480 	ATH_TX_LOCK_ASSERT(sc);
4481 
4482 	/* XXX clr11naggr should be done for all subframes */
4483 	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4484 	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4485 
4486 	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4487 
4488 	/*
4489 	 * If the buffer is marked as busy, we can't directly
4490 	 * reuse it. Instead, try to clone the buffer.
4491 	 * If the clone is successful, recycle the old buffer.
4492 	 * If the clone is unsuccessful, set bfs_retries to max
4493 	 * to force the next bit of code to free the buffer
4494 	 * for us.
4495 	 */
4496 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4497 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4498 		struct ath_buf *nbf;
4499 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4500 		if (nbf)
4501 			/* bf has been freed at this point */
4502 			bf = nbf;
4503 		else
4504 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4505 	}
4506 
4507 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4508 		sc->sc_stats.ast_tx_swretrymax++;
4509 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4510 		    "%s: max retries: seqno %d\n",
4511 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4512 		ath_tx_update_baw(sc, an, atid, bf);
4513 		if (!bf->bf_state.bfs_addedbaw)
4514 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4515 			    "%s: wasn't added: seqno %d\n",
4516 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4517 		bf->bf_state.bfs_dobaw = 0;
4518 		return 1;
4519 	}
4520 
4521 	ath_tx_set_retry(sc, bf);
4522 	sc->sc_stats.ast_tx_swretries++;
4523 	bf->bf_next = NULL;		/* Just to make sure */
4524 
4525 	/* Clear the aggregate state */
4526 	bf->bf_state.bfs_aggr = 0;
4527 	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4528 	bf->bf_state.bfs_nframes = 1;
4529 
4530 	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4531 	return 0;
4532 }
4533 
4534 /*
4535  * Error packet completion for an aggregate destination.
4536  */
4537 static void
4538 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4539     struct ath_tid *tid)
4540 {
4541 	struct ieee80211_node *ni = bf_first->bf_node;
4542 	struct ath_node *an = ATH_NODE(ni);
4543 	struct ath_buf *bf_next, *bf;
4544 	ath_bufhead bf_q;
4545 	int drops = 0;
4546 	struct ieee80211_tx_ampdu *tap;
4547 	ath_bufhead bf_cq;
4548 
4549 	TAILQ_INIT(&bf_q);
4550 	TAILQ_INIT(&bf_cq);
4551 
4552 	/*
4553 	 * Update rate control - all frames have failed.
4554 	 *
4555 	 * XXX use the length in the first frame in the series;
4556 	 * XXX just so things are consistent for now.
4557 	 */
4558 	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4559 	    &bf_first->bf_status.ds_txstat,
4560 	    bf_first->bf_state.bfs_pktlen,
4561 	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4562 
4563 	ATH_TX_LOCK(sc);
4564 	tap = ath_tx_get_tx_tid(an, tid->tid);
4565 	sc->sc_stats.ast_tx_aggr_failall++;
4566 
4567 	/* Retry all subframes */
4568 	bf = bf_first;
4569 	while (bf) {
4570 		bf_next = bf->bf_next;
4571 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4572 		sc->sc_stats.ast_tx_aggr_fail++;
4573 		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4574 			drops++;
4575 			bf->bf_next = NULL;
4576 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4577 		}
4578 		bf = bf_next;
4579 	}
4580 
4581 	/* Prepend all frames to the beginning of the queue */
4582 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4583 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4584 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4585 	}
4586 
4587 	/*
4588 	 * Schedule the TID to be re-tried.
4589 	 */
4590 	ath_tx_tid_sched(sc, tid);
4591 
4592 	/*
4593 	 * Send a BAR if we dropped any frames.
4594 	 *
4595 	 * Keep the txq lock held for now, as we need to ensure
4596 	 * that ni_txseqs[] is consistent (as it's being updated
4597 	 * in the ifnet TX context or raw TX context.)
4598 	 */
4599 	if (drops) {
4600 		/* Suspend the TX queue and get ready to send the BAR */
4601 		ath_tx_tid_bar_suspend(sc, tid);
4602 	}
4603 
4604 	/*
4605 	 * Send BAR if required
4606 	 */
4607 	if (ath_tx_tid_bar_tx_ready(sc, tid))
4608 		ath_tx_tid_bar_tx(sc, tid);
4609 
4610 	ATH_TX_UNLOCK(sc);
4611 
4612 	/* Complete frames which errored out */
4613 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4614 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4615 		ath_tx_default_comp(sc, bf, 0);
4616 	}
4617 }
4618 
4619 /*
4620  * Handle clean-up of packets from an aggregate list.
4621  *
4622  * There's no need to update the BAW here - the session is being
4623  * torn down.
4624  */
4625 static void
4626 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4627 {
4628 	struct ath_buf *bf, *bf_next;
4629 	struct ieee80211_node *ni = bf_first->bf_node;
4630 	struct ath_node *an = ATH_NODE(ni);
4631 	int tid = bf_first->bf_state.bfs_tid;
4632 	struct ath_tid *atid = &an->an_tid[tid];
4633 
4634 	ATH_TX_LOCK(sc);
4635 
4636 	/* update incomp */
4637 	atid->incomp--;
4638 
4639 	/* Update the BAW */
4640 	bf = bf_first;
4641 	while (bf) {
4642 		/* XXX refactor! */
4643 		if (bf->bf_state.bfs_dobaw) {
4644 			ath_tx_update_baw(sc, an, atid, bf);
4645 			if (!bf->bf_state.bfs_addedbaw)
4646 				DPRINTF(sc, ATH_DEBUG_SW_TX,
4647 				    "%s: wasn't added: seqno %d\n",
4648 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4649 		}
4650 		bf = bf->bf_next;
4651 	}
4652 
4653 	if (atid->incomp == 0) {
4654 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4655 		    "%s: TID %d: cleaned up! resume!\n",
4656 		    __func__, tid);
4657 		atid->cleanup_inprogress = 0;
4658 		ath_tx_tid_resume(sc, atid);
4659 	}
4660 
4661 	/* Send BAR if required */
4662 	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4663 	/*
4664 	 * XXX TODO: we should likely just tear down the BAR state here,
4665 	 * rather than sending a BAR.
4666 	 */
4667 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4668 		ath_tx_tid_bar_tx(sc, atid);
4669 
4670 	ATH_TX_UNLOCK(sc);
4671 
4672 	/* Handle frame completion as individual frames */
4673 	bf = bf_first;
4674 	while (bf) {
4675 		bf_next = bf->bf_next;
4676 		bf->bf_next = NULL;
4677 		ath_tx_default_comp(sc, bf, 1);
4678 		bf = bf_next;
4679 	}
4680 }
4681 
4682 /*
4683  * Handle completion of a set of aggregate frames.
4684  *
4685  * Note: the completion handler is the last descriptor in the aggregate,
4686  * not the last descriptor in the first frame.
4687  */
4688 static void
4689 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4690     int fail)
4691 {
4692 	//struct ath_desc *ds = bf->bf_lastds;
4693 	struct ieee80211_node *ni = bf_first->bf_node;
4694 	struct ath_node *an = ATH_NODE(ni);
4695 	int tid = bf_first->bf_state.bfs_tid;
4696 	struct ath_tid *atid = &an->an_tid[tid];
4697 	struct ath_tx_status ts;
4698 	struct ieee80211_tx_ampdu *tap;
4699 	ath_bufhead bf_q;
4700 	ath_bufhead bf_cq;
4701 	int seq_st, tx_ok;
4702 	int hasba, isaggr;
4703 	uint32_t ba[2];
4704 	struct ath_buf *bf, *bf_next;
4705 	int ba_index;
4706 	int drops = 0;
4707 	int nframes = 0, nbad = 0, nf;
4708 	int pktlen;
4709 	/* XXX there's too much on the stack? */
4710 	struct ath_rc_series rc[ATH_RC_NUM];
4711 	int txseq;
4712 
4713 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4714 	    __func__, atid->hwq_depth);
4715 
4716 	/*
4717 	 * Take a copy; this may be needed -after- bf_first
4718 	 * has been completed and freed.
4719 	 */
4720 	ts = bf_first->bf_status.ds_txstat;
4721 
4722 	TAILQ_INIT(&bf_q);
4723 	TAILQ_INIT(&bf_cq);
4724 
4725 	/* The TID state is kept behind the TXQ lock */
4726 	ATH_TX_LOCK(sc);
4727 
4728 	atid->hwq_depth--;
4729 	if (atid->hwq_depth < 0)
4730 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4731 		    __func__, atid->hwq_depth);
4732 
4733 	/*
4734 	 * If the TID is filtered, handle completing the filter
4735 	 * transition before potentially kicking it to the cleanup
4736 	 * function.
4737 	 *
4738 	 * XXX this is duplicate work, ew.
4739 	 */
4740 	if (atid->isfiltered)
4741 		ath_tx_tid_filt_comp_complete(sc, atid);
4742 
4743 	/*
4744 	 * Punt cleanup to the relevant function, not our problem now
4745 	 */
4746 	if (atid->cleanup_inprogress) {
4747 		if (atid->isfiltered)
4748 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4749 			    "%s: isfiltered=1, normal_comp?\n",
4750 			    __func__);
4751 		ATH_TX_UNLOCK(sc);
4752 		ath_tx_comp_cleanup_aggr(sc, bf_first);
4753 		return;
4754 	}
4755 
4756 	/*
4757 	 * If the frame is filtered, transition to filtered frame
4758 	 * mode and add this to the filtered frame list.
4759 	 *
4760 	 * XXX TODO: figure out how this interoperates with
4761 	 * BAR, pause and cleanup states.
4762 	 */
4763 	if ((ts.ts_status & HAL_TXERR_FILT) ||
4764 	    (ts.ts_status != 0 && atid->isfiltered)) {
4765 		if (fail != 0)
4766 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4767 			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4768 		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4769 
4770 		/* Remove from BAW */
4771 		TAILQ_FOREACH(bf, &bf_cq, bf_list) {
4772 			if (bf->bf_state.bfs_addedbaw)
4773 				drops++;
4774 			if (bf->bf_state.bfs_dobaw) {
4775 				ath_tx_update_baw(sc, an, atid, bf);
4776 				if (!bf->bf_state.bfs_addedbaw)
4777 					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4778 					    "%s: wasn't added: seqno %d\n",
4779 					    __func__,
4780 					    SEQNO(bf->bf_state.bfs_seqno));
4781 			}
4782 			bf->bf_state.bfs_dobaw = 0;
4783 		}
4784 		/*
4785 		 * If any intermediate frames in the BAW were dropped when
4786 		 * handling filtering things, send a BAR.
4787 		 */
4788 		if (drops)
4789 			ath_tx_tid_bar_suspend(sc, atid);
4790 
4791 		/*
4792 		 * Finish up by sending a BAR if required and freeing
4793 		 * the frames outside of the TX lock.
4794 		 */
4795 		goto finish_send_bar;
4796 	}
4797 
4798 	/*
4799 	 * XXX for now, use the first frame in the aggregate for
4800 	 * XXX rate control completion; it's at least consistent.
4801 	 */
4802 	pktlen = bf_first->bf_state.bfs_pktlen;
4803 
4804 	/*
4805 	 * Handle errors first!
4806 	 *
4807 	 * Here, handle _any_ error as an "exceeded retries" error.
4808 	 * Later on (when filtered frames are to be specially handled)
4809 	 * it'll have to be expanded.
4810 	 */
4811 #if 0
4812 	if (ts.ts_status & HAL_TXERR_XRETRY) {
4813 #endif
4814 	if (ts.ts_status != 0) {
4815 		ATH_TX_UNLOCK(sc);
4816 		ath_tx_comp_aggr_error(sc, bf_first, atid);
4817 		return;
4818 	}
4819 
4820 	tap = ath_tx_get_tx_tid(an, tid);
4821 
4822 	/*
4823 	 * extract starting sequence and block-ack bitmap
4824 	 */
4825 	/* XXX endian-ness of seq_st, ba? */
4826 	seq_st = ts.ts_seqnum;
4827 	hasba = !! (ts.ts_flags & HAL_TX_BA);
4828 	tx_ok = (ts.ts_status == 0);
4829 	isaggr = bf_first->bf_state.bfs_aggr;
4830 	ba[0] = ts.ts_ba_low;
4831 	ba[1] = ts.ts_ba_high;
4832 
4833 	/*
4834 	 * Copy the TX completion status and the rate control
4835 	 * series from the first descriptor, as it may be freed
4836 	 * before the rate control code can get its grubby fingers
4837 	 * into things.
4838 	 */
4839 	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4840 
4841 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4842 	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4843 	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4844 	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4845 	    isaggr, seq_st, hasba, ba[0], ba[1]);
4846 
4847 	/*
4848 	 * The reference driver doesn't do this; it simply ignores
4849 	 * this check in its entirety.
4850 	 *
4851 	 * I've seen this occur when using iperf to send traffic
4852 	 * out tid 1 - the aggregate frames are all marked as TID 1,
4853 	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4854 	 * check.
4855 	 */
4856 #if 0
4857 	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4858 	if (tid != ts.ts_tid) {
4859 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4860 		    __func__, tid, ts.ts_tid);
4861 		tx_ok = 0;
4862 	}
4863 #endif
4864 
4865 	/* AR5416 BA bug; this requires an interface reset */
4866 	if (isaggr && tx_ok && (! hasba)) {
4867 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4868 		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4869 		    "seq_st=%d\n",
4870 		    __func__, hasba, tx_ok, isaggr, seq_st);
4871 		/* XXX TODO: schedule an interface reset */
4872 #ifdef ATH_DEBUG
4873 		ath_printtxbuf(sc, bf_first,
4874 		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4875 #endif
4876 	}
4877 
4878 	/*
4879 	 * Walk the list of frames, figure out which ones were correctly
4880 	 * sent and which weren't.
4881 	 */
4882 	bf = bf_first;
4883 	nf = bf_first->bf_state.bfs_nframes;
4884 
4885 	/* bf_first is going to be invalid once this list is walked */
4886 	bf_first = NULL;
4887 
4888 	/*
4889 	 * Walk the list of completed frames and determine
4890 	 * which need to be completed and which need to be
4891 	 * retransmitted.
4892 	 *
4893 	 * For completed frames, the completion functions need
4894 	 * to be called at the end of this function as the last
4895 	 * node reference may free the node.
4896 	 *
4897 	 * Finally, since the TXQ lock can't be held during the
4898 	 * completion callback (to avoid lock recursion),
4899 	 * the completion calls have to be done outside of the
4900 	 * lock.
4901 	 */
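	/*
	 * (A sketch of the bitmap check below: with seq_st=100 and a
	 * subframe seqno of 103, ATH_BA_INDEX() yields 3, modulo the
	 * sequence space, and ATH_BA_ISSET() tests bit 3 of the 64-bit
	 * block-ack bitmap held in ba[0]/ba[1].)
	 */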
4902 	while (bf) {
4903 		nframes++;
4904 		ba_index = ATH_BA_INDEX(seq_st,
4905 		    SEQNO(bf->bf_state.bfs_seqno));
4906 		bf_next = bf->bf_next;
4907 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4908 
4909 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4910 		    "%s: checking bf=%p seqno=%d; ack=%d\n",
4911 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4912 		    ATH_BA_ISSET(ba, ba_index));
4913 
4914 		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4915 			sc->sc_stats.ast_tx_aggr_ok++;
4916 			ath_tx_update_baw(sc, an, atid, bf);
4917 			bf->bf_state.bfs_dobaw = 0;
4918 			if (!bf->bf_state.bfs_addedbaw)
4919 				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4920 				    "%s: wasn't added: seqno %d\n",
4921 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4922 			bf->bf_next = NULL;
4923 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4924 		} else {
4925 			sc->sc_stats.ast_tx_aggr_fail++;
4926 			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4927 				drops++;
4928 				bf->bf_next = NULL;
4929 				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4930 			}
4931 			nbad++;
4932 		}
4933 		bf = bf_next;
4934 	}
4935 
4936 	/*
4937 	 * Now that the BAW updates have been done, unlock
4938 	 *
4939 	 * txseq is grabbed before the lock is released so we
4940 	 * have a consistent view of what -was- in the BAW.
4941 	 * Anything after this point will not yet have been
4942 	 * TXed.
4943 	 */
4944 	txseq = tap->txa_start;
4945 	ATH_TX_UNLOCK(sc);
4946 
4947 	if (nframes != nf)
4948 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4949 		    "%s: num frames seen=%d; bf nframes=%d\n",
4950 		    __func__, nframes, nf);
4951 
4952 	/*
4953 	 * Now we know how many frames were bad, call the rate
4954 	 * control code.
4955 	 */
4956 	if (fail == 0)
4957 		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
4958 		    nbad);
4959 
4960 	/*
4961 	 * Send a BAR if we dropped any frames.
4962 	 */
4963 	if (drops) {
4964 		/* Suspend the TX queue and get ready to send the BAR */
4965 		ATH_TX_LOCK(sc);
4966 		ath_tx_tid_bar_suspend(sc, atid);
4967 		ATH_TX_UNLOCK(sc);
4968 	}
4969 
4970 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4971 	    "%s: txa_start now %d\n", __func__, tap->txa_start);
4972 
4973 	ATH_TX_LOCK(sc);
4974 
4975 	/* Prepend all frames to the beginning of the queue */
4976 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4977 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4978 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4979 	}
4980 
4981 	/*
4982 	 * Reschedule to grab some further frames.
4983 	 */
4984 	ath_tx_tid_sched(sc, atid);
4985 
4986 	/*
4987 	 * If the queue is filtered, re-schedule as required.
4988 	 *
4989 	 * This is required as there may be a subsequent TX descriptor
4990 	 * for this end-node that has CLRDMASK set, so it's quite possible
4991 	 * that a filtered frame will be followed by a non-filtered
4992 	 * (complete or otherwise) frame.
4993 	 *
4994 	 * XXX should we do this before we complete the frame?
4995 	 */
4996 	if (atid->isfiltered)
4997 		ath_tx_tid_filt_comp_complete(sc, atid);
4998 
4999 finish_send_bar:
5000 
5001 	/*
5002 	 * Send BAR if required
5003 	 */
5004 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5005 		ath_tx_tid_bar_tx(sc, atid);
5006 
5007 	ATH_TX_UNLOCK(sc);
5008 
5009 	/* Do deferred completion */
5010 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5011 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5012 		ath_tx_default_comp(sc, bf, 0);
5013 	}
5014 }
5015 
5016 /*
5017  * Handle completion of unaggregated frames in an ADDBA
5018  * session.
5019  *
5020  * Fail is set to 1 if the entry is being freed via a call to
5021  * ath_tx_draintxq().
5022  */
5023 static void
5024 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5025 {
5026 	struct ieee80211_node *ni = bf->bf_node;
5027 	struct ath_node *an = ATH_NODE(ni);
5028 	int tid = bf->bf_state.bfs_tid;
5029 	struct ath_tid *atid = &an->an_tid[tid];
5030 	struct ath_tx_status ts;
5031 	int drops = 0;
5032 
5033 	/*
5034 	 * Take a copy of this; filtering/cloning the frame may free the
5035 	 * bf pointer.
5036 	 */
5037 	ts = bf->bf_status.ds_txstat;
5038 
5039 	/*
5040 	 * Update rate control status here, before we possibly
5041 	 * punt to retry or cleanup.
5042 	 *
5043 	 * Do it outside of the TXQ lock.
5044 	 */
5045 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5046 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5047 		    &bf->bf_status.ds_txstat,
5048 		    bf->bf_state.bfs_pktlen,
5049 		    1, (ts.ts_status == 0) ? 0 : 1);
5050 
5051 	/*
5052 	 * The lock is taken early here so atid->hwq_depth can be tracked.
5053 	 * This unfortunately means that it's released and re-grabbed
5054 	 * during retry and cleanup.  That's rather inefficient.
5055 	 */
5056 	ATH_TX_LOCK(sc);
5057 
5058 	if (tid == IEEE80211_NONQOS_TID)
5059 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5060 
5061 	DPRINTF(sc, ATH_DEBUG_SW_TX,
5062 	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5063 	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5064 	    SEQNO(bf->bf_state.bfs_seqno));
5065 
5066 	atid->hwq_depth--;
5067 	if (atid->hwq_depth < 0)
5068 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5069 		    __func__, atid->hwq_depth);
5070 
5071 	/*
5072 	 * If the TID is filtered, handle completing the filter
5073 	 * transition before potentially kicking it to the cleanup
5074 	 * function.
5075 	 */
5076 	if (atid->isfiltered)
5077 		ath_tx_tid_filt_comp_complete(sc, atid);
5078 
5079 	/*
5080 	 * If a cleanup is in progress, punt to comp_cleanup
5081 	 * rather than handling it here.  It's then that path's
5082 	 * responsibility to clean up, call the completion
5083 	 * function in net80211, etc.
5084 	 */
5085 	if (atid->cleanup_inprogress) {
5086 		if (atid->isfiltered)
5087 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5088 			    "%s: isfiltered=1, normal_comp?\n",
5089 			    __func__);
5090 		ATH_TX_UNLOCK(sc);
5091 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5092 		    __func__);
5093 		ath_tx_comp_cleanup_unaggr(sc, bf);
5094 		return;
5095 	}
5096 
5097 	/*
5098 	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5099 	 * overlap?
5100 	 *
5101 	 * If the frame is filtered OR if it's any failure but
5102 	 * the TID is filtered, the frame must be added to the
5103 	 * filtered frame list.
5104 	 *
5105 	 * However - a busy buffer can't be added to the filtered
5106 	 * list as it will end up being recycled without having
5107 	 * been made available for the hardware.
5108 	 */
5109 	if ((ts.ts_status & HAL_TXERR_FILT) ||
5110 	    (ts.ts_status != 0 && atid->isfiltered)) {
5111 		int freeframe;
5112 
5113 		if (fail != 0)
5114 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5115 			    "%s: isfiltered=1, fail=%d\n",
5116 			    __func__, fail);
5117 		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5118 		/*
5119 		 * If freeframe=0 then bf is no longer ours; don't
5120 		 * touch it.
5121 		 */
5122 		if (freeframe) {
5123 			/* Remove from BAW */
5124 			if (bf->bf_state.bfs_addedbaw)
5125 				drops++;
5126 			if (bf->bf_state.bfs_dobaw) {
5127 				ath_tx_update_baw(sc, an, atid, bf);
5128 				if (!bf->bf_state.bfs_addedbaw)
5129 					DPRINTF(sc, ATH_DEBUG_SW_TX,
5130 					    "%s: wasn't added: seqno %d\n",
5131 					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5132 			}
5133 			bf->bf_state.bfs_dobaw = 0;
5134 		}
5135 
5136 		/*
5137 		 * If the frame couldn't be filtered, treat it as a drop and
5138 		 * prepare to send a BAR.
5139 		 */
5140 		if (freeframe && drops)
5141 			ath_tx_tid_bar_suspend(sc, atid);
5142 
5143 		/*
5144 		 * Send BAR if required
5145 		 */
5146 		if (ath_tx_tid_bar_tx_ready(sc, atid))
5147 			ath_tx_tid_bar_tx(sc, atid);
5148 
5149 		ATH_TX_UNLOCK(sc);
5150 		/*
5151 		 * If freeframe is set, then the frame couldn't be
5152 		 * cloned and bf is still valid.  Just complete/free it.
5153 		 */
5154 		if (freeframe)
5155 			ath_tx_default_comp(sc, bf, fail);
5156 
5157 		return;
5158 	}
5159 	/*
5160 	 * Don't bother with the retry check if all frames
5161 	 * are being failed (eg during queue deletion.)
5162 	 */
5163 #if 0
5164 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5165 #endif
5166 	if (fail == 0 && ts.ts_status != 0) {
5167 		ATH_TX_UNLOCK(sc);
5168 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5169 		    __func__);
5170 		ath_tx_aggr_retry_unaggr(sc, bf);
5171 		return;
5172 	}
5173 
5174 	/* Success? Complete */
5175 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5176 	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5177 	if (bf->bf_state.bfs_dobaw) {
5178 		ath_tx_update_baw(sc, an, atid, bf);
5179 		bf->bf_state.bfs_dobaw = 0;
5180 		if (!bf->bf_state.bfs_addedbaw)
5181 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5182 			    "%s: wasn't added: seqno %d\n",
5183 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5184 	}
5185 
5186 	/*
5187 	 * If the queue is filtered, re-schedule as required.
5188 	 *
5189 	 * This is required as there may be a subsequent TX descriptor
5190 	 * for this end-node that has CLRDMASK set, so it's quite possible
5191 	 * that a filtered frame will be followed by a non-filtered
5192 	 * (complete or otherwise) frame.
5193 	 *
5194 	 * XXX should we do this before we complete the frame?
5195 	 */
5196 	if (atid->isfiltered)
5197 		ath_tx_tid_filt_comp_complete(sc, atid);
5198 
5199 	/*
5200 	 * Send BAR if required
5201 	 */
5202 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5203 		ath_tx_tid_bar_tx(sc, atid);
5204 
5205 	ATH_TX_UNLOCK(sc);
5206 
5207 	ath_tx_default_comp(sc, bf, fail);
5208 	/* bf is freed at this point */
5209 }
5210 
5211 void
5212 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5213 {
5214 	if (bf->bf_state.bfs_aggr)
5215 		ath_tx_aggr_comp_aggr(sc, bf, fail);
5216 	else
5217 		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5218 }
5219 
5220 /*
5221  * Schedule some packets from the given node/TID to the hardware.
5222  *
5223  * This is the aggregate version.
5224  */
5225 void
5226 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5227     struct ath_tid *tid)
5228 {
5229 	struct ath_buf *bf;
5230 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5231 	struct ieee80211_tx_ampdu *tap;
5232 	ATH_AGGR_STATUS status;
5233 	ath_bufhead bf_q;
5234 
5235 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5236 	ATH_TX_LOCK_ASSERT(sc);
5237 
5238 	/*
5239 	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5240 	 * ensure we only leak one.
5241 	 */
5242 
5243 	tap = ath_tx_get_tx_tid(an, tid->tid);
5244 
5245 	if (tid->tid == IEEE80211_NONQOS_TID)
5246 		DPRINTF(sc, ATH_DEBUG_SW_TX,
5247 		    "%s: called for TID=NONQOS_TID?\n", __func__);
5248 
5249 	for (;;) {
5250 		status = ATH_AGGR_DONE;
5251 
5252 		/*
5253 		 * If the upper layer has paused the TID, don't
5254 		 * queue any further packets.
5255 		 *
5256 		 * This can also occur from the completion task because
5257 		 * of packet loss; but as it's serialised with this code,
5258 		 * it won't "appear" halfway through queuing packets.
5259 		 */
5260 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5261 			break;
5262 
5263 		bf = ATH_TID_FIRST(tid);
5264 		if (bf == NULL) {
5265 			break;
5266 		}
5267 
5268 		/*
5269 		 * If the packet doesn't fall within the BAW (eg a NULL
5270 		 * data frame), schedule it directly; continue.
5271 		 */
5272 		if (! bf->bf_state.bfs_dobaw) {
5273 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5274 			    "%s: non-baw packet\n",
5275 			    __func__);
5276 			ATH_TID_REMOVE(tid, bf, bf_list);
5277 
5278 			if (bf->bf_state.bfs_nframes > 1)
5279 				DPRINTF(sc, ATH_DEBUG_SW_TX,
5280 				    "%s: aggr=%d, nframes=%d\n",
5281 				    __func__,
5282 				    bf->bf_state.bfs_aggr,
5283 				    bf->bf_state.bfs_nframes);
5284 
5285 			/*
5286 			 * This shouldn't happen - such frames shouldn't
5287 			 * ever have been queued as an aggregate in the
5288 			 * first place.  However, make sure the fields
5289 			 * are correctly set up just to be totally sure.
5290 			 */
5291 			bf->bf_state.bfs_aggr = 0;
5292 			bf->bf_state.bfs_nframes = 1;
5293 
5294 			/* Update CLRDMASK just before this frame is queued */
5295 			ath_tx_update_clrdmask(sc, tid, bf);
5296 
5297 			ath_tx_do_ratelookup(sc, bf);
5298 			ath_tx_calc_duration(sc, bf);
5299 			ath_tx_calc_protection(sc, bf);
5300 			ath_tx_set_rtscts(sc, bf);
5301 			ath_tx_rate_fill_rcflags(sc, bf);
5302 			ath_tx_setds(sc, bf);
5303 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5304 
5305 			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5306 
5307 			/* Queue the packet; continue */
5308 			goto queuepkt;
5309 		}
5310 
5311 		TAILQ_INIT(&bf_q);
5312 
5313 		/*
5314 		 * Do a rate control lookup on the first frame in the
5315 		 * list. The rate control code needs that to occur
5316 		 * before it can determine whether to TX.
5317 		 * It's inaccurate because the rate control code doesn't
5318 		 * really "do" aggregate lookups, so it only considers
5319 		 * the size of the first frame.
5320 		 */
5321 		ath_tx_do_ratelookup(sc, bf);
5322 		bf->bf_state.bfs_rc[3].rix = 0;
5323 		bf->bf_state.bfs_rc[3].tries = 0;
5324 
5325 		ath_tx_calc_duration(sc, bf);
5326 		ath_tx_calc_protection(sc, bf);
5327 
5328 		ath_tx_set_rtscts(sc, bf);
5329 		ath_tx_rate_fill_rcflags(sc, bf);
5330 
5331 		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5332 
5333 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5334 		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5335 
5336 		/*
5337 		 * No frames to be picked up - out of BAW
5338 		 */
5339 		if (TAILQ_EMPTY(&bf_q))
5340 			break;
5341 
5342 		/*
5343 		 * This assumes that the descriptors in the ath_bufhead
5344 		 * are already linked together via bf_next pointers.
5345 		 */
5346 		bf = TAILQ_FIRST(&bf_q);
5347 
5348 		if (status == ATH_AGGR_8K_LIMITED)
5349 			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5350 
5351 		/*
5352 		 * If it's the only frame, send it as a non-aggregate;
5353 		 * assume that ath_tx_form_aggr() has checked
5354 		 * whether it's in the BAW and added it appropriately.
5355 		 */
5356 		if (bf->bf_state.bfs_nframes == 1) {
5357 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5358 			    "%s: single-frame aggregate\n", __func__);
5359 
5360 			/* Update CLRDMASK just before this frame is queued */
5361 			ath_tx_update_clrdmask(sc, tid, bf);
5362 
5363 			bf->bf_state.bfs_aggr = 0;
5364 			bf->bf_state.bfs_ndelim = 0;
5365 			ath_tx_setds(sc, bf);
5366 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5367 			if (status == ATH_AGGR_BAW_CLOSED)
5368 				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5369 			else
5370 				sc->sc_aggr_stats.aggr_single_pkt++;
5371 		} else {
5372 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5373 			    "%s: multi-frame aggregate: %d frames, "
5374 			    "length %d\n",
5375 			     __func__, bf->bf_state.bfs_nframes,
5376 			    bf->bf_state.bfs_al);
5377 			bf->bf_state.bfs_aggr = 1;
5378 			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5379 			sc->sc_aggr_stats.aggr_aggr_pkt++;
5380 
5381 			/* Update CLRDMASK just before this frame is queued */
5382 			ath_tx_update_clrdmask(sc, tid, bf);
5383 
5384 			/*
5385 			 * Calculate the duration/protection as required.
5386 			 */
5387 			ath_tx_calc_duration(sc, bf);
5388 			ath_tx_calc_protection(sc, bf);
5389 
5390 			/*
5391 			 * Update the rate and rtscts information based on the
5392 			 * rate decision made by the rate control code;
5393 			 * the first frame in the aggregate needs it.
5394 			 */
5395 			ath_tx_set_rtscts(sc, bf);
5396 
5397 			/*
5398 			 * Setup the relevant descriptor fields
5399 			 * for aggregation. The first descriptor
5400 			 * already points to the rest in the chain.
5401 			 */
5402 			ath_tx_setds_11n(sc, bf);
5403 
5404 		}
5405 	queuepkt:
5406 		/* Set completion handler, multi-frame aggregate or not */
5407 		bf->bf_comp = ath_tx_aggr_comp;
5408 
5409 		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5410 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5411 
5412 		/*
5413 		 * Update leak count and frame config if we're leaking frames.
5414 		 *
5415 		 * XXX TODO: it should update all frames in an aggregate
5416 		 * correctly!
5417 		 */
5418 		ath_tx_leak_count_update(sc, tid, bf);
5419 
5420 		/* Punt to txq */
5421 		ath_tx_handoff(sc, txq, bf);
5422 
5423 		/* Track outstanding buffer count to hardware */
5424 		/* aggregates are "one" buffer */
5425 		tid->hwq_depth++;
5426 
5427 		/*
5428 		 * Break out if ath_tx_form_aggr() indicated
5429 		 * there can't be any further progress (eg BAW is full.)
5430 		 * Checking for an empty TID queue is done above.
5431 		 *
5432 		 * XXX locking on txq here?
5433 		 */
5434 		/* XXX TXQ locking */
5435 		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5436 		    (status == ATH_AGGR_BAW_CLOSED ||
5437 		     status == ATH_AGGR_LEAK_CLOSED))
5438 			break;
5439 	}
5440 }
5441 
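/*
 * A sketch (not compiled) of the hardware-queue depth throttle used
 * at the bottom of the loop above and again in ath_txq_sched() below.
 * The fields and limits are from this driver; the helper function
 * itself is hypothetical.
 */
#if 0
static int
example_hwq_is_busy(struct ath_softc *sc, struct ath_txq *txq)
{
	/* Aggregate traffic is throttled against the aggregate limit. */
	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr)
		return (1);
	/* Non-aggregate traffic has its own, separate limit. */
	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr)
		return (1);
	return (0);
}
#endif
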
5442 /*
5443  * Schedule some packets from the given node/TID to the hardware.
5444  *
5445  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5446  * It just dumps frames into the TXQ.  We should limit how deep
5447  * the transmit queue can grow for frames dispatched to the given
5448  * TXQ.
5449  *
5450  * To avoid locking issues, either we need to own the TXQ lock
5451  * at this point, or we need to pass in the maximum frame count
5452  * from the caller.
5453  */
5454 void
5455 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5456     struct ath_tid *tid)
5457 {
5458 	struct ath_buf *bf;
5459 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5460 
5461 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5462 	    __func__, an, tid->tid);
5463 
5464 	ATH_TX_LOCK_ASSERT(sc);
5465 
5466 	/* Check: if AMPDU is pending or running, log it; neither is expected here */
5467 	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5468 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5469 		    __func__, tid->tid);
5470 	if (ath_tx_ampdu_running(sc, an, tid->tid))
5471 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5472 		    __func__, tid->tid);
5473 
5474 	for (;;) {
5475 
5476 		/*
5477 		 * If the upper layers have paused the TID, don't
5478 		 * queue any further packets.
5479 		 *
5480 		 * XXX if we are leaking frames, make sure we decrement
5481 		 * that counter _and_ we continue here.
5482 		 */
5483 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5484 			break;
5485 
5486 		bf = ATH_TID_FIRST(tid);
5487 		if (bf == NULL) {
5488 			break;
5489 		}
5490 
5491 		ATH_TID_REMOVE(tid, bf, bf_list);
5492 
5493 		/* Sanity check! */
5494 		if (tid->tid != bf->bf_state.bfs_tid) {
5495 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5496 			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
5497 			    tid->tid);
5498 		}
5499 		/* Normal completion handler */
5500 		bf->bf_comp = ath_tx_normal_comp;
5501 
5502 		/*
5503 		 * Override this for now, until the non-aggregate
5504 		 * completion handler correctly handles software retransmits.
5505 		 */
5506 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5507 
5508 		/* Update CLRDMASK just before this frame is queued */
5509 		ath_tx_update_clrdmask(sc, tid, bf);
5510 
5511 		/* Program descriptors + rate control */
5512 		ath_tx_do_ratelookup(sc, bf);
5513 		ath_tx_calc_duration(sc, bf);
5514 		ath_tx_calc_protection(sc, bf);
5515 		ath_tx_set_rtscts(sc, bf);
5516 		ath_tx_rate_fill_rcflags(sc, bf);
5517 		ath_tx_setds(sc, bf);
5518 
5519 		/*
5520 		 * Update the current leak count if
5521 		 * we're leaking frames; and set the
5522 		 * MORE flag as appropriate.
5523 		 */
5524 		ath_tx_leak_count_update(sc, tid, bf);
5525 
5526 		/* Track outstanding buffer count to hardware */
5527 		/* aggregates are "one" buffer */
5528 		tid->hwq_depth++;
5529 
5530 		/* Punt to hardware or software txq */
5531 		ath_tx_handoff(sc, txq, bf);
5532 	}
5533 }
5534 
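/*
 * A condensed sketch (not compiled) of the per-frame setup sequence
 * used by both queue functions above.  The ordering matters: the rate
 * lookup must come first, since the duration, protection and RTS/CTS
 * decisions all depend on the chosen rate.  All calls are functions
 * from this file; only the wrapper is hypothetical.
 */
#if 0
static void
example_frame_setup(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	ath_tx_update_clrdmask(sc, tid, bf);	/* CLRDMASK, just-in-time */
	ath_tx_do_ratelookup(sc, bf);		/* pick the TX rate */
	ath_tx_calc_duration(sc, bf);		/* duration needs the rate */
	ath_tx_calc_protection(sc, bf);		/* so does protection */
	ath_tx_set_rtscts(sc, bf);		/* RTS/CTS from protection */
	ath_tx_rate_fill_rcflags(sc, bf);	/* rate-control flags */
	ath_tx_setds(sc, bf);			/* write the descriptors */
}
#endif
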
5535 /*
5536  * Schedule some packets to the given hardware queue.
5537  *
5538  * This function walks the list of TIDs (ie, ath_node TIDs
5539  * with queued traffic) and attempts to schedule traffic
5540  * from them.
5541  *
5542  * TID scheduling is implemented as a FIFO, with TIDs being
5543  * added to the end of the queue after some frames have been
5544  * scheduled.
5545  */
5546 void
5547 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5548 {
5549 	struct ath_tid *tid, *last;
5550 
5551 	ATH_TX_LOCK_ASSERT(sc);
5552 
5553 	/*
5554 	 * Don't schedule if the hardware queue is busy.
5555 	 * This (hopefully) gives some more time to aggregate
5556 	 * some packets in the aggregation queue.
5557 	 *
5558 	 * XXX It doesn't stop a parallel sender from sneaking
5559 	 * in and transmitting a frame!
5560 	 */
5561 	/* XXX TXQ locking */
5562 	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5563 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5564 		return;
5565 	}
5566 	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5567 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5568 		return;
5569 	}
5570 
5571 	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5572 
5573 	while ((tid = TAILQ_FIRST(&txq->axq_tidq)) != NULL) {
5574 		/*
5575 		 * Suspend paused queues here; they'll be resumed
5576 		 * once the addba completes or times out.
5577 		 */
5578 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5579 		    __func__, tid->tid, tid->paused);
5580 		ath_tx_tid_unsched(sc, tid);
5581 		/*
5582 		 * This node may be in power-save and we're leaking
5583 		 * a frame; be careful.
5584 		 */
5585 		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5586 			if (tid == last)
5587 				break;
5588 			continue;
5589 		}
5590 		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5591 			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5592 		else
5593 			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5594 
5595 		/* Not empty? Re-schedule */
5596 		if (tid->axq_depth != 0)
5597 			ath_tx_tid_sched(sc, tid);
5598 
5599 		/*
5600 		 * Give the software queue time to aggregate more
5601 		 * packets.  If we aren't running aggregation then
5602 		 * we should still limit the hardware queue depth.
5603 		 */
5604 		/* XXX TXQ locking */
5605 		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5606 			break;
5607 		}
5608 		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5609 			break;
5610 		}
5611 
5612 		/*
5613 		 * If this was the last entry on the original list, stop.
5614 		 * Otherwise nodes that have been rescheduled onto the end
5615 		 * of the TID FIFO list will just keep being rescheduled.
5616 		 *
5617 		 * XXX What should we do about nodes that were paused
5618 		 * but are pending a leaking frame in response to a ps-poll?
5619 		 * They'll be put at the front of the list; so they'll
5620 		 * prematurely trigger this condition! Ew.
5621 		 */
5622 		if (tid == last)
5623 			break;
5624 	}
5625 }
5626 
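/*
 * Hypothetical usage (not compiled): ath_txq_sched() is meant to be
 * called with the TX lock held, typically after completions have
 * freed some hardware queue depth.  WME_AC_BE is the net80211 access
 * category; the call site itself is an assumption, not driver code.
 */
#if 0
	ATH_TX_LOCK(sc);
	ath_txq_sched(sc, sc->sc_ac2q[WME_AC_BE]);
	ATH_TX_UNLOCK(sc);
#endif
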
5627 /*
5628  * TX addba handling
5629  */
5630 
5631 /*
5632  * Return net80211 TID struct pointer, or NULL for none
5633  */
5634 struct ieee80211_tx_ampdu *
5635 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5636 {
5637 	struct ieee80211_node *ni = &an->an_node;
5638 	struct ieee80211_tx_ampdu *tap;
5639 
5640 	if (tid == IEEE80211_NONQOS_TID)
5641 		return NULL;
5642 
5643 	tap = &ni->ni_tx_ampdu[tid];
5644 	return tap;
5645 }
5646 
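/*
 * Hypothetical usage (not compiled): the return value must be
 * NULL-checked, since IEEE80211_NONQOS_TID carries no ampdu state.
 * The flag test mirrors ath_tx_ampdu_running() below.
 */
#if 0
	struct ieee80211_tx_ampdu *tap;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap != NULL && (tap->txa_flags & IEEE80211_AGGR_RUNNING)) {
		/* ... aggregation is active on this TID ... */
	}
#endif
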
5647 /*
5648  * Is AMPDU-TX running?
5649  */
5650 static int
5651 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5652 {
5653 	struct ieee80211_tx_ampdu *tap;
5654 
5655 	if (tid == IEEE80211_NONQOS_TID)
5656 		return 0;
5657 
5658 	tap = ath_tx_get_tx_tid(an, tid);
5659 	if (tap == NULL)
5660 		return 0;	/* Not valid; default to not running */
5661 
5662 	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5663 }
5664 
5665 /*
5666  * Is AMPDU-TX negotiation pending?
5667  */
5668 static int
5669 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5670 {
5671 	struct ieee80211_tx_ampdu *tap;
5672 
5673 	if (tid == IEEE80211_NONQOS_TID)
5674 		return 0;
5675 
5676 	tap = ath_tx_get_tx_tid(an, tid);
5677 	if (tap == NULL)
5678 		return 0;	/* Not valid; default to not pending */
5679 
5680 	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5681 }
5682 
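/*
 * A sketch (not compiled) of how the two predicates above typically
 * gate the software TX path; compare ath_txq_sched().  The TX lock
 * must be held; 'an', 'tid' and 'atid' are assumed locals and the
 * branch bodies are placeholders, not driver code.
 */
#if 0
	if (ath_tx_ampdu_running(sc, an, tid)) {
		/* Session negotiated: use the aggregate queue path. */
		ath_tx_tid_hw_queue_aggr(sc, an, atid);
	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
		/* ADDBA exchange still in flight: hold off for now. */
		;
	} else {
		/* Plain (non-aggregate) path. */
		ath_tx_tid_hw_queue_norm(sc, an, atid);
	}
#endif
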
5688 /*
5689  * Method to handle sending an ADDBA request.
5690  *
5691  * We tap this so the relevant flags can be set to pause the TID
5692  * whilst waiting for the response.
5693  *
5694  * XXX there's no timeout handler we can override?
5695  */
5696 int
5697 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5698     int dialogtoken, int baparamset, int batimeout)
5699 {
5700 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5701 	int tid = tap->txa_ac;
5702 	struct ath_node *an = ATH_NODE(ni);
5703 	struct ath_tid *atid = &an->an_tid[tid];
5704 
5705 	/*
5706 	 * XXX danger Will Robinson!
5707 	 *
5708 	 * Although the taskqueue may be running and scheduling some more
5709 	 * packets, these should all be _before_ the addba sequence number.
5710 	 * However, net80211 will keep self-assigning sequence numbers
5711 	 * until addba has been negotiated.
5712 	 *
5713 	 * In the past, these packets would be "paused" (which still works
5714 	 * fine, as they're being scheduled to the driver in the same
5715 	 * serialised method which is calling the addba request routine)
5716 	 * and when the aggregation session begins, they'll be dequeued
5717 	 * as aggregate packets and added to the BAW. However, now there's
5718 	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5719 	 * packets. Thus they never get included in the BAW tracking and
5720 	 * this can cause the initial burst of packets after the addba
5721 	 * negotiation to "hang", as they quickly fall outside the BAW.
5722 	 *
5723 	 * The "eventual" solution should be to tag these packets with
5724 	 * dobaw. Although net80211 has given us a sequence number,
5725 	 * it'll be "after" the left edge of the BAW and thus it'll
5726 	 * fall within it.
5727 	 */
5728 	ATH_TX_LOCK(sc);
5729 	/*
5730 	 * This is a bit annoying.  Until net80211 HT code inherits some
5731 	 * (any) locking, we may have this called in parallel BUT only
5732 	 * one response/timeout will be called.  Grr.
5733 	 */
5734 	if (atid->addba_tx_pending == 0) {
5735 		ath_tx_tid_pause(sc, atid);
5736 		atid->addba_tx_pending = 1;
5737 	}
5738 	ATH_TX_UNLOCK(sc);
5739 
5740 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5741 	    "%s: %s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5742 	    __func__,
5743 	    ath_hal_ether_sprintf(ni->ni_macaddr),
5744 	    dialogtoken, baparamset, batimeout);
5745 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5746 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5747 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5748 
5749 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5750 	    batimeout);
5751 }
5752 
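/*
 * A sketch (not compiled) of how these ADDBA wrappers are wired up:
 * the stock net80211 method pointers are saved into the softc and
 * then replaced, so the driver sees the exchange first and can
 * pause/unpause the TID.  Doing this at attach time, and the 'ic'
 * local, are assumptions here.
 */
#if 0
	/* Save the stock handlers... */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
	/* ...and interpose the wrappers defined in this file. */
	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
#endif
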
5753 /*
5754  * Handle an ADDBA response.
5755  *
5756  * We unpause the queue so TX'ing can resume.
5757  *
5758  * Any packets TX'ed from this point should be "aggregate" (whether
5759  * aggregate or not) so the BAW is updated.
5760  *
5761  * Note! net80211 keeps self-assigning sequence numbers until
5762  * ampdu is negotiated. This means the initially-negotiated BAW left
5763  * edge won't match the ni->ni_txseq.
5764  *
5765  * So, being very dirty, the BAW left edge is "slid" here to match
5766  * ni->ni_txseq.
5767  *
5768  * What likely SHOULD happen is that all packets subsequent to the
5769  * addba request should be tagged as aggregate and queued as non-aggregate
5770  * frames; thus updating the BAW. For now though, I'll just slide the
5771  * window.
5772  */
5773 int
5774 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5775     int status, int code, int batimeout)
5776 {
5777 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5778 	int tid = tap->txa_ac;
5779 	struct ath_node *an = ATH_NODE(ni);
5780 	struct ath_tid *atid = &an->an_tid[tid];
5781 	int r;
5782 
5783 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5784 	    "%s: %s: called; status=%d, code=%d, batimeout=%d\n", __func__,
5785 	    ath_hal_ether_sprintf(ni->ni_macaddr),
5786 	    status, code, batimeout);
5787 
5788 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5789 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5790 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5791 
5792 	/*
5793 	 * Call this first, so the interface flags get updated
5794 	 * before the TID is unpaused. Otherwise a race condition
5795 	 * exists where the unpaused TID still doesn't yet have
5796 	 * IEEE80211_AGGR_RUNNING set.
5797 	 */
5798 	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5799 
5800 	ATH_TX_LOCK(sc);
5801 	atid->addba_tx_pending = 0;
5802 	/*
5803 	 * XXX dirty!
5804 	 * Slide the BAW left edge to wherever net80211 left it for us.
5805 	 * Read above for more information.
5806 	 */
5807 	tap->txa_start = ni->ni_txseqs[tid];
5808 	ath_tx_tid_resume(sc, atid);
5809 	ATH_TX_UNLOCK(sc);
5810 	return r;
5811 }
5812 
5813 
5814 /*
5815  * Stop ADDBA on a queue.
5816  *
5817  * This can be called whilst BAR TX is currently active on the queue,
5818  * so make sure this is unblocked before continuing.
5819  */
5820 void
5821 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5822 {
5823 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5824 	int tid = tap->txa_ac;
5825 	struct ath_node *an = ATH_NODE(ni);
5826 	struct ath_tid *atid = &an->an_tid[tid];
5827 	ath_bufhead bf_cq;
5828 	struct ath_buf *bf;
5829 
5830 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %s: called\n",
5831 	    __func__,
5832 	    ath_hal_ether_sprintf(ni->ni_macaddr));
5833 
5834 	/*
5835 	 * Pause TID traffic early, so there aren't any races.
5836 	 * Unblock the pending BAR-held traffic, if it's currently paused.
5837 	 */
5838 	ATH_TX_LOCK(sc);
5839 	ath_tx_tid_pause(sc, atid);
5840 	if (atid->bar_wait) {
5841 		/*
5842 		 * bar_unsuspend() expects bar_tx == 1, as it should be
5843 		 * called from the TX completion path.  This quietens
5844 		 * the warning.  It's cleared for us anyway.
5845 		 */
5846 		atid->bar_tx = 1;
5847 		ath_tx_tid_bar_unsuspend(sc, atid);
5848 	}
5849 	ATH_TX_UNLOCK(sc);
5850 
5851 	/* There's no need to hold the TXQ lock here */
5852 	sc->sc_addba_stop(ni, tap);
5853 
5854 	/*
5855 	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5856 	 * it'll set the cleanup flag, and it'll be unpaused once
5857 	 * things have been cleaned up.
5858 	 */
5859 	TAILQ_INIT(&bf_cq);
5860 	ATH_TX_LOCK(sc);
5861 
5862 	/*
5863 	 * In case there's a followup call to this, only call it
5864 	 * if we don't have a cleanup in progress.
5865 	 *
5866 	 * Since we've paused the queue above, we need to make
5867 	 * sure we unpause if there's already a cleanup in
5868 	 * progress - it means something else is also doing
5869 	 * this stuff, so we don't need to also keep it paused.
5870 	 */
5871 	if (atid->cleanup_inprogress) {
5872 		ath_tx_tid_resume(sc, atid);
5873 	} else {
5874 		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5875 		/*
5876 		 * Unpause the TID if no cleanup is required.
5877 		 */
5878 		if (! atid->cleanup_inprogress)
5879 			ath_tx_tid_resume(sc, atid);
5880 	}
5881 	ATH_TX_UNLOCK(sc);
5882 
5883 	/* Handle completing frames and fail them */
5884 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5885 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5886 		ath_tx_default_comp(sc, bf, 1);
5887 	}
5889 }
5890 
5891 /*
5892  * Handle a node reassociation.
5893  *
5894  * We may have a bunch of frames queued to the hardware; those need
5895  * to be marked as cleanup.
5896  */
5897 void
5898 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5899 {
5900 	struct ath_tid *tid;
5901 	int i;
5902 	ath_bufhead bf_cq;
5903 	struct ath_buf *bf;
5904 
5905 	TAILQ_INIT(&bf_cq);
5906 
5907 	ATH_TX_UNLOCK_ASSERT(sc);
5908 
5909 	ATH_TX_LOCK(sc);
5910 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5911 		tid = &an->an_tid[i];
5912 		if (tid->hwq_depth == 0)
5913 			continue;
5914 		DPRINTF(sc, ATH_DEBUG_NODE,
5915 		    "%s: %s: TID %d: cleaning up TID\n",
5916 		    __func__,
5917 		    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
5918 		    i);
5919 		/*
5920 		 * In case there's a followup call to this, only call it
5921 		 * if we don't have a cleanup in progress.
5922 		 */
5923 		if (! tid->cleanup_inprogress) {
5924 			ath_tx_tid_pause(sc, tid);
5925 			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5926 			/*
5927 			 * Unpause the TID if no cleanup is required.
5928 			 */
5929 			if (! tid->cleanup_inprogress)
5930 				ath_tx_tid_resume(sc, tid);
5931 		}
5932 	}
5933 	ATH_TX_UNLOCK(sc);
5934 
5935 	/* Handle completing frames and fail them */
5936 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5937 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5938 		ath_tx_default_comp(sc, bf, 1);
5939 	}
5940 }
5941 
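/*
 * The drain-and-fail idiom above appears in several cleanup paths in
 * this file (see also ath_addba_stop()).  A hypothetical helper
 * capturing it would look like this (sketch, not driver code):
 */
#if 0
static void
example_drain_comp_q(struct ath_softc *sc, ath_bufhead *bf_cq)
{
	struct ath_buf *bf;

	/* Complete each buffer with fail=1; the frames are freed. */
	while ((bf = TAILQ_FIRST(bf_cq)) != NULL) {
		TAILQ_REMOVE(bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
#endif
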
5942 /*
5943  * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5944  * it simply tears down the aggregation session. Ew.
5945  *
5946  * It however will call ieee80211_ampdu_stop() which will call
5947  * ic->ic_addba_stop().
5948  *
5949  * XXX This uses a hard-coded max BAR count value; the whole
5950  * XXX BAR TX success or failure should be better handled!
5951  */
5952 void
5953 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5954     int status)
5955 {
5956 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5957 	int tid = tap->txa_ac;
5958 	struct ath_node *an = ATH_NODE(ni);
5959 	struct ath_tid *atid = &an->an_tid[tid];
5960 	int attempts = tap->txa_attempts;
5961 	int old_txa_start;
5962 
5963 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5964 	    "%s: %s: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d"
5965 	    ",  txa_start=%d, txa_seqpending=%d\n",
5966 	    __func__,
5967 	    ath_hal_ether_sprintf(ni->ni_macaddr),
5968 	    tap->txa_ac,
5969 	    atid->tid,
5970 	    status,
5971 	    attempts,
5972 	    tap->txa_start,
5973 	    tap->txa_seqpending);
5974 
5975 	/* Note: This may update the BAW details */
5976 	/*
5977 	 * XXX What if this does slide the BAW along? We need to somehow
5978 	 * XXX either fix things up when it does happen, or prevent the
5979 	 * XXX seqpending value from being anything other than exactly
5980 	 * XXX what the hell we want!
5981 	 *
5982 	 * XXX So for now, why not do this inside the TX lock and just
5983 	 * XXX correct it afterwards? The below condition should
5984 	 * XXX never happen and if it does I need to fix all kinds of things.
5985 	 */
5986 	old_txa_start = tap->txa_start;
5987 	sc->sc_bar_response(ni, tap, status);
5988 	if (tap->txa_start != old_txa_start) {
5989 		device_printf(sc->sc_dev,
5990 			      "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
5991 			      __func__,
5992 			      tid,
5993 			      tap->txa_start,
5994 			      old_txa_start);
5995 	}
5996 	tap->txa_start = old_txa_start;
5997 
5998 	/* Unpause the TID */
5999 	/*
6000 	 * XXX if this is attempt=50, the TID will be downgraded
6001 	 * XXX to a non-aggregate session. So we must unpause the
6002 	 * XXX TID here or it'll never be done.
6003 	 *
6004 	 * Also, don't call it if bar_tx/bar_wait are 0; something
6005 	 * has beaten us to the punch? (XXX figure out what?)
6006 	 */
6007 	if (status == 0 || attempts == 50) {
6008 		ATH_TX_LOCK(sc);
6009 		if (atid->bar_tx == 0 || atid->bar_wait == 0)
6010 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6011 			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
6012 			    __func__,
6013 			    atid->bar_tx, atid->bar_wait);
6014 		else
6015 			ath_tx_tid_bar_unsuspend(sc, atid);
6016 		ATH_TX_UNLOCK(sc);
6017 	}
6018 }
6019 
6020 /*
6021  * This is called whenever the pending ADDBA request times out.
6022  * Unpause and reschedule the TID.
6023  */
6024 void
6025 ath_addba_response_timeout(struct ieee80211_node *ni,
6026     struct ieee80211_tx_ampdu *tap)
6027 {
6028 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
6029 	int tid = tap->txa_ac;
6030 	struct ath_node *an = ATH_NODE(ni);
6031 	struct ath_tid *atid = &an->an_tid[tid];
6032 
6033 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6034 	    "%s: %s: TID=%d, called; resuming\n",
6035 	    __func__,
6036 	    ath_hal_ether_sprintf(ni->ni_macaddr),
6037 	    tid);
6038 
6039 	ATH_TX_LOCK(sc);
6040 	atid->addba_tx_pending = 0;
6041 	ATH_TX_UNLOCK(sc);
6042 
6043 	/* Note: This updates the aggregate state to (again) pending */
6044 	sc->sc_addba_response_timeout(ni, tap);
6045 
6046 	/* Unpause the TID; which reschedules it */
6047 	ATH_TX_LOCK(sc);
6048 	ath_tx_tid_resume(sc, atid);
6049 	ATH_TX_UNLOCK(sc);
6050 }
6051 
6052 /*
6053  * Check if a node is asleep or not.
6054  */
6055 int
6056 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6057 {
6058 
6059 	ATH_TX_LOCK_ASSERT(sc);
6060 
6061 	return (an->an_is_powersave);
6062 }
6063 
6064 /*
6065  * Mark a node as currently "in power save."
6066  * This suspends all traffic on the node.
6067  *
6068  * This must be called with the node/tx locks free.
6069  *
6070  * XXX TODO: the locking silliness below is due to how the node
6071  * locking currently works.  Right now, the node lock is grabbed
6072  * to do rate control lookups and these are done with the TX
6073  * queue lock held.  This means the node lock can't be grabbed
6074  * first here or a LOR will occur.
6075  *
6076  * Eventually (hopefully!) the TX path code will only grab
6077  * the TXQ lock when transmitting and the ath_node lock when
6078  * doing node/TID operations.  There are other complications -
6079  * the sched/unsched operations involve walking the per-txq
6080  * 'active tid' list and this requires both locks to be held.
6081  */
6082 void
6083 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6084 {
6085 	struct ath_tid *atid;
6086 	struct ath_txq *txq;
6087 	int tid;
6088 
6089 	ATH_TX_UNLOCK_ASSERT(sc);
6090 
6091 	/* Suspend all traffic on the node */
6092 	ATH_TX_LOCK(sc);
6093 
6094 	if (an->an_is_powersave) {
6095 		DPRINTF(sc, ATH_DEBUG_XMIT,
6096 		    "%s: %s: node was already asleep!\n",
6097 		    __func__, ath_hal_ether_sprintf(an->an_node.ni_macaddr));
6098 		ATH_TX_UNLOCK(sc);
6099 		return;
6100 	}
6101 
6102 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6103 		atid = &an->an_tid[tid];
6104 		txq = sc->sc_ac2q[atid->ac];
6105 
6106 		ath_tx_tid_pause(sc, atid);
6107 	}
6108 
6109 	/* Mark node as in powersaving */
6110 	an->an_is_powersave = 1;
6111 
6112 	ATH_TX_UNLOCK(sc);
6113 }
6114 
6115 /*
6116  * Mark a node as currently "awake."
6117  * This resumes all traffic to the node.
6118  */
6119 void
6120 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6121 {
6122 	struct ath_tid *atid;
6123 	struct ath_txq *txq;
6124 	int tid;
6125 
6126 	ATH_TX_UNLOCK_ASSERT(sc);
6127 
6128 	ATH_TX_LOCK(sc);
6129 
6130 	/* !? */
6131 	if (an->an_is_powersave == 0) {
6132 		ATH_TX_UNLOCK(sc);
6133 		DPRINTF(sc, ATH_DEBUG_XMIT,
6134 		    "%s: an=%p: node was already awake\n",
6135 		    __func__, an);
6136 		return;
6137 	}
6138 
6139 	/* Mark node as awake */
6140 	an->an_is_powersave = 0;
6141 	/*
6142 	 * Clear any pending leaked frame requests
6143 	 */
6144 	an->an_leak_count = 0;
6145 
6146 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6147 		atid = &an->an_tid[tid];
6148 		txq = sc->sc_ac2q[atid->ac];
6149 
6150 		ath_tx_tid_resume(sc, atid);
6151 	}
6152 	ATH_TX_UNLOCK(sc);
6153 }
6154 
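/*
 * Hypothetical usage (not compiled): the power-save notification path
 * calls these with the TX lock NOT held, since both functions take it
 * themselves.  The 'enable' flag and the call site are assumptions.
 */
#if 0
	if (enable)
		ath_tx_node_sleep(sc, ATH_NODE(ni));
	else
		ath_tx_node_wakeup(sc, ATH_NODE(ni));
#endif
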
6155 static int
6156 ath_legacy_dma_txsetup(struct ath_softc *sc)
6157 {
6158 
6159 	/* nothing new needed */
6160 	return (0);
6161 }
6162 
6163 static int
6164 ath_legacy_dma_txteardown(struct ath_softc *sc)
6165 {
6166 
6167 	/* nothing new needed */
6168 	return (0);
6169 }
6170 
6171 void
6172 ath_xmit_setup_legacy(struct ath_softc *sc)
6173 {
6174 	/*
6175 	 * For now, just set the descriptor length to sizeof(ath_desc);
6176 	 * worry about extracting the real length out of the HAL later.
6177 	 */
6178 	sc->sc_tx_desclen = sizeof(struct ath_desc);
6179 	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6180 	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6181 
6182 	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6183 	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6184 	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6185 
6186 	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6187 	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6188 
6189 	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6190 }
6191
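/*
 * A sketch (not compiled) of how the method table filled in above is
 * consumed: callers dispatch through sc->sc_tx rather than naming the
 * legacy functions directly, so an EDMA chip can install a different
 * set at attach time.  The wrapper is hypothetical and assumes the
 * handoff method takes the same arguments as ath_tx_handoff() above.
 */
#if 0
static void
example_tx_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	/* Indirect through the attach-time method table. */
	sc->sc_tx.xmit_handoff(sc, txq, bf);
}
#endif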