/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef	ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
		    (sc->sc_ah->ah_magic == 0x19741014));
}
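
/*
 * Note: the two magic values above appear to be the HAL ah_magic
 * identifiers for the AR5416-family and AR9300-family HALs
 * respectively - ie, the chips that support the 11n rate scenario
 * setup.  Pre-11n HALs carry different magic values and so fall
 * through to the legacy rate setup paths.
 */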

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID).
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}
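
/*
 * Illustrative example (assuming the usual WMM mappings): a QoS
 * data frame classified as WME_AC_VI is assigned TID 5 by
 * WME_AC_TO_TID(), while a non-QoS frame (eg a management frame)
 * always lands in IEEE80211_NONQOS_TID (16) regardless of its
 * M_WME_GETAC() classification.
 */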

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames into the ATH_NONQOS_TID_AC
 * (currently VO) queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);
	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			device_printf(sc->sc_dev, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}
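
/*
 * Note the reference count pairing here: each ieee80211_node_incref()
 * taken in ath_txfrag_setup() is balanced either by the decref in
 * ath_txfrag_cleanup() (on the error path) or by the normal buffer
 * completion path once ath_tx_start() has consumed the fragment.
 * If that pairing is broken, the node is never freed.
 */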

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
			, bufAddrList
			, segLenList
			, bf->bf_descid		/* XXX desc id */
			, bf->bf_state.bfs_tx_queue
			, isFirstDesc		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}
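
/*
 * Worked example of the segment packing above: on an EDMA chip
 * (sc_tx_nmaps == 4) a frame with bf_nseg == 6 consumes two
 * descriptors - the first carries segments 0..3, the second
 * carries segments 4..5 and has its link pointer set to 0.
 * On a legacy chip (sc_tx_nmaps == 1) the same frame would
 * consume six chained descriptors, one segment each.
 */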

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
			, rc[1].ratecode, rc[1].tries
			, rc[2].ratecode, rc[2].tries
			, rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}
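
/*
 * Sketch of the resulting A-MPDU descriptor chain for a three-frame
 * aggregate (one descriptor per frame shown for brevity):
 *
 *   bf_first (aggr_first, rate ctrl) -> bf (aggr_middle) -> bf (aggr_last)
 *
 * with each frame's last descriptor hardware-linked to the next
 * frame's first descriptor, bf_first->bf_lastds pointing at the
 * final descriptor, and the rate table duplicated into that final
 * descriptor for the pre-AR9300 completion path.
 */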

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__,
		    bf,
		    bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	     ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX racy, should hold the PCU lock when checking this,
	 * and also should ensure that the TX counter is >0!
	 */
	KASSERT((sc->sc_inreset_cnt == 0),
	    ("%s: TX during reset?\n", __func__));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		/* XXX axq_link needs to be set and updated! */
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty).
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%p",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__,
		    bf,
		    bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef	ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue).
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}
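
/*
 * Example of the length fixups above, assuming net80211's TKIP
 * cipher parameters (ic_header=8, ic_trailer=4, ic_miclen=8):
 * hdrlen grows by 8 (IV/EIV), pktlen grows by 12 (IV/EIV + ICV),
 * and if the Michael MIC is done in hardware on a non-fragment,
 * pktlen grows by a further 8 for the MIC.
 */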

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}
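
/*
 * Worked example: for a unicast, ACK-expecting data frame the NAV
 * duration is just the (SIFS + ACK) time for the chosen rate.  If
 * MORE_FRAG is set it becomes:
 *
 *   dur = 2 * (SIFS + ACK) + txtime(bf_nextfraglen at rix)
 *
 * ie, enough airtime reservation to cover this frame's ACK, the
 * next fragment and its ACK.
 */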

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 *     use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
			rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
			rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}
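
/*
 * In equation form, for an RTS-protected, ACK-expecting frame:
 *
 *   ctsduration = ackDuration(cix)        (SIFS + CTS)
 *               + txtime(pktlen at rix)   (the data frame itself)
 *               + ackDuration(rix)        (SIFS + ACK)
 *
 * using the short- or long-preamble ACK durations as appropriate.
 * With CTS-only protection the first term is dropped; with NOACK
 * the last term is dropped.
 */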

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
	}
}
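
/*
 * Example flow: an OFDM data frame sent with 802.11g protection
 * (bfs_doprot set) looks up sc_protrix (a CCK protection rate),
 * takes that rate's controlRate entry as cix, and sends the
 * RTS/CTS exchange at the resulting rate; on a pre-11n chip the
 * CTS duration is then pre-computed as shown above.
 */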

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		device_printf(sc->sc_dev, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;	/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}
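
/*
 * Note: ath_rate_findrate() and ath_rate_getxtxrates() are serviced
 * by whichever ath_rate module is compiled in (eg sample or amrr);
 * series 0 comes from the first lookup and, when MRR is active,
 * series 1..3 come from the getxtxrates() call above.
 */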

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so they go out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		device_printf(sc->sc_dev,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    type,
		    subtype);
		return (0);
	} else {
		return (1);
	}
}
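
/*
 * Decision summary for the above:
 *
 *   awake  + BAR       -> direct dispatch (return 0)
 *   asleep + BAR       -> software queue, at the head (return 1)
 *   asleep + mgmt/ctl  -> direct dispatch (return 0)
 *   everything else    -> software queue (return 1)
 */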

/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames).
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler; however, it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}
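
/*
 * The setup ordering above matters: the rate lookup must happen
 * first (duration, protection and RTS/CTS all depend on the chosen
 * rate), RTS/CTS setup must follow the protection calculation (it
 * consumes bfs_doprot), and ath_tx_setds() must run last so the
 * descriptor is written with the final flags and rate set.
 */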

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * are "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry */
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;		/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX? too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
1722 
1723 	/*
1724 	 * There are two known scenarios where the frame AC doesn't match
1725 	 * what the destination TXQ is.
1726 	 *
1727 	 * + non-QoS frames (eg management?) that the net80211 stack has
1728 	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
1729 	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
1730 	 *   It's quite possible that management frames should just be
1731 	 *   direct dispatched to hardware rather than go via the software
1732 	 *   queue; that should be investigated in the future.  There are
1733 	 *   some specific scenarios where this doesn't make sense, mostly
1734 	 *   surrounding ADDBA request/response - hence why that is special
1735 	 *   cased.
1736 	 *
1737 	 * + Multicast frames going into the VAP mcast queue.  That shows up
1738 	 *   as "TXQ 11".
1739 	 *
1740 	 * This driver should eventually support separate TID and TXQ locking,
1741 	 * allowing for arbitrary AC frames to appear on arbitrary software
1742 	 * queues, being queued to the "correct" hardware queue when needed.
1743 	 */
1744 #if 0
1745 	if (txq != sc->sc_ac2q[pri]) {
1746 		device_printf(sc->sc_dev,
1747 		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1748 		    __func__,
1749 		    txq,
1750 		    txq->axq_qnum,
1751 		    pri,
1752 		    sc->sc_ac2q[pri],
1753 		    sc->sc_ac2q[pri]->axq_qnum);
1754 	}
1755 #endif
1756 
1757 	/*
1758 	 * Calculate miscellaneous flags.
1759 	 */
1760 	if (ismcast) {
1761 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
1762 	} else if (pktlen > vap->iv_rtsthreshold &&
1763 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1764 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
1765 		sc->sc_stats.ast_tx_rts++;
1766 	}
1767 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
1768 		sc->sc_stats.ast_tx_noack++;
1769 #ifdef IEEE80211_SUPPORT_TDMA
1770 	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1771 		DPRINTF(sc, ATH_DEBUG_TDMA,
1772 		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
1773 		sc->sc_stats.ast_tdma_ack++;
1774 		/* XXX free tx dmamap */
1775 		ath_freetx(m0);
1776 		return EIO;
1777 	}
1778 #endif
1779 
1780 	/*
1781 	 * Determine if a tx interrupt should be generated for
1782 	 * this descriptor.  We take a tx interrupt to reap
1783 	 * descriptors when the h/w hits an EOL condition or
1784 	 * when the descriptor is specifically marked to generate
1785 	 * an interrupt.  We periodically mark descriptors in this
1786 	 * way to ensure timely replenishing of the supply needed
1787 	 * for sending frames.  Deferring interrupts reduces system
1788 	 * load and potentially allows more concurrent work to be
1789 	 * done, but if done too aggressively it can cause senders to
1790 	 * back up.
1791 	 *
1792 	 * NB: use >= to deal with sc_txintrperiod changing
1793 	 *     dynamically through sysctl.
1794 	 */
1795 	if (flags & HAL_TXDESC_INTREQ) {
1796 		txq->axq_intrcnt = 0;
1797 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1798 		flags |= HAL_TXDESC_INTREQ;
1799 		txq->axq_intrcnt = 0;
1800 	}
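	/*
	 * Worked example of the mitigation above (illustrative, not
	 * normative): with sc_txintrperiod == 5, every fifth frame
	 * queued to this TXQ gets HAL_TXDESC_INTREQ set, so modulo
	 * EOL interrupts at most one TX interrupt is taken per five
	 * frames - unless a frame (eg mgmt/ctl) already forced
	 * INTREQ on, which also resets the counter.
	 */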
1801 
1802 	/* This point forward is actual TX bits */
1803 
1804 	/*
1805 	 * At this point we are committed to sending the frame
1806 	 * and we don't need to look at m_nextpkt; clear it in
1807 	 * case this frame is part of a frag chain.
1808 	 */
1809 	m0->m_nextpkt = NULL;
1810 
1811 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1812 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1813 		    sc->sc_hwmap[rix].ieeerate, -1);
1814 
1815 	if (ieee80211_radiotap_active_vap(vap)) {
1816 		u_int64_t tsf = ath_hal_gettsf64(ah);
1817 
1818 		sc->sc_tx_th.wt_tsf = htole64(tsf);
1819 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1820 		if (iswep)
1821 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1822 		if (isfrag)
1823 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1824 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1825 		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1826 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1827 
1828 		ieee80211_radiotap_tx(vap, m0);
1829 	}
1830 
1831 	/* Blank the legacy rate array */
1832 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1833 
1834 	/*
1835 	 * ath_buf_set_rate needs at least one rate/try to setup
1836 	 * the rate scenario.
1837 	 */
1838 	bf->bf_state.bfs_rc[0].rix = rix;
1839 	bf->bf_state.bfs_rc[0].tries = try0;
1840 	bf->bf_state.bfs_rc[0].ratecode = txrate;
1841 
1842 	/* Store the decided rate index values away */
1843 	bf->bf_state.bfs_pktlen = pktlen;
1844 	bf->bf_state.bfs_hdrlen = hdrlen;
1845 	bf->bf_state.bfs_atype = atype;
1846 	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1847 	bf->bf_state.bfs_txrate0 = txrate;
1848 	bf->bf_state.bfs_try0 = try0;
1849 	bf->bf_state.bfs_keyix = keyix;
1850 	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1851 	bf->bf_state.bfs_txflags = flags;
1852 	bf->bf_state.bfs_shpream = shortPreamble;
1853 
1854 	/* XXX this should be done in ath_tx_setrate() */
1855 	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1856 	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1857 	bf->bf_state.bfs_ctsduration = 0;
1858 	bf->bf_state.bfs_ismrr = ismrr;
1859 
1860 	return 0;
1861 }
1862 
1863 /*
1864  * Queue a frame to the hardware or software queue.
1865  *
1866  * This can be called by the net80211 code.
1867  *
1868  * XXX what about locking? Or, push the seqno assign into the
1869  * XXX aggregate scheduler so it's serialised?
1870  *
1871  * XXX When sending management frames via ath_raw_xmit(),
1872  *     should CLRDMASK be set unconditionally?
1873  */
1874 int
1875 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1876     struct ath_buf *bf, struct mbuf *m0)
1877 {
1878 	struct ieee80211vap *vap = ni->ni_vap;
1879 	struct ath_vap *avp = ATH_VAP(vap);
1880 	int r = 0;
1881 	u_int pri;
1882 	int tid;
1883 	struct ath_txq *txq;
1884 	int ismcast;
1885 	const struct ieee80211_frame *wh;
1886 	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1887 	ieee80211_seq seqno;
1888 	uint8_t type, subtype;
1889 	int queue_to_head;
1890 
1891 	ATH_TX_LOCK_ASSERT(sc);
1892 
1893 	/*
1894 	 * Determine the target hardware queue.
1895 	 *
1896 	 * For multicast frames, the txq gets overridden appropriately
1897 	 * depending upon the state of PS.
1898 	 *
1899 	 * For any other frame, we do a TID/QoS lookup inside the frame
1900 	 * to see what the TID should be. If it's a non-QoS frame, the
1901 	 * AC and TID are overridden. The TID/TXQ code assumes the
1902 	 * TID is on a predictable hardware TXQ, so we don't support
1903 	 * having a node TID queued to multiple hardware TXQs.
1904 	 * This may change in the future but would require some locking
1905 	 * fudgery.
1906 	 */
1907 	pri = ath_tx_getac(sc, m0);
1908 	tid = ath_tx_gettid(sc, m0);
1909 
1910 	txq = sc->sc_ac2q[pri];
1911 	wh = mtod(m0, struct ieee80211_frame *);
1912 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1913 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1914 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1915 
1916 	/*
1917 	 * Enforce how deep the multicast queue can grow.
1918 	 *
1919 	 * XXX duplicated in ath_raw_xmit().
1920 	 */
1921 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1922 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1923 		    > sc->sc_txq_mcastq_maxdepth) {
1924 			sc->sc_stats.ast_tx_mcastq_overflow++;
1925 			m_freem(m0);
1926 			return (ENOBUFS);
1927 		}
1928 	}
1929 
1930 	/*
1931 	 * Enforce how deep the unicast queue can grow.
1932 	 *
1933 	 * If the node is in power save then we don't want
1934 	 * the software queue to grow too deep, or a node may
1935 	 * end up consuming all of the ath_buf entries.
1936 	 *
1937 	 * For now, only do this for DATA frames.
1938 	 *
1939 	 * We will want to cap how many management/control
1940 	 * frames get punted to the software queue so it doesn't
1941 	 * fill up.  But the correct solution isn't yet obvious.
1942 	 * In any case, this check should at least let frames pass
1943 	 * that we are direct-dispatching.
1944 	 *
1945 	 * XXX TODO: duplicate this to the raw xmit path!
1946 	 */
1947 	if (type == IEEE80211_FC0_TYPE_DATA &&
1948 	    ATH_NODE(ni)->an_is_powersave &&
1949 	    ATH_NODE(ni)->an_swq_depth >
1950 	     sc->sc_txq_node_psq_maxdepth) {
1951 		sc->sc_stats.ast_tx_node_psq_overflow++;
1952 		m_freem(m0);
1953 		return (ENOBUFS);
1954 	}
1955 
1956 	/* A-MPDU TX */
1957 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1958 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1959 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1960 
1961 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1962 	    __func__, tid, pri, is_ampdu);
1963 
1964 	/* Set local packet state, used to queue packets to hardware */
1965 	bf->bf_state.bfs_tid = tid;
1966 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1967 	bf->bf_state.bfs_pri = pri;
1968 
1969 #if 1
1970 	/*
1971 	 * When servicing one or more stations in power-save mode,
1972 	 * or if there is some mcast data waiting on the mcast
1973 	 * queue (to prevent out of order delivery), multicast frames
1974 	 * must be buffered until after the beacon.
1975 	 *
1976 	 * TODO: we should lock the mcastq before we check the length.
1977 	 */
1978 	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1979 		txq = &avp->av_mcastq;
1980 		/*
1981 		 * Mark the frame as eventually belonging on the CAB
1982 		 * queue, so the descriptor setup functions will
1983 		 * correctly initialise the descriptor 'qcuId' field.
1984 		 */
1985 		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1986 	}
1987 #endif
1988 
1989 	/* Do the generic frame setup */
1990 	/* XXX should just bzero the bf_state? */
1991 	bf->bf_state.bfs_dobaw = 0;
1992 
1993 	/* A-MPDU TX? Manually set sequence number */
1994 	/*
1995 	 * Don't do it whilst pending; the net80211 layer still
1996 	 * assigns them.
1997 	 */
1998 	if (is_ampdu_tx) {
1999 		/*
2000 		 * Always call; this function will
2001 		 * handle making sure that null data frames
2002 		 * don't get a sequence number from the current
2003 		 * TID and thus mess with the BAW.
2004 		 */
2005 		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2006 
2007 		/*
2008 		 * Don't add QoS NULL frames to the BAW.
2009 		 */
2010 		if (IEEE80211_QOS_HAS_SEQ(wh) &&
2011 		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2012 			bf->bf_state.bfs_dobaw = 1;
2013 		}
2014 	}
2015 
2016 	/*
2017 	 * At this point the sequence number (if one was needed)
2018 	 * has been assigned; squirrel it away somewhere easy to get to.
2019 	 */
2020 	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2021 
2022 	/* Is ampdu pending? fetch the seqno and print it out */
2023 	if (is_ampdu_pending)
2024 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2025 		    "%s: tid %d: ampdu pending, seqno %d\n",
2026 		    __func__, tid, M_SEQNO_GET(m0));
2027 
2028 	/* This also sets up the DMA map */
2029 	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2030 
2031 	if (r != 0)
2032 		goto done;
2033 
2034 	/* At this point m0 could have changed! */
2035 	m0 = bf->bf_m;
2036 
2037 #if 1
2038 	/*
2039 	 * If it's a multicast frame, do a direct-dispatch to the
2040 	 * destination hardware queue. Don't bother software
2041 	 * queuing it.
2042 	 */
2043 	/*
2044 	 * If it's a BAR frame, do a direct dispatch to the
2045 	 * destination hardware queue. Don't bother software
2046 	 * queuing it, as the TID will now be paused.
2047 	 * Sending a BAR frame can occur from the net80211 txa timer
2048 	 * (ie, retries) or from the ath txtask (completion call.)
2049 	 * It queues directly to hardware because the TID is paused
2050 	 * at this point (and won't be unpaused until the BAR has
2051 	 * either been TXed successfully or max retries has been
2052 	 * reached.)
2053 	 */
2054 	/*
2055 	 * Until things are better debugged - if this node is asleep
2056 	 * and we're sending it a non-BAR frame, direct dispatch it.
2057 	 * Why? Because we need to figure out what's actually being
2058 	 * sent - eg, during reassociation/reauthentication after
2059 	 * the node last disappeared whilst asleep, the driver should
2060 	 * have unpaused/woken the node.  So until that is
2061 	 * sorted out, use this workaround.
2062 	 */
2063 	if (txq == &avp->av_mcastq) {
2064 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2065 		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2066 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2067 		ath_tx_xmit_normal(sc, txq, bf);
2068 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2069 	    &queue_to_head)) {
2070 		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2071 	} else {
2072 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2073 		ath_tx_xmit_normal(sc, txq, bf);
2074 	}
2075 #else
2076 	/*
2077 	 * For now, since there's no software queue,
2078 	 * direct-dispatch to the hardware.
2079 	 */
2080 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2081 	/*
2082 	 * Update the current leak count if
2083 	 * we're leaking frames; and set the
2084 	 * MORE flag as appropriate.
2085 	 */
2086 	ath_tx_leak_count_update(sc, tid, bf);
2087 	ath_tx_xmit_normal(sc, txq, bf);
2088 #endif
2089 done:
2090 	return 0;
2091 }
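/*
 * Illustrative sketch only (not part of the driver): a minimal caller
 * of ath_tx_start(), modelled on ath_raw_xmit() below.  The name
 * ath_tx_start_example() is hypothetical, and buffer return / node
 * reference handling on error is deliberately elided.
 */
#if 0
static int
ath_tx_start_example(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m)
{
	struct ath_buf *bf;
	int error;

	ATH_TX_LOCK(sc);
	bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
	if (bf == NULL) {
		ATH_TX_UNLOCK(sc);
		m_freem(m);
		return (ENOBUFS);
	}
	/* NB: ath_tx_start() frees the mbuf itself on error */
	error = ath_tx_start(sc, ni, bf, m);
	ATH_TX_UNLOCK(sc);
	return (error);
}
#endif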
2092 
2093 static int
2094 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2095 	struct ath_buf *bf, struct mbuf *m0,
2096 	const struct ieee80211_bpf_params *params)
2097 {
2098 	struct ifnet *ifp = sc->sc_ifp;
2099 	struct ieee80211com *ic = ifp->if_l2com;
2100 	struct ath_hal *ah = sc->sc_ah;
2101 	struct ieee80211vap *vap = ni->ni_vap;
2102 	int error, ismcast, ismrr;
2103 	int keyix, hdrlen, pktlen, try0, txantenna;
2104 	u_int8_t rix, txrate;
2105 	struct ieee80211_frame *wh;
2106 	u_int flags;
2107 	HAL_PKT_TYPE atype;
2108 	const HAL_RATE_TABLE *rt;
2109 	struct ath_desc *ds;
2110 	u_int pri;
2111 	int o_tid = -1;
2112 	int do_override;
2113 	uint8_t type, subtype;
2114 	int queue_to_head;
2115 
2116 	ATH_TX_LOCK_ASSERT(sc);
2117 
2118 	wh = mtod(m0, struct ieee80211_frame *);
2119 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2120 	hdrlen = ieee80211_anyhdrsize(wh);
2121 	/*
2122 	 * Packet length must not include any
2123 	 * pad bytes; deduct them here.
2124 	 */
2125 	/* XXX honor IEEE80211_BPF_DATAPAD */
2126 	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2127 
2128 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2129 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2130 
2131 	ATH_KTR(sc, ATH_KTR_TX, 2,
2132 	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2133 
2134 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2135 	    __func__, ismcast);
2136 
2137 	pri = params->ibp_pri & 3;
2138 	/* Override pri if the frame isn't a QoS one */
2139 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2140 		pri = ath_tx_getac(sc, m0);
2141 
2142 	/* XXX If it's an ADDBA, override the correct queue */
2143 	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2144 
2145 	/* Map ADDBA to the correct priority */
2146 	if (do_override) {
2147 #if 0
2148 		device_printf(sc->sc_dev,
2149 		    "%s: overriding tid %d pri %d -> %d\n",
2150 		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2151 #endif
2152 		pri = TID_TO_WME_AC(o_tid);
2153 	}
2154 
2155 	/* Handle encryption twiddling if needed */
2156 	if (! ath_tx_tag_crypto(sc, ni,
2157 	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2158 	    &hdrlen, &pktlen, &keyix)) {
2159 		ath_freetx(m0);
2160 		return EIO;
2161 	}
2162 	/* packet header may have moved, reset our local pointer */
2163 	wh = mtod(m0, struct ieee80211_frame *);
2164 
2165 	/* Do the generic frame setup */
2166 	/* XXX should just bzero the bf_state? */
2167 	bf->bf_state.bfs_dobaw = 0;
2168 
2169 	error = ath_tx_dmasetup(sc, bf, m0);
2170 	if (error != 0)
2171 		return error;
2172 	m0 = bf->bf_m;				/* NB: may have changed */
2173 	wh = mtod(m0, struct ieee80211_frame *);
2174 	bf->bf_node = ni;			/* NB: held reference */
2175 
2176 	/* Always enable CLRDMASK for raw frames for now.. */
2177 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2178 	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2179 	if (params->ibp_flags & IEEE80211_BPF_RTS)
2180 		flags |= HAL_TXDESC_RTSENA;
2181 	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2182 		/* XXX assume 11g/11n protection? */
2183 		bf->bf_state.bfs_doprot = 1;
2184 		flags |= HAL_TXDESC_CTSENA;
2185 	}
2186 	/* XXX leave ismcast to injector? */
2187 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2188 		flags |= HAL_TXDESC_NOACK;
2189 
2190 	rt = sc->sc_currates;
2191 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2192 	rix = ath_tx_findrix(sc, params->ibp_rate0);
2193 	txrate = rt->info[rix].rateCode;
2194 	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2195 		txrate |= rt->info[rix].shortPreamble;
2196 	sc->sc_txrix = rix;
2197 	try0 = params->ibp_try0;
2198 	ismrr = (params->ibp_try1 != 0);
2199 	txantenna = params->ibp_pri >> 2;
2200 	if (txantenna == 0)			/* XXX? */
2201 		txantenna = sc->sc_txantenna;
2202 
2203 	/*
2204 	 * Since ctsrate is fixed, store it away for later
2205 	 * use when the descriptor fields are being set.
2206 	 */
2207 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2208 		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2209 
2210 	/*
2211 	 * NB: we mark all packets as type PSPOLL so the h/w won't
2212 	 * set the sequence number, duration, etc.
2213 	 */
2214 	atype = HAL_PKT_TYPE_PSPOLL;
2215 
2216 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2217 		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2218 		    sc->sc_hwmap[rix].ieeerate, -1);
2219 
2220 	if (ieee80211_radiotap_active_vap(vap)) {
2221 		u_int64_t tsf = ath_hal_gettsf64(ah);
2222 
2223 		sc->sc_tx_th.wt_tsf = htole64(tsf);
2224 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2225 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2226 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2227 		if (m0->m_flags & M_FRAG)
2228 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2229 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2230 		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2231 		    ieee80211_get_node_txpower(ni));
2232 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2233 
2234 		ieee80211_radiotap_tx(vap, m0);
2235 	}
2236 
2237 	/*
2238 	 * Formulate first tx descriptor with tx controls.
2239 	 */
2240 	ds = bf->bf_desc;
2241 	/* XXX check return value? */
2242 
2243 	/* Store the decided rate index values away */
2244 	bf->bf_state.bfs_pktlen = pktlen;
2245 	bf->bf_state.bfs_hdrlen = hdrlen;
2246 	bf->bf_state.bfs_atype = atype;
2247 	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2248 	    ieee80211_get_node_txpower(ni));
2249 	bf->bf_state.bfs_txrate0 = txrate;
2250 	bf->bf_state.bfs_try0 = try0;
2251 	bf->bf_state.bfs_keyix = keyix;
2252 	bf->bf_state.bfs_txantenna = txantenna;
2253 	bf->bf_state.bfs_txflags = flags;
2254 	bf->bf_state.bfs_shpream =
2255 	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2256 
2257 	/* Set local packet state, used to queue packets to hardware */
2258 	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2259 	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2260 	bf->bf_state.bfs_pri = pri;
2261 
2262 	/* XXX this should be done in ath_tx_setrate() */
2263 	bf->bf_state.bfs_ctsrate = 0;
2264 	bf->bf_state.bfs_ctsduration = 0;
2265 	bf->bf_state.bfs_ismrr = ismrr;
2266 
2267 	/* Blank the legacy rate array */
2268 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2269 
2270 	bf->bf_state.bfs_rc[0].rix =
2271 	    ath_tx_findrix(sc, params->ibp_rate0);
2272 	bf->bf_state.bfs_rc[0].tries = try0;
2273 	bf->bf_state.bfs_rc[0].ratecode = txrate;
2274 
2275 	if (ismrr) {
2276 		int rix;
2277 
2278 		rix = ath_tx_findrix(sc, params->ibp_rate1);
2279 		bf->bf_state.bfs_rc[1].rix = rix;
2280 		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2281 
2282 		rix = ath_tx_findrix(sc, params->ibp_rate2);
2283 		bf->bf_state.bfs_rc[2].rix = rix;
2284 		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2285 
2286 		rix = ath_tx_findrix(sc, params->ibp_rate3);
2287 		bf->bf_state.bfs_rc[3].rix = rix;
2288 		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2289 	}
2290 	/*
2291 	 * All the required rate control decisions have been made;
2292 	 * fill in the rc flags.
2293 	 */
2294 	ath_tx_rate_fill_rcflags(sc, bf);
2295 
2296 	/* NB: no buffered multicast in power save support */
2297 
2298 	/*
2299 	 * If we're overriding the ADDBA destination, dump directly
2300 	 * into the hardware queue, right after any pending
2301 	 * frames to that node.
2302 	 */
2303 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2304 	    __func__, do_override);
2305 
2306 #if 1
2307 	/*
2308 	 * Put addba frames in the right place in the right TID/HWQ.
2309 	 */
2310 	if (do_override) {
2311 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2312 		/*
2313 		 * XXX if it's addba frames, should we be leaking
2314 		 * them out via the frame leak method?
2315 		 * XXX for now let's not risk it; but we may wish
2316 		 * to investigate this later.
2317 		 */
2318 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2319 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2320 	    &queue_to_head)) {
2321 		/* Queue to software queue */
2322 		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2323 	} else {
2324 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2325 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2326 	}
2327 #else
2328 	/* Direct-dispatch to the hardware */
2329 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2330 	/*
2331 	 * Update the current leak count if
2332 	 * we're leaking frames; and set the
2333 	 * MORE flag as appropriate.
2334 	 */
2335 	ath_tx_leak_count_update(sc, tid, bf);
2336 	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2337 #endif
2338 	return 0;
2339 }
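/*
 * Illustrative sketch only (not part of the driver): the kind of
 * parameter block that ends up in ath_tx_raw_start() via
 * ath_raw_xmit().  The name ath_raw_xmit_example() is hypothetical
 * and the values are arbitrary examples.
 */
#if 0
static int
ath_raw_xmit_example(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211_bpf_params params;

	memset(&params, 0, sizeof(params));
	params.ibp_flags = IEEE80211_BPF_NOACK;	/* don't wait for an ACK */
	params.ibp_pri = WME_AC_VO;	/* AC in bits 0..1; TX antenna above */
	params.ibp_rate0 = 2;		/* net80211 rate code: 1Mb/s CCK */
	params.ibp_try0 = 1;		/* one attempt; ibp_try1 == 0 -> no MRR */

	return (ath_raw_xmit(ni, m, &params));
}
#endif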
2340 
2341 /*
2342  * Send a raw frame.
2343  *
2344  * This can be called by net80211.
2345  */
2346 int
2347 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2348 	const struct ieee80211_bpf_params *params)
2349 {
2350 	struct ieee80211com *ic = ni->ni_ic;
2351 	struct ifnet *ifp = ic->ic_ifp;
2352 	struct ath_softc *sc = ifp->if_softc;
2353 	struct ath_buf *bf;
2354 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2355 	int error = 0;
2356 
2357 	ATH_PCU_LOCK(sc);
2358 	if (sc->sc_inreset_cnt > 0) {
2359 		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; bailing\n",
2360 		    __func__);
2361 		error = EIO;
2362 		ATH_PCU_UNLOCK(sc);
2363 		goto bad0;
2364 	}
2365 	sc->sc_txstart_cnt++;
2366 	ATH_PCU_UNLOCK(sc);
2367 
2368 	ATH_TX_LOCK(sc);
2369 
2370 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
2371 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
2372 		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
2373 			"!running" : "invalid");
2374 		m_freem(m);
2375 		error = ENETDOWN;
2376 		goto bad;
2377 	}
2378 
2379 	/*
2380 	 * Enforce how deep the multicast queue can grow.
2381 	 *
2382 	 * XXX duplicated in ath_tx_start().
2383 	 */
2384 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2385 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2386 		    > sc->sc_txq_mcastq_maxdepth) {
2387 			sc->sc_stats.ast_tx_mcastq_overflow++;
2388 			error = ENOBUFS;
2389 		}
2390 
2391 		if (error != 0) {
2392 			m_freem(m);
2393 			goto bad;
2394 		}
2395 	}
2396 
2397 	/*
2398 	 * Grab a TX buffer and associated resources.
2399 	 */
2400 	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2401 	if (bf == NULL) {
2402 		sc->sc_stats.ast_tx_nobuf++;
2403 		m_freem(m);
2404 		error = ENOBUFS;
2405 		goto bad;
2406 	}
2407 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2408 	    m, params,  bf);
2409 
2410 	if (params == NULL) {
2411 		/*
2412 		 * Legacy path; interpret frame contents to decide
2413 		 * precisely how to send the frame.
2414 		 */
2415 		if (ath_tx_start(sc, ni, bf, m)) {
2416 			error = EIO;		/* XXX */
2417 			goto bad2;
2418 		}
2419 	} else {
2420 		/*
2421 		 * Caller supplied explicit parameters to use in
2422 		 * sending the frame.
2423 		 */
2424 		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2425 			error = EIO;		/* XXX */
2426 			goto bad2;
2427 		}
2428 	}
2429 	sc->sc_wd_timer = 5;
2430 	ifp->if_opackets++;
2431 	sc->sc_stats.ast_tx_raw++;
2432 
2433 	/*
2434 	 * Update the TIM - if there's anything queued to the
2435 	 * software queue and power save is enabled, we should
2436 	 * set the TIM.
2437 	 */
2438 	ath_tx_update_tim(sc, ni, 1);
2439 
2440 	ATH_TX_UNLOCK(sc);
2441 
2442 	ATH_PCU_LOCK(sc);
2443 	sc->sc_txstart_cnt--;
2444 	ATH_PCU_UNLOCK(sc);
2445 
2446 	return 0;
2447 bad2:
2448 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2449 	    "bf=%p",
2450 	    m,
2451 	    params,
2452 	    bf);
2453 	ATH_TXBUF_LOCK(sc);
2454 	ath_returnbuf_head(sc, bf);
2455 	ATH_TXBUF_UNLOCK(sc);
2456 bad:
2457 
2458 	ATH_TX_UNLOCK(sc);
2459 
2460 	ATH_PCU_LOCK(sc);
2461 	sc->sc_txstart_cnt--;
2462 	ATH_PCU_UNLOCK(sc);
2463 bad0:
2464 	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2465 	    m, params);
2466 	ifp->if_oerrors++;
2467 	sc->sc_stats.ast_tx_raw_fail++;
2468 	ieee80211_free_node(ni);
2469 
2470 	return error;
2471 }
2472 
2473 /* Some helper functions */
2474 
2475 /*
2476  * ADDBA (and potentially others) need to be placed in the same
2477  * hardware queue as the TID/node it's relating to. This is so
2478  * it goes out after any pending non-aggregate frames to the
2479  * same node/TID.
2480  *
2481  * If this isn't done, the ADDBA can go out before the frames
2482  * queued in hardware. Even though these frames have sequence
2483  * numbers -earlier- than the ADDBA and may legitimately be
2484  * transmitted (no frames whose sequence numbers are after the
2485  * ADDBA should be!), they'll arrive after the ADDBA - and the
2486  * receiving end will simply drop them as being out of the BAW.
2487  *
2488  * The frames can't be appended to the TID software queue - they'd
2489  * never be sent out. So these frames have to be directly
2490  * dispatched to the hardware, rather than queued in software.
2491  * So if this function returns true, the TXQ has to be
2492  * overridden and it has to be directly dispatched.
2493  *
2494  * It's a dirty hack, but someone's gotta do it.
2495  */
2496 
2497 /*
2498  * XXX doesn't belong here!
2499  */
2500 static int
2501 ieee80211_is_action(struct ieee80211_frame *wh)
2502 {
2503 	/* Type: Management frame? */
2504 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2505 	    IEEE80211_FC0_TYPE_MGT)
2506 		return 0;
2507 
2508 	/* Subtype: Action frame? */
2509 	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2510 	    IEEE80211_FC0_SUBTYPE_ACTION)
2511 		return 0;
2512 
2513 	return 1;
2514 }
2515 
2516 #define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
2517 /*
2518  * Return an alternate TID for ADDBA request frames.
2519  *
2520  * Yes, this likely should be done in the net80211 layer.
2521  */
2522 static int
2523 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2524     struct ieee80211_node *ni,
2525     struct mbuf *m0, int *tid)
2526 {
2527 	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2528 	struct ieee80211_action_ba_addbarequest *ia;
2529 	uint8_t *frm;
2530 	uint16_t baparamset;
2531 
2532 	/* Not action frame? Bail */
2533 	if (! ieee80211_is_action(wh))
2534 		return 0;
2535 
2536 	/* XXX Not needed for frames we send? */
2537 #if 0
2538 	/* Correct length? */
2539 	if (! ieee80211_parse_action(ni, m))
2540 		return 0;
2541 #endif
2542 
2543 	/* Extract out action frame */
2544 	frm = (u_int8_t *)&wh[1];
2545 	ia = (struct ieee80211_action_ba_addbarequest *) frm;
2546 
2547 	/* Not ADDBA? Bail */
2548 	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2549 		return 0;
2550 	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2551 		return 0;
2552 
2553 	/* Extract TID, return it */
2554 	baparamset = le16toh(ia->rq_baparamset);
2555 	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2556 
2557 	return 1;
2558 }
2559 #undef	MS
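/*
 * Worked example of the extraction above, assuming net80211's usual
 * BA parameter set layout (IEEE80211_BAPS_TID == 0x003c, shift 2):
 * for rq_baparamset == 0x1016 the TID field is (0x1016 & 0x003c) >> 2
 * == 5, so the ADDBA request is steered to TID 5 and thus to the
 * TID_TO_WME_AC(5) == WME_AC_VI hardware queue.
 */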
2560 
2561 /* Per-node software queue operations */
2562 
2563 /*
2564  * Add the current packet to the given BAW.
2565  * It is assumed that the current packet
2566  *
2567  * + fits inside the BAW;
2568  * + already has had a sequence number allocated.
2569  *
2570  * Since the BAW status may be modified by both the ath task and
2571  * the net80211/ifnet contexts, the TID must be locked.
2572  */
2573 void
2574 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2575     struct ath_tid *tid, struct ath_buf *bf)
2576 {
2577 	int index, cindex;
2578 	struct ieee80211_tx_ampdu *tap;
2579 
2580 	ATH_TX_LOCK_ASSERT(sc);
2581 
2582 	if (bf->bf_state.bfs_isretried)
2583 		return;
2584 
2585 	tap = ath_tx_get_tx_tid(an, tid->tid);
2586 
2587 	if (! bf->bf_state.bfs_dobaw) {
2588 		device_printf(sc->sc_dev,
2589 		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2590 		    __func__,
2591 		    SEQNO(bf->bf_state.bfs_seqno),
2592 		    tap->txa_start,
2593 		    tap->txa_wnd);
2594 	}
2595 
2596 	if (bf->bf_state.bfs_addedbaw)
2597 		device_printf(sc->sc_dev,
2598 		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2599 		    "baw head=%d tail=%d\n",
2600 		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2601 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2602 		    tid->baw_tail);
2603 
2604 	/*
2605 	 * Verify that the given sequence number is not outside of the
2606 	 * BAW.  Complain loudly if that's the case.
2607 	 */
2608 	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2609 	    SEQNO(bf->bf_state.bfs_seqno))) {
2610 		device_printf(sc->sc_dev,
2611 		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2612 		    "baw head=%d tail=%d\n",
2613 		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2614 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2615 		    tid->baw_tail);
2616 	}
2617 
2618 	/*
2619 	 * ni->ni_txseqs[] is the currently allocated seqno.
2620 	 * the txa state contains the current baw start.
2621 	 */
2622 	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2623 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2624 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2625 	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2626 	    "baw head=%d tail=%d\n",
2627 	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2628 	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2629 	    tid->baw_tail);
2630 
2632 #if 0
2633 	assert(tid->tx_buf[cindex] == NULL);
2634 #endif
2635 	if (tid->tx_buf[cindex] != NULL) {
2636 		device_printf(sc->sc_dev,
2637 		    "%s: ba packet dup (index=%d, cindex=%d, "
2638 		    "head=%d, tail=%d)\n",
2639 		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2640 		device_printf(sc->sc_dev,
2641 		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2642 		    __func__,
2643 		    tid->tx_buf[cindex],
2644 		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2645 		    bf,
2646 		    SEQNO(bf->bf_state.bfs_seqno)
2647 		);
2648 	}
2649 	tid->tx_buf[cindex] = bf;
2650 
2651 	if (index >= ((tid->baw_tail - tid->baw_head) &
2652 	    (ATH_TID_MAX_BUFS - 1))) {
2653 		tid->baw_tail = cindex;
2654 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2655 	}
2656 }
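/*
 * Worked example of the index arithmetic above, assuming the usual
 * ATH_BA_INDEX(st, seq) == (((seq) - (st)) & (IEEE80211_SEQ_RANGE - 1))
 * definition: with tap->txa_start == 4090 and seqno == 5 the sequence
 * space (0..4095) has wrapped, so index == (5 - 4090) & 4095 == 11;
 * with baw_head == 3, cindex == (3 + 11) & (ATH_TID_MAX_BUFS - 1) == 14.
 */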
2657 
2658 /*
2659  * Flip the BAW buffer entry over from the existing one to the new one.
2660  *
2661  * When software retransmitting a (sub-)frame, it is entirely possible that
2662  * the frame ath_buf is marked as BUSY and can't be immediately reused.
2663  * In that instance the buffer is cloned and the new buffer is used for
2664  * retransmit. We thus need to update the ath_buf slot in the BAW buf
2665  * tracking array to maintain consistency.
2666  */
2667 static void
2668 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2669     struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2670 {
2671 	int index, cindex;
2672 	struct ieee80211_tx_ampdu *tap;
2673 	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2674 
2675 	ATH_TX_LOCK_ASSERT(sc);
2676 
2677 	tap = ath_tx_get_tx_tid(an, tid->tid);
2678 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2679 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2680 
2681 	/*
2682 	 * Just warn for now; if it happens then we should find out
2683 	 * about it. It's highly likely the aggregation session will
2684 	 * soon hang.
2685 	 */
2686 	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2687 		device_printf(sc->sc_dev, "%s: retransmitted buffer"
2688 		    " has mismatching seqnos; BA session may hang.\n",
2689 		    __func__);
2690 		device_printf(sc->sc_dev, "%s: old seqno=%d, new_seqno=%d\n",
2691 		    __func__,
2692 		    old_bf->bf_state.bfs_seqno,
2693 		    new_bf->bf_state.bfs_seqno);
2694 	}
2695 
2696 	if (tid->tx_buf[cindex] != old_bf) {
2697 		device_printf(sc->sc_dev, "%s: ath_buf pointer incorrect; "
2698 		    "BA session may hang.\n",
2699 		    __func__);
2700 		device_printf(sc->sc_dev, "%s: old bf=%p, new bf=%p\n",
2701 		    __func__,
2702 		    old_bf, new_bf);
2703 	}
2704 
2705 	tid->tx_buf[cindex] = new_bf;
2706 }
2707 
2708 /*
2709  * seq_start - left edge of BAW
2710  * seq_next - current/next sequence number to allocate
2711  *
2712  * Since the BAW status may be modified by both the ath task and
2713  * the net80211/ifnet contexts, the TID must be locked.
2714  */
2715 static void
2716 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2717     struct ath_tid *tid, const struct ath_buf *bf)
2718 {
2719 	int index, cindex;
2720 	struct ieee80211_tx_ampdu *tap;
2721 	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2722 
2723 	ATH_TX_LOCK_ASSERT(sc);
2724 
2725 	tap = ath_tx_get_tx_tid(an, tid->tid);
2726 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2727 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2728 
2729 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2730 	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2731 	    "baw head=%d, tail=%d\n",
2732 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2733 	    cindex, tid->baw_head, tid->baw_tail);
2734 
2735 	/*
2736 	 * If this occurs then we have a big problem - something else
2737 	 * has slid tap->txa_start along without updating the BAW
2738 	 * tracking start/end pointers. Thus the TX BAW state is now
2739 	 * completely busted.
2740 	 *
2741 	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2742 	 * it's quite possible that a cloned buffer is making its way
2743 	 * here and causing it to fire off. Disable TDMA for now.
2744 	 */
2745 	if (tid->tx_buf[cindex] != bf) {
2746 		device_printf(sc->sc_dev,
2747 		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2748 		    __func__,
2749 		    bf, SEQNO(bf->bf_state.bfs_seqno),
2750 		    tid->tx_buf[cindex],
2751 		    (tid->tx_buf[cindex] != NULL) ?
2752 		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2753 	}
2754 
2755 	tid->tx_buf[cindex] = NULL;
2756 
2757 	while (tid->baw_head != tid->baw_tail &&
2758 	    !tid->tx_buf[tid->baw_head]) {
2759 		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2760 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2761 	}
2762 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2763 	    "%s: baw is now %d:%d, baw head=%d\n",
2764 	    __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
2765 }
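/*
 * Example of the slide loop above: with txa_start == 100 and seqnos
 * 100..102 in flight, completing 102 first clears its slot but leaves
 * txa_start at 100; once 100 completes the window slides to 101, and
 * once 101 completes it slides straight past the already-cleared 102
 * slot to 103.
 */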
2766 
2767 static void
2768 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2769     struct ath_buf *bf)
2770 {
2771 	struct ieee80211_frame *wh;
2772 
2773 	ATH_TX_LOCK_ASSERT(sc);
2774 
2775 	if (tid->an->an_leak_count > 0) {
2776 		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2777 
2778 		/*
2779 		 * Update MORE based on the software/net80211 queue states.
2780 		 */
2781 		if ((tid->an->an_stack_psq > 0)
2782 		    || (tid->an->an_swq_depth > 0))
2783 			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2784 		else
2785 			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2786 
2787 		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2788 		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2789 		    __func__,
2790 		    tid->an->an_node.ni_macaddr,
2791 		    ":",
2792 		    tid->an->an_leak_count,
2793 		    tid->an->an_stack_psq,
2794 		    tid->an->an_swq_depth,
2795 		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2796 
2797 		/*
2798 		 * Re-sync the underlying buffer.
2799 		 */
2800 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2801 		    BUS_DMASYNC_PREWRITE);
2802 
2803 		tid->an->an_leak_count --;
2804 	}
2805 }
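/*
 * Example of the leak path above: when a station in power save sends
 * a PS-POLL, the PS-POLL handling elsewhere in the driver bumps
 * an_leak_count so that exactly one frame is "leaked" past the paused
 * TID, with MORE_DATA set if and only if anything remains on the
 * net80211 power-save queue (an_stack_psq) or the driver software
 * queue (an_swq_depth).
 */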
2806 
2807 static int
2808 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2809 {
2810 
2811 	ATH_TX_LOCK_ASSERT(sc);
2812 
2813 	if (tid->an->an_leak_count > 0) {
2814 		return (1);
2815 	}
2816 	if (tid->paused)
2817 		return (0);
2818 	return (1);
2819 }
2820 
2821 /*
2822  * Mark the current node/TID as ready to TX.
2823  *
2824  * This is done to make it easy for the software scheduler to
2825  * find which nodes have data to send.
2826  *
2827  * The TXQ lock must be held.
2828  */
2829 void
2830 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2831 {
2832 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2833 
2834 	ATH_TX_LOCK_ASSERT(sc);
2835 
2836 	/*
2837 	 * If we are leaking out a frame to this destination
2838 	 * for PS-POLL, ensure that we allow scheduling to
2839 	 * occur.
2840 	 */
2841 	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2842 		return;		/* paused, can't schedule yet */
2843 
2844 	if (tid->sched)
2845 		return;		/* already scheduled */
2846 
2847 	tid->sched = 1;
2848 
2849 #if 0
2850 	/*
2851 	 * If this is a sleeping node we're leaking to, give
2852 	 * it a higher priority.  This is so bad for QoS it hurts.
2853 	 */
2854 	if (tid->an->an_leak_count) {
2855 		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2856 	} else {
2857 		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2858 	}
2859 #endif
2860 
2861 	/*
2862 	 * We can't do the above - it'll confuse the TXQ software
2863 	 * scheduler which will keep checking the _head_ TID
2864 	 * in the list to see if it has traffic.  If we queue
2865 	 * a TID to the head of the list and it doesn't transmit,
2866 	 * we'll check it again.
2867 	 *
2868 	 * So, get the rest of this leaking frames support working
2869 	 * and reliable first and _then_ optimise it so they're
2870 	 * pushed out in front of any other pending software
2871 	 * queued nodes.
2872 	 */
2873 	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2874 }
2875 
2876 /*
2877  * Mark the current node as no longer needing to be polled for
2878  * TX packets.
2879  *
2880  * The TXQ lock must be held.
2881  */
2882 static void
2883 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2884 {
2885 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2886 
2887 	ATH_TX_LOCK_ASSERT(sc);
2888 
2889 	if (tid->sched == 0)
2890 		return;
2891 
2892 	tid->sched = 0;
2893 	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2894 }
2895 
2896 /*
2897  * Assign a sequence number manually to the given frame.
2898  *
2899  * This should only be called for A-MPDU TX frames.
2900  */
2901 static ieee80211_seq
2902 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2903     struct ath_buf *bf, struct mbuf *m0)
2904 {
2905 	struct ieee80211_frame *wh;
2906 	int tid, pri;
2907 	ieee80211_seq seqno;
2908 	uint8_t subtype;
2909 
2910 	/* TID lookup */
2911 	wh = mtod(m0, struct ieee80211_frame *);
2912 	pri = M_WME_GETAC(m0);			/* honor classification */
2913 	tid = WME_AC_TO_TID(pri);
2914 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2915 	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2916 
2917 	/* XXX Is it a control frame? Ignore */
2918 
2919 	/* Does the packet require a sequence number? */
2920 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2921 		return -1;
2922 
2923 	ATH_TX_LOCK_ASSERT(sc);
2924 
2925 	/*
2926 	 * Is it a QOS NULL Data frame? Give it a sequence number from
2927 	 * the default TID (IEEE80211_NONQOS_TID.)
2928 	 *
2929 	 * The RX path of everything I've looked at doesn't include the NULL
2930 	 * data frame sequence number in the aggregation state updates, so
2931 	 * assigning it a sequence number from the current TID will cause
2932 	 * a BAW hole on the RX side.
2933 	 */
2934 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2935 	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2936 		/* XXX no locking for this TID? This is a bit of a problem. */
2937 		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2938 		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2939 	} else {
2940 		/* Manually assign sequence number */
2941 		seqno = ni->ni_txseqs[tid];
2942 		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2943 	}
2944 	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2945 	M_SEQNO_SET(m0, seqno);
2946 
2947 	/* Return so caller can do something with it if needed */
2948 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s:  -> seqno=%d\n", __func__, seqno);
2949 	return seqno;
2950 }
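/*
 * Example of the encoding above: for seqno == 100 the sequence
 * control field becomes htole16(100 << IEEE80211_SEQ_SEQ_SHIFT) ==
 * htole16(0x0640), ie i_seq[0] == 0x40 and i_seq[1] == 0x06 on the
 * wire, with the fragment number bits left at zero.
 */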
2951 
2952 /*
2953  * Attempt to direct dispatch an aggregate frame to hardware.
2954  * If the frame is out of the BAW, queue it.
2955  * Otherwise, direct dispatch it as a single frame.
2956  */
2957 static void
2958 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2959     struct ath_txq *txq, struct ath_buf *bf)
2960 {
2961 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2962 	struct ieee80211_tx_ampdu *tap;
2963 
2964 	ATH_TX_LOCK_ASSERT(sc);
2965 
2966 	tap = ath_tx_get_tx_tid(an, tid->tid);
2967 
2968 	/* paused? queue */
2969 	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2970 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2971 		/* XXX don't sched - we're paused! */
2972 		return;
2973 	}
2974 
2975 	/* outside baw? queue */
2976 	if (bf->bf_state.bfs_dobaw &&
2977 	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2978 	    SEQNO(bf->bf_state.bfs_seqno)))) {
2979 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2980 		ath_tx_tid_sched(sc, tid);
2981 		return;
2982 	}
2983 
2984 	/*
2985 	 * This is a temporary check and should be removed once
2986 	 * all the relevant code paths have been fixed.
2987 	 *
2988 	 * During aggregate retries, it's possible that the head
2989 	 * frame will fail (which has the bfs_aggr and bfs_nframes
2990 	 * fields set for said aggregate) and will be retried as
2991 	 * a single frame.  In this instance, the values should
2992 	 * be reset or the completion code will get upset with you.
2993 	 */
2994 	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2995 		device_printf(sc->sc_dev, "%s: bfs_aggr=%d, bfs_nframes=%d\n",
2996 		    __func__,
2997 		    bf->bf_state.bfs_aggr,
2998 		    bf->bf_state.bfs_nframes);
2999 		bf->bf_state.bfs_aggr = 0;
3000 		bf->bf_state.bfs_nframes = 1;
3001 	}
3002 
3003 	/* Update CLRDMASK just before this frame is queued */
3004 	ath_tx_update_clrdmask(sc, tid, bf);
3005 
3006 	/* Direct dispatch to hardware */
3007 	ath_tx_do_ratelookup(sc, bf);
3008 	ath_tx_calc_duration(sc, bf);
3009 	ath_tx_calc_protection(sc, bf);
3010 	ath_tx_set_rtscts(sc, bf);
3011 	ath_tx_rate_fill_rcflags(sc, bf);
3012 	ath_tx_setds(sc, bf);
3013 
3014 	/* Statistics */
3015 	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3016 
3017 	/* Track per-TID hardware queue depth correctly */
3018 	tid->hwq_depth++;
3019 
3020 	/* Add to BAW */
3021 	if (bf->bf_state.bfs_dobaw) {
3022 		ath_tx_addto_baw(sc, an, tid, bf);
3023 		bf->bf_state.bfs_addedbaw = 1;
3024 	}
3025 
3026 	/* Set completion handler, multi-frame aggregate or not */
3027 	bf->bf_comp = ath_tx_aggr_comp;
3028 
3029 	/*
3030 	 * Update the current leak count if
3031 	 * we're leaking frames; and set the
3032 	 * MORE flag as appropriate.
3033 	 */
3034 	ath_tx_leak_count_update(sc, tid, bf);
3035 
3036 	/* Hand off to hardware */
3037 	ath_tx_handoff(sc, txq, bf);
3038 }
3039 
3040 /*
3041  * Attempt to send the packet.
3042  * If the queue isn't busy, direct-dispatch.
3043  * If the queue is busy enough, queue the given packet on the
3044  *  relevant software queue.
3045  */
3046 void
3047 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3048     struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3049 {
3050 	struct ath_node *an = ATH_NODE(ni);
3051 	struct ieee80211_frame *wh;
3052 	struct ath_tid *atid;
3053 	int pri, tid;
3054 	struct mbuf *m0 = bf->bf_m;
3055 
3056 	ATH_TX_LOCK_ASSERT(sc);
3057 
3058 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3059 	wh = mtod(m0, struct ieee80211_frame *);
3060 	pri = ath_tx_getac(sc, m0);
3061 	tid = ath_tx_gettid(sc, m0);
3062 	atid = &an->an_tid[tid];
3063 
3064 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3065 	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3066 
3067 	/* Set local packet state, used to queue packets to hardware */
3068 	/* XXX potentially duplicate info, re-check */
3069 	bf->bf_state.bfs_tid = tid;
3070 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3071 	bf->bf_state.bfs_pri = pri;
3072 
3073 	/*
3074 	 * If the hardware queue isn't busy, direct dispatch it.
3075 	 * If the hardware queue is busy, software queue it.
3076 	 * If the TID is paused or the traffic is outside the BAW,
3077 	 * software queue it.
3078 	 *
3079 	 * If the node is in power-save and we're leaking a frame,
3080 	 * leak a single frame.
3081 	 */
3082 	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3083 		/* TID is paused, queue */
3084 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3085 		/*
3086 		 * If the caller requested that it be sent at a high
3087 		 * priority, queue it at the head of the list.
3088 		 */
3089 		if (queue_to_head)
3090 			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3091 		else
3092 			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3093 	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3094 		/* AMPDU pending; queue */
3095 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3096 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3097 		/* XXX sched? */
3098 	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3099 		/* AMPDU running, attempt direct dispatch if possible */
3100 
3101 		/*
3102 		 * Always queue the frame to the tail of the list.
3103 		 */
3104 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3105 
3106 		/*
3107 		 * If the hardware queue isn't busy, direct dispatch
3108 		 * the head frame in the list.  Don't schedule the
3109 		 * TID - let it build some more frames first?
3110 		 *
3111 		 * When running A-MPDU, always just check the hardware
3112 		 * queue depth against the aggregate frame limit.
3113 		 * We don't want to burst a large number of single frames
3114 		 * out to the hardware; we want to aggressively hold back.
3115 		 *
3116 		 * Otherwise, schedule the TID.
3117 		 */
3118 		/* XXX TXQ locking */
3119 		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3120 			bf = ATH_TID_FIRST(atid);
3121 			ATH_TID_REMOVE(atid, bf, bf_list);
3122 
3123 			/*
3124 			 * Ensure it's definitely treated as a non-AMPDU
3125 			 * frame - this information may have been left
3126 			 * over from a previous attempt.
3127 			 */
3128 			bf->bf_state.bfs_aggr = 0;
3129 			bf->bf_state.bfs_nframes = 1;
3130 
3131 			/* Queue to the hardware */
3132 			ath_tx_xmit_aggr(sc, an, txq, bf);
3133 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3134 			    "%s: xmit_aggr\n",
3135 			    __func__);
3136 		} else {
3137 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3138 			    "%s: ampdu; swq'ing\n",
3139 			    __func__);
3140 
3141 			ath_tx_tid_sched(sc, atid);
3142 		}
3143 	/*
3144 	 * If we're not doing A-MPDU, be prepared to direct dispatch
3145 	 * up to both limits if possible.  This particular corner
3146 	 * case may end up with packet starvation between aggregate
3147 	 * traffic and non-aggregate traffic: we want to ensure
3148 	 * that non-aggregate stations get a few frames queued to the
3149 	 * hardware before the aggregate station(s) get their chance.
3150 	 *
3151 	 * So if you only ever see a couple of frames direct dispatched
3152 	 * to the hardware from a non-AMPDU client, check both here
3153 	 * and in the software queue dispatcher to ensure that those
3154 	 * non-AMPDU stations get a fair chance to transmit.
3155 	 */
3156 	/* XXX TXQ locking */
3157 	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3158 		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3159 		/* AMPDU not running, attempt direct dispatch */
3160 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3161 		/* See if clrdmask needs to be set */
3162 		ath_tx_update_clrdmask(sc, atid, bf);
3163 
3164 		/*
3165 		 * Update the current leak count if
3166 		 * we're leaking frames; and set the
3167 		 * MORE flag as appropriate.
3168 		 */
3169 		ath_tx_leak_count_update(sc, atid, bf);
3170 
3171 		/*
3172 		 * Dispatch the frame.
3173 		 */
3174 		ath_tx_xmit_normal(sc, txq, bf);
3175 	} else {
3176 		/* Busy; queue */
3177 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3178 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3179 		ath_tx_tid_sched(sc, atid);
3180 	}
3181 }
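/*
 * Summary of the dispatch decisions above (sketch):
 *
 *   TID paused/unschedulable -> software queue (head if requested)
 *   A-MPDU pending           -> software queue tail
 *   A-MPDU running           -> queue to tail, then direct dispatch
 *                               the head frame iff the hardware queue
 *                               depth is under the aggregate limit
 *   otherwise                -> direct dispatch iff under both the
 *                               non-aggregate and aggregate limits,
 *                               else software queue and schedule
 */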
3182 
3183 /*
3184  * Only set the clrdmask bit if none of the node's TIDs are
3185  * currently filtered.
3186  *
3187  * XXX TODO: go through all the callers and check to see
3188  * which are being called in the context of looping over all
3189  * TIDs (eg, if all tids are being paused, resumed, etc.)
3190  * That'll avoid O(n^2) complexity here.
3191  */
3192 static void
3193 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3194 {
3195 	int i;
3196 
3197 	ATH_TX_LOCK_ASSERT(sc);
3198 
3199 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3200 		if (an->an_tid[i].isfiltered == 1)
3201 			return;
3202 	}
3203 	an->clrdmask = 1;
3204 }
3205 
3206 /*
3207  * Configure the per-TID node state.
3208  *
3209  * This likely belongs in if_ath_node.c but I can't think of anywhere
3210  * else to put it just yet.
3211  *
3212  * This sets up the TAILQs and per-TID state as appropriate.
3213  */
3214 void
3215 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3216 {
3217 	int i, j;
3218 	struct ath_tid *atid;
3219 
3220 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3221 		atid = &an->an_tid[i];
3222 
3223 		/* XXX now with this bzero(), is the field zeroing needed? */
3224 		bzero(atid, sizeof(*atid));
3225 
3226 		TAILQ_INIT(&atid->tid_q);
3227 		TAILQ_INIT(&atid->filtq.tid_q);
3228 		atid->tid = i;
3229 		atid->an = an;
3230 		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3231 			atid->tx_buf[j] = NULL;
3232 		atid->baw_head = atid->baw_tail = 0;
3233 		atid->paused = 0;
3234 		atid->sched = 0;
3235 		atid->hwq_depth = 0;
3236 		atid->cleanup_inprogress = 0;
3237 		if (i == IEEE80211_NONQOS_TID)
3238 			atid->ac = ATH_NONQOS_TID_AC;
3239 		else
3240 			atid->ac = TID_TO_WME_AC(i);
3241 	}
3242 	an->clrdmask = 1;	/* Always start by setting this bit */
3243 }
3244 
3245 /*
3246  * Pause the current TID. This stops packets from being transmitted
3247  * on it.
3248  *
3249  * Since this is also called from upper layers as well as the driver,
3250  * it will get the TID lock.
3251  */
3252 static void
3253 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3254 {
3255 
3256 	ATH_TX_LOCK_ASSERT(sc);
3257 	tid->paused++;
3258 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
3259 	    __func__, tid->paused);
3260 }
3261 
3262 /*
3263  * Unpause the current TID, and schedule it if needed.
3264  */
3265 static void
3266 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3267 {
3268 	ATH_TX_LOCK_ASSERT(sc);
3269 
3270 	/*
3271 	 * There are some odd places where ath_tx_tid_resume() is called
3272 	 * when it shouldn't be; this works around that particular issue
3273 	 * until it's actually resolved.
3274 	 */
3275 	if (tid->paused == 0) {
3276 		device_printf(sc->sc_dev, "%s: %6D: paused=0?\n",
3277 		    __func__,
3278 		    tid->an->an_node.ni_macaddr,
3279 		    ":");
3280 	} else {
3281 		tid->paused--;
3282 	}
3283 
3284 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n",
3285 	    __func__, tid->paused);
3286 
3287 	if (tid->paused)
3288 		return;
3289 
3290 	/*
3291 	 * Override the clrdmask configuration for the next frame
3292 	 * from this TID, just to get the ball rolling.
3293 	 */
3294 	ath_tx_set_clrdmask(sc, tid->an);
3295 
3296 	if (tid->axq_depth == 0)
3297 		return;
3298 
3299 	/* XXX isfiltered shouldn't ever be 1 at this point */
3300 	if (tid->isfiltered == 1) {
3301 		device_printf(sc->sc_dev, "%s: filtered?!\n", __func__);
3302 		return;
3303 	}
3304 
3305 	ath_tx_tid_sched(sc, tid);
3306 
3307 	/*
3308 	 * Queue the software TX scheduler.
3309 	 */
3310 	ath_tx_swq_kick(sc);
3311 }
3312 
3313 /*
3314  * Add the given ath_buf to the TID filtered frame list.
3315  * This requires the TID be filtered.
3316  */
3317 static void
3318 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3319     struct ath_buf *bf)
3320 {
3321 
3322 	ATH_TX_LOCK_ASSERT(sc);
3323 
3324 	if (! tid->isfiltered)
3325 		device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__);
3326 
3327 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3328 
3329 	/* Set the retry bit and bump the retry counter */
3330 	ath_tx_set_retry(sc, bf);
3331 	sc->sc_stats.ast_tx_swfiltered++;
3332 
3333 	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3334 }
3335 
3336 /*
3337  * Handle a completed filtered frame from the given TID.
3338  * This just enables/pauses the filtered frame state if required
3339  * and appends the filtered frame to the filtered queue.
3340  */
3341 static void
3342 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3343     struct ath_buf *bf)
3344 {
3345 
3346 	ATH_TX_LOCK_ASSERT(sc);
3347 
3348 	if (! tid->isfiltered) {
3349 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n",
3350 		    __func__);
3351 		tid->isfiltered = 1;
3352 		ath_tx_tid_pause(sc, tid);
3353 	}
3354 
3355 	/* Add the frame to the filter queue */
3356 	ath_tx_tid_filt_addbuf(sc, tid, bf);
3357 }
3358 
3359 /*
3360  * Complete the filtered frame TX completion.
3361  *
3362  * If there are no more frames in the hardware queue, unpause/unfilter
3363  * the TID if applicable.  Otherwise we will wait for a node PS transition
3364  * to unfilter.
3365  */
3366 static void
3367 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3368 {
3369 	struct ath_buf *bf;
3370 
3371 	ATH_TX_LOCK_ASSERT(sc);
3372 
3373 	if (tid->hwq_depth != 0)
3374 		return;
3375 
3376 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n",
3377 	    __func__);
3378 	tid->isfiltered = 0;
3379 	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3380 	ath_tx_set_clrdmask(sc, tid->an);
3381 
3382 	/* XXX this is really quite inefficient */
3383 	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3384 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3385 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3386 	}
3387 
3388 	ath_tx_tid_resume(sc, tid);
3389 }
3390 
3391 /*
3392  * Called when a single (aggregate or otherwise) frame is completed.
3393  *
3394  * Returns 1 if the buffer could be added to the filtered list
3395  * (cloned or otherwise), 0 if the buffer couldn't be added to the
3396  * filtered list (failed clone; expired retry) and the caller should
3397  * free it and handle it like a failure (eg by sending a BAR.)
3398  */
3399 static int
3400 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3401     struct ath_buf *bf)
3402 {
3403 	struct ath_buf *nbf;
3404 	int retval;
3405 
3406 	ATH_TX_LOCK_ASSERT(sc);
3407 
3408 	/*
3409 	 * Don't allow a filtered frame to live forever.
3410 	 */
3411 	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3412 		sc->sc_stats.ast_tx_swretrymax++;
3413 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3414 		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3415 		    __func__,
3416 		    bf,
3417 		    bf->bf_state.bfs_seqno);
3418 		return (0);
3419 	}
3420 
3421 	/*
3422 	 * A busy buffer can't be added to the retry list.
3423 	 * It needs to be cloned.
3424 	 */
3425 	if (bf->bf_flags & ATH_BUF_BUSY) {
3426 		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3427 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3428 		    "%s: busy buffer clone: %p -> %p\n",
3429 		    __func__, bf, nbf);
3430 	} else {
3431 		nbf = bf;
3432 	}
3433 
3434 	if (nbf == NULL) {
3435 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3436 		    "%s: busy buffer couldn't be cloned (%p)!\n",
3437 		    __func__, bf);
3438 		retval = 1;
3439 	} else {
3440 		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3441 		retval = 0;
3442 	}
3443 	ath_tx_tid_filt_comp_complete(sc, tid);
3444 
3445 	return (retval);
3446 }
3447 
3448 static void
3449 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3450     struct ath_buf *bf_first, ath_bufhead *bf_q)
3451 {
3452 	struct ath_buf *bf, *bf_next, *nbf;
3453 
3454 	ATH_TX_LOCK_ASSERT(sc);
3455 
3456 	bf = bf_first;
3457 	while (bf) {
3458 		bf_next = bf->bf_next;
3459 		bf->bf_next = NULL;	/* Remove it from the aggr list */
3460 
3461 		/*
3462 		 * Don't allow a filtered frame to live forever.
3463 		 */
3464 		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3465 			sc->sc_stats.ast_tx_swretrymax++;
3466 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3467 			    "%s: bf=%p, seqno=%d, exceeded retries\n",
3468 			    __func__,
3469 			    bf,
3470 			    bf->bf_state.bfs_seqno);
3471 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3472 			goto next;
3473 		}
3474 
3475 		if (bf->bf_flags & ATH_BUF_BUSY) {
3476 			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3477 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3478 	    "%s: busy buffer cloned: %p -> %p\n",
3479 			    __func__, bf, nbf);
3480 		} else {
3481 			nbf = bf;
3482 		}
3483 
3484 		/*
3485 		 * If the buffer couldn't be cloned, add it to bf_q;
3486 		 * the caller will free the buffer(s) as required.
3487 		 */
3488 		if (nbf == NULL) {
3489 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3490 			    "%s: buffer couldn't be cloned! (%p)\n",
3491 			    __func__, bf);
3492 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3493 		} else {
3494 			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3495 		}
3496 next:
3497 		bf = bf_next;
3498 	}
3499 
3500 	ath_tx_tid_filt_comp_complete(sc, tid);
3501 }
3502 
3503 /*
3504  * Suspend the queue because we need to TX a BAR.
3505  */
3506 static void
3507 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3508 {
3509 
3510 	ATH_TX_LOCK_ASSERT(sc);
3511 
3512 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3513 	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3514 	    __func__,
3515 	    tid->tid,
3516 	    tid->bar_wait,
3517 	    tid->bar_tx);
3518 
3519 	/* We shouldn't be called when bar_tx is 1 */
3520 	if (tid->bar_tx) {
3521 		device_printf(sc->sc_dev, "%s: bar_tx is 1?!\n",
3522 		    __func__);
3523 	}
3524 
3525 	/* If we've already been called, just be patient. */
3526 	if (tid->bar_wait)
3527 		return;
3528 
3529 	/* Wait! */
3530 	tid->bar_wait = 1;
3531 
3532 	/* Only one pause, no matter how many frames fail */
3533 	ath_tx_tid_pause(sc, tid);
3534 }
3535 
3536 /*
3537  * We've finished with BAR handling - either we succeeded or
3538  * failed. Either way, unsuspend TX.
3539  */
3540 static void
3541 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3542 {
3543 
3544 	ATH_TX_LOCK_ASSERT(sc);
3545 
3546 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3547 	    "%s: %6D: TID=%d, called\n",
3548 	    __func__,
3549 	    tid->an->an_node.ni_macaddr,
3550 	    ":",
3551 	    tid->tid);
3552 
3553 	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3554 		device_printf(sc->sc_dev,
3555 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3556 		    __func__,
3557 		    tid->an->an_node.ni_macaddr,
3558 		    ":",
3559 		    tid->tid,
3560 		    tid->bar_tx,
3561 		    tid->bar_wait);
3562 	}
3563 
3564 	tid->bar_tx = tid->bar_wait = 0;
3565 	ath_tx_tid_resume(sc, tid);
3566 }
3567 
3568 /*
3569  * Return whether we're ready to TX a BAR frame.
3570  *
3571  * Requires the TID lock be held.
3572  */
3573 static int
3574 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3575 {
3576 
3577 	ATH_TX_LOCK_ASSERT(sc);
3578 
3579 	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3580 		return (0);
3581 
3582 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3583 	    "%s: %6D: TID=%d, bar ready\n",
3584 	    __func__,
3585 	    tid->an->an_node.ni_macaddr,
3586 	    ":",
3587 	    tid->tid);
3588 
3589 	return (1);
3590 }
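
/*
 * bar_wait/bar_tx behave as a small state machine; a standalone
 * sketch (not driver code) of the legal transitions between the
 * suspend/unsuspend/ready-check routines above, with the actual
 * send done by ath_tx_tid_bar_tx() below:
 *
 *	IDLE (0,0) --suspend-->  WAIT (1,0)
 *	WAIT (1,0) --hwq == 0--> SENT (1,1)
 *	SENT (1,1) --unsuspend-> IDLE (0,0)
 */
enum bar_state { BAR_IDLE, BAR_WAIT, BAR_SENT };

static enum bar_state
bar_next(enum bar_state s, int hwq_depth)
{
	switch (s) {
	case BAR_IDLE:
		return (BAR_WAIT);	/* ath_tx_tid_bar_suspend() */
	case BAR_WAIT:
		/* ath_tx_tid_bar_tx_ready(): wait for the hwq to drain */
		return (hwq_depth == 0 ? BAR_SENT : BAR_WAIT);
	case BAR_SENT:
	default:
		return (BAR_IDLE);	/* ath_tx_tid_bar_unsuspend() */
	}
}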
3591 
3592 /*
3593  * Check whether the current TID is ready to have a BAR
3594  * TXed and if so, do the TX.
3595  *
3596  * Since the TID/TXQ lock can't be held during a call to
3597  * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3598  * sending the BAR and locking it again.
3599  *
3600  * Eventually, the code to send the BAR should be broken out
3601  * from this routine so the lock doesn't have to be reacquired
3602  * just to be immediately dropped by the caller.
3603  */
3604 static void
3605 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3606 {
3607 	struct ieee80211_tx_ampdu *tap;
3608 
3609 	ATH_TX_LOCK_ASSERT(sc);
3610 
3611 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3612 	    "%s: %6D: TID=%d, called\n",
3613 	    __func__,
3614 	    tid->an->an_node.ni_macaddr,
3615 	    ":",
3616 	    tid->tid);
3617 
3618 	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3619 
3620 	/*
3621 	 * This is an error condition!
3622 	 */
3623 	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3624 		device_printf(sc->sc_dev,
3625 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3626 		    __func__,
3627 		    tid->an->an_node.ni_macaddr,
3628 		    ":",
3629 		    tid->tid,
3630 		    tid->bar_tx,
3631 		    tid->bar_wait);
3632 		return;
3633 	}
3634 
3635 	/* Don't do anything if we still have pending frames */
3636 	if (tid->hwq_depth > 0) {
3637 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3638 		    "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3639 		    __func__,
3640 		    tid->an->an_node.ni_macaddr,
3641 		    ":",
3642 		    tid->tid,
3643 		    tid->hwq_depth);
3644 		return;
3645 	}
3646 
3647 	/* We're now about to TX */
3648 	tid->bar_tx = 1;
3649 
3650 	/*
3651 	 * Override the clrdmask configuration for the next frame,
3652 	 * just to get the ball rolling.
3653 	 */
3654 	ath_tx_set_clrdmask(sc, tid->an);
3655 
3656 	/*
3657 	 * Calculate new BAW left edge, now that all frames have either
3658 	 * succeeded or failed.
3659 	 *
3660 	 * XXX verify this is _actually_ the valid value to begin at!
3661 	 */
3662 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3663 	    "%s: %6D: TID=%d, new BAW left edge=%d\n",
3664 	    __func__,
3665 	    tid->an->an_node.ni_macaddr,
3666 	    ":",
3667 	    tid->tid,
3668 	    tap->txa_start);
3669 
3670 	/* Try sending the BAR frame */
3671 	/* We can't hold the lock here! */
3672 
3673 	ATH_TX_UNLOCK(sc);
3674 	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3675 		/* Success? Now we wait for notification that it's done */
3676 		ATH_TX_LOCK(sc);
3677 		return;
3678 	}
3679 
3680 	/* Failure? For now, warn loudly and continue */
3681 	ATH_TX_LOCK(sc);
3682 	device_printf(sc->sc_dev,
3683 	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3684 	    __func__,
3685 	    tid->an->an_node.ni_macaddr,
3686 	    ":",
3687 	    tid->tid);
3688 	ath_tx_tid_bar_unsuspend(sc, tid);
3689 }
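
/*
 * The unlock/send/relock dance above is the usual shape for calling
 * into net80211 (which may re-enter the driver) without holding a
 * driver lock.  A hedged sketch of the pattern (illustrative only;
 * state_still_valid() is a stand-in for the bar_wait/bar_tx/
 * hwq_depth checks):
 */
#if 0
	ATH_TX_LOCK(sc);
	if (! state_still_valid(tid)) {
		ATH_TX_UNLOCK(sc);
		return;
	}
	tid->bar_tx = 1;	/* commit before dropping the lock */
	ATH_TX_UNLOCK(sc);
	error = ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start);
	ATH_TX_LOCK(sc);	/* state may have moved; re-validate it */
	if (error != 0)
		ath_tx_tid_bar_unsuspend(sc, tid);
	ATH_TX_UNLOCK(sc);
#endif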
3690 
3691 static void
3692 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3693     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3694 {
3695 
3696 	ATH_TX_LOCK_ASSERT(sc);
3697 
3698 	/*
3699 	 * If the current TID is running AMPDU, update
3700 	 * the BAW.
3701 	 */
3702 	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3703 	    bf->bf_state.bfs_dobaw) {
3704 		/*
3705 		 * Only remove the frame from the BAW if it's
3706 		 * been transmitted at least once; this means
3707 		 * the frame was in the BAW to begin with.
3708 		 */
3709 		if (bf->bf_state.bfs_retries > 0) {
3710 			ath_tx_update_baw(sc, an, tid, bf);
3711 			bf->bf_state.bfs_dobaw = 0;
3712 		}
3713 #if 0
3714 		/*
3715 		 * This has become a non-fatal error now
3716 		 */
3717 		if (! bf->bf_state.bfs_addedbaw)
3718 			device_printf(sc->sc_dev,
3719 			    "%s: wasn't added: seqno %d\n",
3720 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3721 #endif
3722 	}
3723 
3724 	/* Strip it out of an aggregate list if it was in one */
3725 	bf->bf_next = NULL;
3726 
3727 	/* Insert on the free queue to be freed by the caller */
3728 	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3729 }
3730 
3731 static void
3732 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3733     const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3734 {
3735 	struct ieee80211_node *ni = &an->an_node;
3736 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
3737 	struct ieee80211_tx_ampdu *tap;
3738 
3739 	tap = ath_tx_get_tx_tid(an, tid->tid);
3740 
3741 	device_printf(sc->sc_dev,
3742 	    "%s: %s: node %p: bf=%p: addbaw=%d, dobaw=%d, "
3743 	    "seqno=%d, retry=%d\n",
3744 	    __func__, pfx, ni, bf,
3745 	    bf->bf_state.bfs_addedbaw,
3746 	    bf->bf_state.bfs_dobaw,
3747 	    SEQNO(bf->bf_state.bfs_seqno),
3748 	    bf->bf_state.bfs_retries);
3749 	device_printf(sc->sc_dev,
3750 	    "%s: node %p: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3751 	    __func__, ni, bf,
3752 	    txq->axq_qnum,
3753 	    txq->axq_depth,
3754 	    txq->axq_aggr_depth);
3755 
3756 	device_printf(sc->sc_dev,
3757 	    "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, isfiltered=%d\n",
3758 	    __func__, ni, bf,
3759 	    tid->axq_depth,
3760 	    tid->hwq_depth,
3761 	    tid->bar_wait,
3762 	    tid->isfiltered);
3763 	device_printf(sc->sc_dev,
3764 	    "%s: node %p: tid %d: "
3765 	    "sched=%d, paused=%d, "
3766 	    "incomp=%d, baw_head=%d, "
3767 	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3768 	     __func__, ni, tid->tid,
3769 	     tid->sched, tid->paused,
3770 	     tid->incomp, tid->baw_head,
3771 	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3772 	     ni->ni_txseqs[tid->tid]);
3773 
3774 	/* XXX Dump the frame, see what it is? */
3775 	ieee80211_dump_pkt(ni->ni_ic,
3776 	    mtod(bf->bf_m, const uint8_t *),
3777 	    bf->bf_m->m_len, 0, -1);
3778 }
3779 
3780 /*
3781  * Free any packets currently pending in the software TX queue.
3782  *
3783  * This will be called when a node is being deleted.
3784  *
3785  * It can also be called on an active node during an interface
3786  * reset or state transition.
3787  *
3788  * (From Linux/reference):
3789  *
3790  * TODO: For frame(s) that are in the retry state, we will reuse the
3791  * sequence number(s) without setting the retry bit. The
3792  * alternative is to give up on these and BAR the receiver's window
3793  * forward.
3794  */
3795 static void
3796 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3797     struct ath_tid *tid, ath_bufhead *bf_cq)
3798 {
3799 	struct ath_buf *bf;
3800 	struct ieee80211_tx_ampdu *tap;
3801 	struct ieee80211_node *ni = &an->an_node;
3802 	int t;
3803 
3804 	tap = ath_tx_get_tx_tid(an, tid->tid);
3805 
3806 	ATH_TX_LOCK_ASSERT(sc);
3807 
3808 	/* Walk the queue, free frames */
3809 	t = 0;
3810 	for (;;) {
3811 		bf = ATH_TID_FIRST(tid);
3812 		if (bf == NULL) {
3813 			break;
3814 		}
3815 
3816 		if (t == 0) {
3817 			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3818 			t = 1;
3819 		}
3820 
3821 		ATH_TID_REMOVE(tid, bf, bf_list);
3822 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3823 	}
3824 
3825 	/* And now, drain the filtered frame queue */
3826 	t = 0;
3827 	for (;;) {
3828 		bf = ATH_TID_FILT_FIRST(tid);
3829 		if (bf == NULL)
3830 			break;
3831 
3832 		if (t == 0) {
3833 			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3834 			t = 1;
3835 		}
3836 
3837 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3838 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3839 	}
3840 
3841 	/*
3842 	 * Override the clrdmask configuration for the next frame
3843 	 * in case there is some future transmission, just to get
3844 	 * the ball rolling.
3845 	 *
3846 	 * This won't hurt things if the TID is about to be freed.
3847 	 */
3848 	ath_tx_set_clrdmask(sc, tid->an);
3849 
3850 	/*
3851 	 * Now that it's completed, update the sequence number and
3852 	 * BAW window (the TX lock is already held at this point.)
3853 	 * Because sequence numbers have been assigned to frames
3854 	 * that haven't been sent yet, it's entirely possible
3855 	 * we'll be called with some pending frames that have not
3856 	 * been transmitted.
3857 	 *
3858 	 * The cleaner solution is to do the sequence number allocation
3859 	 * when the packet is first transmitted - and thus the "retries"
3860 	 * check above would be enough to update the BAW/seqno.
3861 	 */
3862 
3863 	/* But don't do it for non-QoS TIDs */
3864 	if (tap) {
3865 #if 1
3866 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3867 		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3868 		    __func__,
3869 		    ni->ni_macaddr,
3870 		    ":",
3871 		    an,
3872 		    tid->tid,
3873 		    tap->txa_start);
3874 #endif
3875 		ni->ni_txseqs[tid->tid] = tap->txa_start;
3876 		tid->baw_tail = tid->baw_head;
3877 	}
3878 }
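
/*
 * A standalone sketch (not driver code) of the seqno rewind above:
 * sequence numbers live in a 4096-entry modular space, so once the
 * pending-but-unsent allocations are thrown away, pointing
 * ni_txseqs[] back at the BAW left edge makes the allocator and the
 * window agree again, wrap or no wrap.
 */
#include <stdio.h>

#define	SKETCH_SEQ_RANGE	4096	/* stands in for IEEE80211_SEQ_RANGE */

int
main(void)
{
	int txa_start = 4090;	/* BAW left edge after the drain */
	int ni_txseq = 6;	/* allocator already wrapped past 4095 */

	ni_txseq = txa_start;	/* the rewind done above */

	/* modular distance of a wrapped seqno from the left edge */
	printf("offset of seqno 6: %d\n",
	    (6 - txa_start) & (SKETCH_SEQ_RANGE - 1));	/* prints 12 */
	printf("next seqno: %d\n", ni_txseq);		/* prints 4090 */
	return (0);
}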
3879 
3880 /*
3881  * Reset the TID state.  This must only be called once the node has
3882  * had its frames flushed from this TID, to ensure that no other
3883  * pause / unpause logic can kick in.
3884  */
3885 static void
3886 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3887 {
3888 
3889 #if 0
3890 	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3891 	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3892 	tid->incomp = tid->cleanup_inprogress = 0;
3893 #endif
3894 
3895 	/*
3896 	 * If we have a bar_wait set, we need to unpause the TID
3897 	 * here.  Otherwise once cleanup has finished, the TID won't
3898 	 * have the right paused counter.
3899 	 *
3900 	 * XXX I'm not going through resume here - I don't want the
3901 	 * node to be rescheduled just yet.  This however should be
3902 	 * methodized!
3903 	 */
3904 	if (tid->bar_wait) {
3905 		if (tid->paused > 0) {
3906 			tid->paused--;
3907 		}
3908 	}
3909 
3910 	/*
3911 	 * XXX same with a currently filtered TID.
3912 	 *
3913 	 * Since this is being called during a flush, we assume that
3914 	 * the filtered frame list is actually empty.
3915 	 *
3916 	 * XXX TODO: add in a check to ensure that the filtered queue
3917 	 * depth is actually 0!
3918 	 */
3919 	if (tid->isfiltered) {
3920 		if (tid->paused > 0) {
3921 			tid->paused--;
3922 		}
3923 	}
3924 
3925 	/*
3926 	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3927 	 * The TID may be going through cleanup from the last association
3928 	 * where things in the BAW are still in the hardware queue.
3929 	 */
3930 	tid->bar_wait = 0;
3931 	tid->bar_tx = 0;
3932 	tid->isfiltered = 0;
3933 	tid->sched = 0;
3934 	tid->addba_tx_pending = 0;
3935 
3936 	/*
3937 	 * XXX TODO: it may just be enough to walk the HWQs and mark
3938 	 * frames for that node as non-aggregate; or mark the ath_node
3939 	 * with something that indicates that aggregation is no longer
3940 	 * occurring.  Then we can just toss the BAW complaints and
3941 	 * do a complete hard reset of state here - no pause, no
3942 	 * complete counter, etc.
3943 	 */
3944 
3945 }
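
/*
 * A hedged sketch (hypothetical struct; not driver code) of the pause
 * accounting being unwound above: pause/resume form a counted pair,
 * and bar_wait and isfiltered each hold one pause reference while
 * they're set, so a reset must drop exactly those references without
 * triggering a resume/reschedule.
 */
struct tid_pause_sketch {
	int paused;		/* outstanding pause references */
	int bar_wait;		/* holds one reference while set */
	int isfiltered;		/* holds one reference while set */
};

static void
tid_reset_sketch(struct tid_pause_sketch *t)
{
	if (t->bar_wait && t->paused > 0)
		t->paused--;	/* drop the BAR reference, no resume */
	if (t->isfiltered && t->paused > 0)
		t->paused--;	/* drop the filter reference, no resume */
	t->bar_wait = t->isfiltered = 0;
}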
3946 
3947 /*
3948  * Flush all software queued packets for the given node.
3949  *
3950  * This occurs when a completion handler frees the last buffer
3951  * for a node, and the node is thus freed. This causes the node
3952  * to be cleaned up, which ends up calling ath_tx_node_flush.
3953  */
3954 void
3955 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3956 {
3957 	int tid;
3958 	ath_bufhead bf_cq;
3959 	struct ath_buf *bf;
3960 
3961 	TAILQ_INIT(&bf_cq);
3962 
3963 	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3964 	    &an->an_node);
3965 
3966 	ATH_TX_LOCK(sc);
3967 	DPRINTF(sc, ATH_DEBUG_NODE,
3968 	    "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3969 	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3970 	    __func__,
3971 	    an->an_node.ni_macaddr,
3972 	    ":",
3973 	    an->an_is_powersave,
3974 	    an->an_stack_psq,
3975 	    an->an_tim_set,
3976 	    an->an_swq_depth,
3977 	    an->clrdmask,
3978 	    an->an_leak_count);
3979 
3980 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3981 		struct ath_tid *atid = &an->an_tid[tid];
3982 
3983 		/* Free packets */
3984 		ath_tx_tid_drain(sc, an, atid, &bf_cq);
3985 
3986 		/* Remove this tid from the list of active tids */
3987 		ath_tx_tid_unsched(sc, atid);
3988 
3989 		/* Reset the per-TID pause, BAR, etc state */
3990 		ath_tx_tid_reset(sc, atid);
3991 	}
3992 
3993 	/*
3994 	 * Clear global leak count
3995 	 */
3996 	an->an_leak_count = 0;
3997 	ATH_TX_UNLOCK(sc);
3998 
3999 	/* Handle completed frames */
4000 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4001 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4002 		ath_tx_default_comp(sc, bf, 0);
4003 	}
4004 }
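
/*
 * The flush above follows the driver's usual deferred-completion
 * pattern: gather buffers onto a local list under ATH_TX_LOCK, then
 * run the (potentially re-entrant) completion handlers unlocked.
 * A minimal sketch of the shape (illustrative only):
 */
#if 0
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	/* ... move everything that must die onto bf_cq ... */
	ATH_TX_UNLOCK(sc);

	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
#endif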
4005 
4006 /*
4007  * Drain all the software TXQs currently with traffic queued.
4008  */
4009 void
4010 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4011 {
4012 	struct ath_tid *tid;
4013 	ath_bufhead bf_cq;
4014 	struct ath_buf *bf;
4015 
4016 	TAILQ_INIT(&bf_cq);
4017 	ATH_TX_LOCK(sc);
4018 
4019 	/*
4020 	 * Iterate over all active tids for the given txq,
4021 	 * flushing and unsched'ing them
4022 	 */
4023 	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4024 		tid = TAILQ_FIRST(&txq->axq_tidq);
4025 		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4026 		ath_tx_tid_unsched(sc, tid);
4027 	}
4028 
4029 	ATH_TX_UNLOCK(sc);
4030 
4031 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4032 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4033 		ath_tx_default_comp(sc, bf, 0);
4034 	}
4035 }
4036 
4037 /*
4038  * Handle completion of non-aggregate session frames.
4039  *
4040  * This (currently) doesn't implement software retransmission of
4041  * non-aggregate frames!
4042  *
4043  * Software retransmission of non-aggregate frames needs to obey
4044  * the strict sequence number ordering, and drop any frames that
4045  * would violate it.
4046  *
4047  * For now, filtered frames and software retransmission would cause
4048  * all kinds of issues, so we don't support them.
4049  *
4050  * So anyone queuing frames via ath_tx_normal_xmit() or
4051  * ath_tx_tid_hw_queue_norm() must override and set CLRDMASK.
4052  */
4053 void
4054 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4055 {
4056 	struct ieee80211_node *ni = bf->bf_node;
4057 	struct ath_node *an = ATH_NODE(ni);
4058 	int tid = bf->bf_state.bfs_tid;
4059 	struct ath_tid *atid = &an->an_tid[tid];
4060 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4061 
4062 	/* The TID state is protected behind the TXQ lock */
4063 	ATH_TX_LOCK(sc);
4064 
4065 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4066 	    __func__, bf, fail, atid->hwq_depth - 1);
4067 
4068 	atid->hwq_depth--;
4069 
4070 #if 0
4071 	/*
4072 	 * If the frame was filtered, stick it on the filter frame
4073 	 * queue and complain about it.  It shouldn't happen!
4074 	 */
4075 	if ((ts->ts_status & HAL_TXERR_FILT) ||
4076 	    (ts->ts_status != 0 && atid->isfiltered)) {
4077 		device_printf(sc->sc_dev,
4078 		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4079 		    __func__,
4080 		    atid->isfiltered,
4081 		    ts->ts_status);
4082 		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4083 	}
4084 #endif
4085 	if (atid->isfiltered)
4086 		device_printf(sc->sc_dev, "%s: filtered?!\n", __func__);
4087 	if (atid->hwq_depth < 0)
4088 		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
4089 		    __func__, atid->hwq_depth);
4090 
4091 	/*
4092 	 * If the queue is filtered, potentially mark it as complete
4093 	 * and reschedule it as needed.
4094 	 *
4095 	 * This is required as there may be a subsequent TX descriptor
4096 	 * for this end-node that has CLRDMASK set, so it's quite possible
4097 	 * that a filtered frame will be followed by a non-filtered
4098 	 * (complete or otherwise) frame.
4099 	 *
4100 	 * XXX should we do this before we complete the frame?
4101 	 */
4102 	if (atid->isfiltered)
4103 		ath_tx_tid_filt_comp_complete(sc, atid);
4104 	ATH_TX_UNLOCK(sc);
4105 
4106 	/*
4107 	 * punt to rate control if we're not being cleaned up
4108 	 * during a hw queue drain and the frame wanted an ACK.
4109 	 */
4110 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4111 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4112 		    ts, bf->bf_state.bfs_pktlen,
4113 		    1, (ts->ts_status == 0) ? 0 : 1);
4114 
4115 	ath_tx_default_comp(sc, bf, fail);
4116 }
4117 
4118 /*
4119  * Handle cleanup of aggregate session packets that aren't
4120  * an A-MPDU.
4121  *
4122  * There's no need to update the BAW here - the session is being
4123  * torn down.
4124  */
4125 static void
4126 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4127 {
4128 	struct ieee80211_node *ni = bf->bf_node;
4129 	struct ath_node *an = ATH_NODE(ni);
4130 	int tid = bf->bf_state.bfs_tid;
4131 	struct ath_tid *atid = &an->an_tid[tid];
4132 
4133 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4134 	    __func__, tid, atid->incomp);
4135 
4136 	ATH_TX_LOCK(sc);
4137 	atid->incomp--;
4138 	if (atid->incomp == 0) {
4139 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4140 		    "%s: TID %d: cleaned up! resume!\n",
4141 		    __func__, tid);
4142 		atid->cleanup_inprogress = 0;
4143 		ath_tx_tid_resume(sc, atid);
4144 	}
4145 	ATH_TX_UNLOCK(sc);
4146 
4147 	ath_tx_default_comp(sc, bf, 0);
4148 }
4149 
4150 /*
4151  * Performs transmit side cleanup when TID changes from aggregated to
4152  * unaggregated.
4153  *
4154  * - Discard all retry frames from the s/w queue.
4155  * - Fix the tx completion function for all buffers in s/w queue.
4156  * - Count the number of unacked frames, and let transmit completion
4157  *   handle it later.
4158  *
4159  * The caller is responsible for pausing the TID and unpausing the
4160  * TID if no cleanup was required. Otherwise the cleanup path will
4161  * unpause the TID once the last hardware queued frame is completed.
4162  */
4163 static void
4164 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4165     ath_bufhead *bf_cq)
4166 {
4167 	struct ath_tid *atid = &an->an_tid[tid];
4168 	struct ieee80211_tx_ampdu *tap;
4169 	struct ath_buf *bf, *bf_next;
4170 
4171 	ATH_TX_LOCK_ASSERT(sc);
4172 
4173 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4174 	    "%s: TID %d: called\n", __func__, tid);
4175 
4176 	/*
4177 	 * Move the filtered frames to the TX queue, before
4178 	 * we run off and discard/process things.
4179 	 */
4180 	/* XXX this is really quite inefficient */
4181 	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4182 		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4183 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4184 	}
4185 
4186 	/*
4187 	 * Update the frames in the software TX queue:
4188 	 *
4189 	 * + Discard retry frames in the queue
4190 	 * + Fix the completion function to be non-aggregate
4191 	 */
4192 	bf = ATH_TID_FIRST(atid);
4193 	while (bf) {
4194 		if (bf->bf_state.bfs_isretried) {
4195 			bf_next = TAILQ_NEXT(bf, bf_list);
4196 			ATH_TID_REMOVE(atid, bf, bf_list);
4197 			if (bf->bf_state.bfs_dobaw) {
4198 				ath_tx_update_baw(sc, an, atid, bf);
4199 				if (! bf->bf_state.bfs_addedbaw)
4200 					device_printf(sc->sc_dev,
4201 					    "%s: wasn't added: seqno %d\n",
4202 					    __func__,
4203 					    SEQNO(bf->bf_state.bfs_seqno));
4204 			}
4205 			bf->bf_state.bfs_dobaw = 0;
4206 			/*
4207 			 * Call the default completion handler with "fail" just
4208 			 * so upper levels are suitably notified about this.
4209 			 */
4210 			TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4211 			bf = bf_next;
4212 			continue;
4213 		}
4214 		/* Give these the default completion handler */
4215 		bf->bf_comp = ath_tx_normal_comp;
4216 		bf = TAILQ_NEXT(bf, bf_list);
4217 	}
4218 
4219 	/*
4220 	 * Calculate what hardware-queued frames exist based
4221 	 * on the current BAW size. Ie, what frames have been
4222 	 * added to the TX hardware queue for this TID but
4223 	 * not yet ACKed.
4224 	 */
4225 	tap = ath_tx_get_tx_tid(an, tid);
4226 	/* Need the lock - fiddling with BAW */
4227 	while (atid->baw_head != atid->baw_tail) {
4228 		if (atid->tx_buf[atid->baw_head]) {
4229 			atid->incomp++;
4230 			atid->cleanup_inprogress = 1;
4231 			atid->tx_buf[atid->baw_head] = NULL;
4232 		}
4233 		INCR(atid->baw_head, ATH_TID_MAX_BUFS);
4234 		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
4235 	}
4236 
4237 	if (atid->cleanup_inprogress)
4238 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4239 		    "%s: TID %d: cleanup needed: %d packets\n",
4240 		    __func__, tid, atid->incomp);
4241 
4242 	/* Owner now must free completed frames */
4243 }
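
/*
 * A standalone sketch (local names; not driver code) of the BAW walk
 * above: any slot between baw_head and baw_tail still holding a
 * buffer is a frame on the hardware queue that will complete later,
 * so each one bumps the incomplete count.  INCR() in the driver
 * performs a modular advance of the same kind as MODINC() here.
 */
#include <stddef.h>

#define	SKETCH_MAX_BUFS	64		/* stands in for ATH_TID_MAX_BUFS */
#define	MODINC(v, sz)	((v) = ((v) + 1) % (sz))

static int
count_incomplete(void *tx_buf[], int head, int tail)
{
	int incomp = 0;

	while (head != tail) {
		if (tx_buf[head] != NULL) {
			tx_buf[head] = NULL;	/* slot owes a completion */
			incomp++;
		}
		MODINC(head, SKETCH_MAX_BUFS);
	}
	return (incomp);
}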
4244 
4245 static struct ath_buf *
4246 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4247     struct ath_tid *tid, struct ath_buf *bf)
4248 {
4249 	struct ath_buf *nbf;
4250 	int error;
4251 
4252 	/*
4253 	 * Clone the buffer.  This will handle the dma unmap and
4254 	 * copy the node reference to the new buffer.  If this
4255 	 * works out, 'bf' will have no DMA mapping, no mbuf
4256 	 * pointer and no node reference.
4257 	 */
4258 	nbf = ath_buf_clone(sc, bf);
4259 
4260 #if 0
4261 	device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n",
4262 	    __func__);
4263 #endif
4264 
4265 	if (nbf == NULL) {
4266 		/* Failed to clone */
4267 		device_printf(sc->sc_dev,
4268 		    "%s: failed to clone a busy buffer\n",
4269 		    __func__);
4270 		return NULL;
4271 	}
4272 
4273 	/* Setup the dma for the new buffer */
4274 	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4275 	if (error != 0) {
4276 		device_printf(sc->sc_dev,
4277 		    "%s: failed to setup dma for clone\n",
4278 		    __func__);
4279 		/*
4280 		 * Put this at the head of the list, not tail;
4281 		 * that way it doesn't interfere with the
4282 		 * busy buffer logic (which uses the tail of
4283 		 * the list.)
4284 		 */
4285 		ATH_TXBUF_LOCK(sc);
4286 		ath_returnbuf_head(sc, nbf);
4287 		ATH_TXBUF_UNLOCK(sc);
4288 		return NULL;
4289 	}
4290 
4291 	/* Update BAW if required, before we free the original buf */
4292 	if (bf->bf_state.bfs_dobaw)
4293 		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4294 
4295 	/* Free original buffer; return new buffer */
4296 	ath_freebuf(sc, bf);
4297 
4298 	return nbf;
4299 }
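
/*
 * A sketch of how the retry paths below consume this (compare
 * ath_tx_aggr_retry_unaggr() and ath_tx_retry_subframe(); variables
 * assumed to be in scope): on success the old bf is already freed,
 * on failure the retry count is pushed past the cap so the very
 * next check drops the frame.
 */
#if 0
	if ((bf->bf_flags & ATH_BUF_BUSY) &&
	    bf->bf_state.bfs_retries < SWMAX_RETRIES) {
		nbf = ath_tx_retry_clone(sc, an, atid, bf);
		if (nbf != NULL)
			bf = nbf;	/* old bf has been freed */
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}
#endif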
4300 
4301 /*
4302  * Handle retrying an unaggregate frame in an aggregate
4303  * session.
4304  *
4305  * If too many retries occur, pause the TID, wait for
4306  * any further retransmits (as there's no reason why
4307  * non-aggregate frames in an aggregate session must be
4308  * transmitted in order; they just have to be in-BAW)
4309  * and then queue a BAR.
4310  */
4311 static void
4312 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4313 {
4314 	struct ieee80211_node *ni = bf->bf_node;
4315 	struct ath_node *an = ATH_NODE(ni);
4316 	int tid = bf->bf_state.bfs_tid;
4317 	struct ath_tid *atid = &an->an_tid[tid];
4318 	struct ieee80211_tx_ampdu *tap;
4319 
4320 	ATH_TX_LOCK(sc);
4321 
4322 	tap = ath_tx_get_tx_tid(an, tid);
4323 
4324 	/*
4325 	 * If the buffer is marked as busy, we can't directly
4326 	 * reuse it. Instead, try to clone the buffer.
4327 	 * If the clone is successful, recycle the old buffer.
4328 	 * If the clone is unsuccessful, set bfs_retries to max
4329 	 * to force the next bit of code to free the buffer
4330 	 * for us.
4331 	 */
4332 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4333 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4334 		struct ath_buf *nbf;
4335 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4336 		if (nbf)
4337 			/* bf has been freed at this point */
4338 			bf = nbf;
4339 		else
4340 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4341 	}
4342 
4343 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4344 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4345 		    "%s: exceeded retries; seqno %d\n",
4346 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4347 		sc->sc_stats.ast_tx_swretrymax++;
4348 
4349 		/* Update BAW anyway */
4350 		if (bf->bf_state.bfs_dobaw) {
4351 			ath_tx_update_baw(sc, an, atid, bf);
4352 			if (! bf->bf_state.bfs_addedbaw)
4353 				device_printf(sc->sc_dev,
4354 				    "%s: wasn't added: seqno %d\n",
4355 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4356 		}
4357 		bf->bf_state.bfs_dobaw = 0;
4358 
4359 		/* Suspend the TX queue and get ready to send the BAR */
4360 		ath_tx_tid_bar_suspend(sc, atid);
4361 
4362 		/* Send the BAR if there are no other frames waiting */
4363 		if (ath_tx_tid_bar_tx_ready(sc, atid))
4364 			ath_tx_tid_bar_tx(sc, atid);
4365 
4366 		ATH_TX_UNLOCK(sc);
4367 
4368 		/* Free buffer, bf is free after this call */
4369 		ath_tx_default_comp(sc, bf, 0);
4370 		return;
4371 	}
4372 
4373 	/*
4374 	 * This increments the retry counter as well as
4375 	 * sets the retry flag in the ath_buf and packet
4376 	 * body.
4377 	 */
4378 	ath_tx_set_retry(sc, bf);
4379 	sc->sc_stats.ast_tx_swretries++;
4380 
4381 	/*
4382 	 * Insert this at the head of the queue, so it's
4383 	 * retried before any current/subsequent frames.
4384 	 */
4385 	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4386 	ath_tx_tid_sched(sc, atid);
4387 	/* Send the BAR if there are no other frames waiting */
4388 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4389 		ath_tx_tid_bar_tx(sc, atid);
4390 
4391 	ATH_TX_UNLOCK(sc);
4392 }
4393 
4394 /*
4395  * Common code for aggregate excessive retry/subframe retry.
4396  * If retrying, queues the buffer to bf_q and returns 0.
4397  * If not, returns 1; the caller must then free the buffer.
4398  *
4399  * XXX should unify this with ath_tx_aggr_retry_unaggr()
4400  */
4401 static int
4402 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4403     ath_bufhead *bf_q)
4404 {
4405 	struct ieee80211_node *ni = bf->bf_node;
4406 	struct ath_node *an = ATH_NODE(ni);
4407 	int tid = bf->bf_state.bfs_tid;
4408 	struct ath_tid *atid = &an->an_tid[tid];
4409 
4410 	ATH_TX_LOCK_ASSERT(sc);
4411 
4412 	/* XXX clr11naggr should be done for all subframes */
4413 	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4414 	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4415 
4416 	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4417 
4418 	/*
4419 	 * If the buffer is marked as busy, we can't directly
4420 	 * reuse it. Instead, try to clone the buffer.
4421 	 * If the clone is successful, recycle the old buffer.
4422 	 * If the clone is unsuccessful, set bfs_retries to max
4423 	 * to force the next bit of code to free the buffer
4424 	 * for us.
4425 	 */
4426 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4427 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4428 		struct ath_buf *nbf;
4429 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4430 		if (nbf)
4431 			/* bf has been freed at this point */
4432 			bf = nbf;
4433 		else
4434 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4435 	}
4436 
4437 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4438 		sc->sc_stats.ast_tx_swretrymax++;
4439 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4440 		    "%s: max retries: seqno %d\n",
4441 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4442 		ath_tx_update_baw(sc, an, atid, bf);
4443 		if (! bf->bf_state.bfs_addedbaw)
4444 			device_printf(sc->sc_dev,
4445 			    "%s: wasn't added: seqno %d\n",
4446 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4447 		bf->bf_state.bfs_dobaw = 0;
4448 		return 1;
4449 	}
4450 
4451 	ath_tx_set_retry(sc, bf);
4452 	sc->sc_stats.ast_tx_swretries++;
4453 	bf->bf_next = NULL;		/* Just to make sure */
4454 
4455 	/* Clear the aggregate state */
4456 	bf->bf_state.bfs_aggr = 0;
4457 	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4458 	bf->bf_state.bfs_nframes = 1;
4459 
4460 	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4461 	return 0;
4462 }
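
/*
 * Both retry paths above reduce to the same small decision; a
 * standalone sketch (local constant; not driver code) of the two
 * outcomes:
 */
#define	SKETCH_SWMAX_RETRIES	10	/* stands in for SWMAX_RETRIES */

/* Returns 1 to drop the frame (update BAW, send a BAR), 0 to requeue. */
static int
retry_or_drop(int *retries)
{
	if (*retries >= SKETCH_SWMAX_RETRIES)
		return (1);	/* give up; the BAR machinery takes over */
	(*retries)++;		/* ath_tx_set_retry() also marks the header */
	return (0);		/* requeue at the head of the TID queue */
}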
4463 
4464 /*
4465  * error pkt completion for an aggregate destination
4466  */
4467 static void
4468 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4469     struct ath_tid *tid)
4470 {
4471 	struct ieee80211_node *ni = bf_first->bf_node;
4472 	struct ath_node *an = ATH_NODE(ni);
4473 	struct ath_buf *bf_next, *bf;
4474 	ath_bufhead bf_q;
4475 	int drops = 0;
4476 	struct ieee80211_tx_ampdu *tap;
4477 	ath_bufhead bf_cq;
4478 
4479 	TAILQ_INIT(&bf_q);
4480 	TAILQ_INIT(&bf_cq);
4481 
4482 	/*
4483 	 * Update rate control - all frames have failed.
4484 	 *
4485 	 * XXX use the length in the first frame in the series;
4486 	 * XXX just so things are consistent for now.
4487 	 */
4488 	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4489 	    &bf_first->bf_status.ds_txstat,
4490 	    bf_first->bf_state.bfs_pktlen,
4491 	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4492 
4493 	ATH_TX_LOCK(sc);
4494 	tap = ath_tx_get_tx_tid(an, tid->tid);
4495 	sc->sc_stats.ast_tx_aggr_failall++;
4496 
4497 	/* Retry all subframes */
4498 	bf = bf_first;
4499 	while (bf) {
4500 		bf_next = bf->bf_next;
4501 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4502 		sc->sc_stats.ast_tx_aggr_fail++;
4503 		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4504 			drops++;
4505 			bf->bf_next = NULL;
4506 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4507 		}
4508 		bf = bf_next;
4509 	}
4510 
4511 	/* Prepend all frames to the beginning of the queue */
4512 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4513 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4514 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4515 	}
4516 
4517 	/*
4518 	 * Schedule the TID to be re-tried.
4519 	 */
4520 	ath_tx_tid_sched(sc, tid);
4521 
4522 	/*
4523 	 * send bar if we dropped any frames
4524 	 *
4525 	 * Keep the txq lock held for now, as we need to ensure
4526 	 * that ni_txseqs[] is consistent (as it's being updated
4527 	 * in the ifnet TX context or raw TX context.)
4528 	 */
4529 	if (drops) {
4530 		/* Suspend the TX queue and get ready to send the BAR */
4531 		ath_tx_tid_bar_suspend(sc, tid);
4532 	}
4533 
4534 	/*
4535 	 * Send BAR if required
4536 	 */
4537 	if (ath_tx_tid_bar_tx_ready(sc, tid))
4538 		ath_tx_tid_bar_tx(sc, tid);
4539 
4540 	ATH_TX_UNLOCK(sc);
4541 
4542 	/* Complete frames which errored out */
4543 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4544 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4545 		ath_tx_default_comp(sc, bf, 0);
4546 	}
4547 }
4548 
4549 /*
4550  * Handle clean-up of packets from an aggregate list.
4551  *
4552  * There's no need to update the BAW here - the session is being
4553  * torn down.
4554  */
4555 static void
4556 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4557 {
4558 	struct ath_buf *bf, *bf_next;
4559 	struct ieee80211_node *ni = bf_first->bf_node;
4560 	struct ath_node *an = ATH_NODE(ni);
4561 	int tid = bf_first->bf_state.bfs_tid;
4562 	struct ath_tid *atid = &an->an_tid[tid];
4563 
4564 	ATH_TX_LOCK(sc);
4565 
4566 	/* update incomp */
4567 	bf = bf_first;
4568 	while (bf) {
4569 		atid->incomp--;
4570 		bf = bf->bf_next;
4571 	}
4572 
4573 	if (atid->incomp == 0) {
4574 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4575 		    "%s: TID %d: cleaned up! resume!\n",
4576 		    __func__, tid);
4577 		atid->cleanup_inprogress = 0;
4578 		ath_tx_tid_resume(sc, atid);
4579 	}
4580 
4581 	/* Send BAR if required */
4582 	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4583 	/*
4584 	 * XXX TODO: we should likely just tear down the BAR state here,
4585 	 * rather than sending a BAR.
4586 	 */
4587 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4588 		ath_tx_tid_bar_tx(sc, atid);
4589 
4590 	ATH_TX_UNLOCK(sc);
4591 
4592 	/* Handle frame completion */
4593 	bf = bf_first;
4594 	while (bf) {
4595 		bf_next = bf->bf_next;
4596 		ath_tx_default_comp(sc, bf, 1);
4597 		bf = bf_next;
4598 	}
4599 }
4600 
4601 /*
4602  * Handle completion of a set of aggregate frames.
4603  *
4604  * Note: the completion handler is the last descriptor in the aggregate,
4605  * not the last descriptor in the first frame.
4606  */
4607 static void
4608 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4609     int fail)
4610 {
4611 	//struct ath_desc *ds = bf->bf_lastds;
4612 	struct ieee80211_node *ni = bf_first->bf_node;
4613 	struct ath_node *an = ATH_NODE(ni);
4614 	int tid = bf_first->bf_state.bfs_tid;
4615 	struct ath_tid *atid = &an->an_tid[tid];
4616 	struct ath_tx_status ts;
4617 	struct ieee80211_tx_ampdu *tap;
4618 	ath_bufhead bf_q;
4619 	ath_bufhead bf_cq;
4620 	int seq_st, tx_ok;
4621 	int hasba, isaggr;
4622 	uint32_t ba[2];
4623 	struct ath_buf *bf, *bf_next;
4624 	int ba_index;
4625 	int drops = 0;
4626 	int nframes = 0, nbad = 0, nf;
4627 	int pktlen;
4628 	/* XXX there's too much on the stack? */
4629 	struct ath_rc_series rc[ATH_RC_NUM];
4630 	int txseq;
4631 
4632 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4633 	    __func__, atid->hwq_depth);
4634 
4635 	/*
4636 	 * Take a copy; this may be needed -after- bf_first
4637 	 * has been completed and freed.
4638 	 */
4639 	ts = bf_first->bf_status.ds_txstat;
4640 
4641 	TAILQ_INIT(&bf_q);
4642 	TAILQ_INIT(&bf_cq);
4643 
4644 	/* The TID state is kept behind the TXQ lock */
4645 	ATH_TX_LOCK(sc);
4646 
4647 	atid->hwq_depth--;
4648 	if (atid->hwq_depth < 0)
4649 		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
4650 		    __func__, atid->hwq_depth);
4651 
4652 	/*
4653 	 * If the TID is filtered, handle completing the filter
4654 	 * transition before potentially kicking it to the cleanup
4655 	 * function.
4656 	 *
4657 	 * XXX this is duplicate work, ew.
4658 	 */
4659 	if (atid->isfiltered)
4660 		ath_tx_tid_filt_comp_complete(sc, atid);
4661 
4662 	/*
4663 	 * Punt cleanup to the relevant function, not our problem now
4664 	 */
4665 	if (atid->cleanup_inprogress) {
4666 		if (atid->isfiltered)
4667 			device_printf(sc->sc_dev,
4668 			    "%s: isfiltered=1, normal_comp?\n",
4669 			    __func__);
4670 		ATH_TX_UNLOCK(sc);
4671 		ath_tx_comp_cleanup_aggr(sc, bf_first);
4672 		return;
4673 	}
4674 
4675 	/*
4676 	 * If the frame is filtered, transition to filtered frame
4677 	 * mode and add this to the filtered frame list.
4678 	 *
4679 	 * XXX TODO: figure out how this interoperates with
4680 	 * BAR, pause and cleanup states.
4681 	 */
4682 	if ((ts.ts_status & HAL_TXERR_FILT) ||
4683 	    (ts.ts_status != 0 && atid->isfiltered)) {
4684 		if (fail != 0)
4685 			device_printf(sc->sc_dev,
4686 			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4687 		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4688 
4689 		/* Remove from BAW */
4690 		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4691 			if (bf->bf_state.bfs_addedbaw)
4692 				drops++;
4693 			if (bf->bf_state.bfs_dobaw) {
4694 				ath_tx_update_baw(sc, an, atid, bf);
4695 				if (! bf->bf_state.bfs_addedbaw)
4696 					device_printf(sc->sc_dev,
4697 					    "%s: wasn't added: seqno %d\n",
4698 					    __func__,
4699 					    SEQNO(bf->bf_state.bfs_seqno));
4700 			}
4701 			bf->bf_state.bfs_dobaw = 0;
4702 		}
4703 		/*
4704 		 * If any intermediate frames in the BAW were dropped when
4705 		 * handling filtering things, send a BAR.
4706 		 */
4707 		if (drops)
4708 			ath_tx_tid_bar_suspend(sc, atid);
4709 
4710 		/*
4711 		 * Finish up by sending a BAR if required and freeing
4712 		 * the frames outside of the TX lock.
4713 		 */
4714 		goto finish_send_bar;
4715 	}
4716 
4717 	/*
4718 	 * XXX for now, use the first frame in the aggregate for
4719 	 * XXX rate control completion; it's at least consistent.
4720 	 */
4721 	pktlen = bf_first->bf_state.bfs_pktlen;
4722 
4723 	/*
4724 	 * Handle errors first!
4725 	 *
4726 	 * Here, handle _any_ error as an "exceeded retries" error.
4727 	 * Later on (when filtered frames are to be specially handled)
4728 	 * it'll have to be expanded.
4729 	 */
4730 #if 0
4731 	if (ts.ts_status & HAL_TXERR_XRETRY) {
4732 #endif
4733 	if (ts.ts_status != 0) {
4734 		ATH_TX_UNLOCK(sc);
4735 		ath_tx_comp_aggr_error(sc, bf_first, atid);
4736 		return;
4737 	}
4738 
4739 	tap = ath_tx_get_tx_tid(an, tid);
4740 
4741 	/*
4742 	 * extract starting sequence and block-ack bitmap
4743 	 */
4744 	/* XXX endian-ness of seq_st, ba? */
4745 	seq_st = ts.ts_seqnum;
4746 	hasba = !! (ts.ts_flags & HAL_TX_BA);
4747 	tx_ok = (ts.ts_status == 0);
4748 	isaggr = bf_first->bf_state.bfs_aggr;
4749 	ba[0] = ts.ts_ba_low;
4750 	ba[1] = ts.ts_ba_high;
4751 
4752 	/*
4753 	 * Copy the TX completion status and the rate control
4754 	 * series from the first descriptor, as it may be freed
4755 	 * before the rate control code can get its grubby fingers
4756 	 * into things.
4757 	 */
4758 	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4759 
4760 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4761 	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4762 	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4763 	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4764 	    isaggr, seq_st, hasba, ba[0], ba[1]);
4765 
4766 	/*
4767 	 * The reference driver doesn't do this; it simply ignores
4768 	 * this check in its entirety.
4769 	 *
4770 	 * I've seen this occur when using iperf to send traffic
4771 	 * out tid 1 - the aggregate frames are all marked as TID 1,
4772 	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4773 	 * check.
4774 	 */
4775 #if 0
4776 	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4777 	if (tid != ts.ts_tid) {
4778 		device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n",
4779 		    __func__, tid, ts.ts_tid);
4780 		tx_ok = 0;
4781 	}
4782 #endif
4783 
4784 	/* AR5416 BA bug; this requires an interface reset */
4785 	if (isaggr && tx_ok && (! hasba)) {
4786 		device_printf(sc->sc_dev,
4787 		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4788 		    "seq_st=%d\n",
4789 		    __func__, hasba, tx_ok, isaggr, seq_st);
4790 		/* XXX TODO: schedule an interface reset */
4791 #ifdef ATH_DEBUG
4792 		ath_printtxbuf(sc, bf_first,
4793 		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4794 #endif
4795 	}
4796 
4797 	/*
4798 	 * Walk the list of frames, figure out which ones were correctly
4799 	 * sent and which weren't.
4800 	 */
4801 	bf = bf_first;
4802 	nf = bf_first->bf_state.bfs_nframes;
4803 
4804 	/* bf_first is going to be invalid once this list is walked */
4805 	bf_first = NULL;
4806 
4807 	/*
4808 	 * Walk the list of completed frames and determine
4809 	 * which need to be completed and which need to be
4810 	 * retransmitted.
4811 	 *
4812 	 * For completed frames, the completion functions need
4813 	 * to be called at the end of this function as the last
4814 	 * node reference may free the node.
4815 	 *
4816 	 * Finally, since the TXQ lock can't be held during the
4817 	 * completion callback (to avoid lock recursion),
4818 	 * the completion calls have to be done outside of the
4819 	 * lock.
4820 	 */
4821 	while (bf) {
4822 		nframes++;
4823 		ba_index = ATH_BA_INDEX(seq_st,
4824 		    SEQNO(bf->bf_state.bfs_seqno));
4825 		bf_next = bf->bf_next;
4826 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4827 
4828 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4829 		    "%s: checking bf=%p seqno=%d; ack=%d\n",
4830 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4831 		    ATH_BA_ISSET(ba, ba_index));
4832 
4833 		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4834 			sc->sc_stats.ast_tx_aggr_ok++;
4835 			ath_tx_update_baw(sc, an, atid, bf);
4836 			bf->bf_state.bfs_dobaw = 0;
4837 			if (! bf->bf_state.bfs_addedbaw)
4838 				device_printf(sc->sc_dev,
4839 				    "%s: wasn't added: seqno %d\n",
4840 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4841 			bf->bf_next = NULL;
4842 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4843 		} else {
4844 			sc->sc_stats.ast_tx_aggr_fail++;
4845 			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4846 				drops++;
4847 				bf->bf_next = NULL;
4848 				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4849 			}
4850 			nbad++;
4851 		}
4852 		bf = bf_next;
4853 	}
4854 
4855 	/*
4856 	 * Now that the BAW updates have been done, unlock
4857 	 *
4858 	 * txseq is grabbed before the lock is released so we
4859 	 * have a consistent view of what -was- in the BAW.
4860 	 * Anything after this point will not yet have been
4861 	 * TXed.
4862 	 */
4863 	txseq = tap->txa_start;
4864 	ATH_TX_UNLOCK(sc);
4865 
4866 	if (nframes != nf)
4867 		device_printf(sc->sc_dev,
4868 		    "%s: num frames seen=%d; bf nframes=%d\n",
4869 		    __func__, nframes, nf);
4870 
4871 	/*
4872 	 * Now we know how many frames were bad, call the rate
4873 	 * control code.
4874 	 */
4875 	if (fail == 0)
4876 		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
4877 		    nbad);
4878 
4879 	/*
4880 	 * send bar if we dropped any frames
4881 	 */
4882 	if (drops) {
4883 		/* Suspend the TX queue and get ready to send the BAR */
4884 		ATH_TX_LOCK(sc);
4885 		ath_tx_tid_bar_suspend(sc, atid);
4886 		ATH_TX_UNLOCK(sc);
4887 	}
4888 
4889 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4890 	    "%s: txa_start now %d\n", __func__, tap->txa_start);
4891 
4892 	ATH_TX_LOCK(sc);
4893 
4894 	/* Prepend all frames to the beginning of the queue */
4895 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4896 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4897 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4898 	}
4899 
4900 	/*
4901 	 * Reschedule to grab some further frames.
4902 	 */
4903 	ath_tx_tid_sched(sc, atid);
4904 
4905 	/*
4906 	 * If the queue is filtered, re-schedule as required.
4907 	 *
4908 	 * This is required as there may be a subsequent TX descriptor
4909 	 * for this end-node that has CLRDMASK set, so it's quite possible
4910 	 * that a filtered frame will be followed by a non-filtered
4911 	 * (complete or otherwise) frame.
4912 	 *
4913 	 * XXX should we do this before we complete the frame?
4914 	 */
4915 	if (atid->isfiltered)
4916 		ath_tx_tid_filt_comp_complete(sc, atid);
4917 
4918 finish_send_bar:
4919 
4920 	/*
4921 	 * Send BAR if required
4922 	 */
4923 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4924 		ath_tx_tid_bar_tx(sc, atid);
4925 
4926 	ATH_TX_UNLOCK(sc);
4927 
4928 	/* Do deferred completion */
4929 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4930 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4931 		ath_tx_default_comp(sc, bf, 0);
4932 	}
4933 }
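
/*
 * A standalone sketch (local stand-ins for ATH_BA_INDEX() and
 * ATH_BA_ISSET(); not driver code) of the block-ack bookkeeping in
 * the walk above: the hardware reports a starting sequence number
 * and a 64-bit bitmap, and a subframe's bit index is its modular
 * distance from that start.
 */
#include <stdint.h>

#define	SKETCH_SEQ_RANGE	4096	/* stands in for IEEE80211_SEQ_RANGE */
#define	BA_INDEX(st, seq)	(((seq) - (st)) & (SKETCH_SEQ_RANGE - 1))
#define	BA_ISSET(ba, n)						\
	((n) < 64 && ((ba)[(n) >> 5] & (1U << ((n) & 31))))

static int
subframe_acked(const uint32_t ba[2], int seq_st, int seqno)
{
	int idx = BA_INDEX(seq_st, seqno);

	/* indices past the 64-bit window read as not-acked -> retry */
	return (BA_ISSET(ba, idx) ? 1 : 0);
}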
4934 
4935 /*
4936  * Handle completion of unaggregated frames in an ADDBA
4937  * session.
4938  *
4939  * Fail is set to 1 if the entry is being freed via a call to
4940  * ath_tx_draintxq().
4941  */
4942 static void
4943 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
4944 {
4945 	struct ieee80211_node *ni = bf->bf_node;
4946 	struct ath_node *an = ATH_NODE(ni);
4947 	int tid = bf->bf_state.bfs_tid;
4948 	struct ath_tid *atid = &an->an_tid[tid];
4949 	struct ath_tx_status ts;
4950 	int drops = 0;
4951 
4952 	/*
4953 	 * Take a copy of this; filtering/cloning the frame may free the
4954 	 * bf pointer.
4955 	 */
4956 	ts = bf->bf_status.ds_txstat;
4957 
4958 	/*
4959 	 * Update rate control status here, before we possibly
4960 	 * punt to retry or cleanup.
4961 	 *
4962 	 * Do it outside of the TXQ lock.
4963 	 */
4964 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4965 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4966 		    &bf->bf_status.ds_txstat,
4967 		    bf->bf_state.bfs_pktlen,
4968 		    1, (ts.ts_status == 0) ? 0 : 1);
4969 
4970 	/*
4971 	 * The TX lock is grabbed early here so atid->hwq_depth can be
4972 	 * tracked; unfortunately it's then released and regrabbed
4973 	 * during retry and cleanup.  That's rather inefficient.
4974 	 */
4975 	ATH_TX_LOCK(sc);
4976 
4977 	if (tid == IEEE80211_NONQOS_TID)
4978 		device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);
4979 
4980 	DPRINTF(sc, ATH_DEBUG_SW_TX,
4981 	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
4982 	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
4983 	    SEQNO(bf->bf_state.bfs_seqno));
4984 
4985 	atid->hwq_depth--;
4986 	if (atid->hwq_depth < 0)
4987 		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
4988 		    __func__, atid->hwq_depth);
4989 
4990 	/*
4991 	 * If the TID is filtered, handle completing the filter
4992 	 * transition before potentially kicking it to the cleanup
4993 	 * function.
4994 	 */
4995 	if (atid->isfiltered)
4996 		ath_tx_tid_filt_comp_complete(sc, atid);
4997 
4998 	/*
4999 	 * If a cleanup is in progress, punt to comp_cleanup
5000 	 * rather than handling it here.  It's then that path's
5001 	 * responsibility to clean up, call the completion
5002 	 * function in net80211, etc.
5003 	 */
5004 	if (atid->cleanup_inprogress) {
5005 		if (atid->isfiltered)
5006 			device_printf(sc->sc_dev,
5007 			    "%s: isfiltered=1, normal_comp?\n",
5008 			    __func__);
5009 		ATH_TX_UNLOCK(sc);
5010 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5011 		    __func__);
5012 		ath_tx_comp_cleanup_unaggr(sc, bf);
5013 		return;
5014 	}
5015 
5016 	/*
5017 	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5018 	 * overlap?
5019 	 *
5020 	 * If the frame is filtered OR if it's any failure but
5021 	 * the TID is filtered, the frame must be added to the
5022 	 * filtered frame list.
5023 	 *
5024 	 * However - a busy buffer can't be added to the filtered
5025 	 * list as it will end up being recycled without having
5026 	 * been made available for the hardware.
5027 	 */
5028 	if ((ts.ts_status & HAL_TXERR_FILT) ||
5029 	    (ts.ts_status != 0 && atid->isfiltered)) {
5030 		int freeframe;
5031 
5032 		if (fail != 0)
5033 			device_printf(sc->sc_dev,
5034 			    "%s: isfiltered=1, fail=%d\n",
5035 			    __func__,
5036 			    fail);
5037 		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5038 		if (freeframe) {
5039 			/* Remove from BAW */
5040 			if (bf->bf_state.bfs_addedbaw)
5041 				drops++;
5042 			if (bf->bf_state.bfs_dobaw) {
5043 				ath_tx_update_baw(sc, an, atid, bf);
5044 				if (! bf->bf_state.bfs_addedbaw)
5045 					device_printf(sc->sc_dev,
5046 					    "%s: wasn't added: seqno %d\n",
5047 					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5048 			}
5049 			bf->bf_state.bfs_dobaw = 0;
5050 		}
5051 
5052 		/*
5053 		 * If the frame couldn't be filtered, treat it as a drop and
5054 		 * prepare to send a BAR.
5055 		 */
5056 		if (freeframe && drops)
5057 			ath_tx_tid_bar_suspend(sc, atid);
5058 
5059 		/*
5060 		 * Send BAR if required
5061 		 */
5062 		if (ath_tx_tid_bar_tx_ready(sc, atid))
5063 			ath_tx_tid_bar_tx(sc, atid);
5064 
5065 		ATH_TX_UNLOCK(sc);
5066 		/*
5067 		 * If freeframe is set, then the frame couldn't be
5068 		 * cloned and bf is still valid.  Just complete/free it.
5069 		 */
5070 		if (freeframe)
5071 			ath_tx_default_comp(sc, bf, fail);
5072 
5073 
5074 		return;
5075 	}
5076 	/*
5077 	 * Don't bother with the retry check if all frames
5078 	 * are being failed (eg during queue deletion.)
5079 	 */
5080 #if 0
5081 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5082 #endif
5083 	if (fail == 0 && ts.ts_status != 0) {
5084 		ATH_TX_UNLOCK(sc);
5085 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5086 		    __func__);
5087 		ath_tx_aggr_retry_unaggr(sc, bf);
5088 		return;
5089 	}
5090 
5091 	/* Success? Complete */
5092 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5093 	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5094 	if (bf->bf_state.bfs_dobaw) {
5095 		ath_tx_update_baw(sc, an, atid, bf);
5096 		bf->bf_state.bfs_dobaw = 0;
5097 		if (! bf->bf_state.bfs_addedbaw)
5098 			device_printf(sc->sc_dev,
5099 			    "%s: wasn't added: seqno %d\n",
5100 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5101 	}
5102 
5103 	/*
5104 	 * If the queue is filtered, re-schedule as required.
5105 	 *
5106 	 * This is required as there may be a subsequent TX descriptor
5107 	 * for this end-node that has CLRDMASK set, so it's quite possible
5108 	 * that a filtered frame will be followed by a non-filtered
5109 	 * (complete or otherwise) frame.
5110 	 *
5111 	 * XXX should we do this before we complete the frame?
5112 	 */
5113 	if (atid->isfiltered)
5114 		ath_tx_tid_filt_comp_complete(sc, atid);
5115 
5116 	/*
5117 	 * Send BAR if required
5118 	 */
5119 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5120 		ath_tx_tid_bar_tx(sc, atid);
5121 
5122 	ATH_TX_UNLOCK(sc);
5123 
5124 	ath_tx_default_comp(sc, bf, fail);
5125 	/* bf is freed at this point */
5126 }
5127 
5128 void
5129 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5130 {
5131 	if (bf->bf_state.bfs_aggr)
5132 		ath_tx_aggr_comp_aggr(sc, bf, fail);
5133 	else
5134 		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5135 }
5136 
5137 /*
5138  * Schedule some packets from the given node/TID to the hardware.
5139  *
5140  * This is the aggregate version.
5141  */
5142 void
5143 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5144     struct ath_tid *tid)
5145 {
5146 	struct ath_buf *bf;
5147 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5148 	struct ieee80211_tx_ampdu *tap;
5149 	ATH_AGGR_STATUS status;
5150 	ath_bufhead bf_q;
5151 
5152 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5153 	ATH_TX_LOCK_ASSERT(sc);
5154 
5155 	/*
5156 	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5157 	 * ensure we only leak one.
5158 	 */
5159 
5160 	tap = ath_tx_get_tx_tid(an, tid->tid);
5161 
5162 	if (tid->tid == IEEE80211_NONQOS_TID)
5163 		device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n",
5164 		    __func__);
5165 
5166 	for (;;) {
5167 		status = ATH_AGGR_DONE;
5168 
5169 		/*
5170 		 * If the upper layer has paused the TID, don't
5171 		 * queue any further packets.
5172 		 *
5173 		 * This can also occur from the completion task because
5174 		 * of packet loss; but as it's serialised with this code,
5175 		 * it won't "appear" halfway through queuing packets.
5176 		 */
5177 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5178 			break;
5179 
5180 		bf = ATH_TID_FIRST(tid);
5181 		if (bf == NULL) {
5182 			break;
5183 		}
5184 
5185 		/*
5186 		 * If the packet doesn't fall within the BAW (eg a NULL
5187 		 * data frame), schedule it directly; continue.
5188 		 */
5189 		if (! bf->bf_state.bfs_dobaw) {
5190 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5191 			    "%s: non-baw packet\n",
5192 			    __func__);
5193 			ATH_TID_REMOVE(tid, bf, bf_list);
5194 
5195 			if (bf->bf_state.bfs_nframes > 1)
5196 				device_printf(sc->sc_dev,
5197 				    "%s: aggr=%d, nframes=%d\n",
5198 				    __func__,
5199 				    bf->bf_state.bfs_aggr,
5200 				    bf->bf_state.bfs_nframes);
5201 
5202 			/*
5203 			 * This shouldn't happen - such frames shouldn't
5204 			 * ever have been queued as an aggregate in the
5205 			 * first place.  However, make sure the fields
5206 			 * are correctly setup just to be totally sure.
5207 			 */
5208 			bf->bf_state.bfs_aggr = 0;
5209 			bf->bf_state.bfs_nframes = 1;
5210 
5211 			/* Update CLRDMASK just before this frame is queued */
5212 			ath_tx_update_clrdmask(sc, tid, bf);
5213 
5214 			ath_tx_do_ratelookup(sc, bf);
5215 			ath_tx_calc_duration(sc, bf);
5216 			ath_tx_calc_protection(sc, bf);
5217 			ath_tx_set_rtscts(sc, bf);
5218 			ath_tx_rate_fill_rcflags(sc, bf);
5219 			ath_tx_setds(sc, bf);
5220 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5221 
5222 			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5223 
5224 			/* Queue the packet; continue */
5225 			goto queuepkt;
5226 		}
5227 
5228 		TAILQ_INIT(&bf_q);
5229 
5230 		/*
5231 		 * Do a rate control lookup on the first frame in the
5232 		 * list. The rate control code needs that to occur
5233 		 * before it can determine whether to TX.
5234 		 * It's inaccurate because the rate control code doesn't
5235 		 * really "do" aggregate lookups, so it only considers
5236 		 * the size of the first frame.
5237 		 */
5238 		ath_tx_do_ratelookup(sc, bf);
5239 		bf->bf_state.bfs_rc[3].rix = 0;
5240 		bf->bf_state.bfs_rc[3].tries = 0;
5241 
5242 		ath_tx_calc_duration(sc, bf);
5243 		ath_tx_calc_protection(sc, bf);
5244 
5245 		ath_tx_set_rtscts(sc, bf);
5246 		ath_tx_rate_fill_rcflags(sc, bf);
5247 
5248 		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5249 
5250 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5251 		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5252 
5253 		/*
5254 		 * No frames to be picked up - out of BAW
5255 		 */
5256 		if (TAILQ_EMPTY(&bf_q))
5257 			break;
5258 
5259 		/*
5260 		 * This assumes that the buffers in the ath_bufhead
5261 		 * are already linked together via bf_next pointers.
5262 		 */
5263 		bf = TAILQ_FIRST(&bf_q);
5264 
5265 		if (status == ATH_AGGR_8K_LIMITED)
5266 			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5267 
5268 		/*
5269 		 * If it's the only frame, send it as a non-aggregate;
5270 		 * assume that ath_tx_form_aggr() has checked
5271 		 * whether it's in the BAW and added it appropriately.
5272 		 */
5273 		if (bf->bf_state.bfs_nframes == 1) {
5274 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5275 			    "%s: single-frame aggregate\n", __func__);
5276 
5277 			/* Update CLRDMASK just before this frame is queued */
5278 			ath_tx_update_clrdmask(sc, tid, bf);
5279 
5280 			bf->bf_state.bfs_aggr = 0;
5281 			bf->bf_state.bfs_ndelim = 0;
5282 			ath_tx_setds(sc, bf);
5283 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5284 			if (status == ATH_AGGR_BAW_CLOSED)
5285 				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5286 			else
5287 				sc->sc_aggr_stats.aggr_single_pkt++;
5288 		} else {
5289 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5290 			    "%s: multi-frame aggregate: %d frames, "
5291 			    "length %d\n",
5292 			     __func__, bf->bf_state.bfs_nframes,
5293 			    bf->bf_state.bfs_al);
5294 			bf->bf_state.bfs_aggr = 1;
5295 			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5296 			sc->sc_aggr_stats.aggr_aggr_pkt++;
5297 
5298 			/* Update CLRDMASK just before this frame is queued */
5299 			ath_tx_update_clrdmask(sc, tid, bf);
5300 
5301 			/*
5302 			 * Calculate the duration/protection as required.
5303 			 */
5304 			ath_tx_calc_duration(sc, bf);
5305 			ath_tx_calc_protection(sc, bf);
5306 
5307 			/*
5308 			 * Update the rate and rtscts information based on the
5309 			 * rate decision made by the rate control code;
5310 			 * the first frame in the aggregate needs it.
5311 			 */
5312 			ath_tx_set_rtscts(sc, bf);
5313 
5314 			/*
5315 			 * Setup the relevant descriptor fields
5316 			 * for aggregation. The first descriptor
5317 			 * already points to the rest in the chain.
5318 			 */
5319 			ath_tx_setds_11n(sc, bf);
5320 
5321 		}
5322 	queuepkt:
5323 		/* Set completion handler, multi-frame aggregate or not */
5324 		bf->bf_comp = ath_tx_aggr_comp;
5325 
5326 		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5327 		    device_printf(sc->sc_dev, "%s: TID=16?\n", __func__);
5328 
5329 		/*
5330 		 * Update leak count and frame config if we're leaking frames.
5331 		 *
5332 		 * XXX TODO: it should update all frames in an aggregate
5333 		 * correctly!
5334 		 */
5335 		ath_tx_leak_count_update(sc, tid, bf);
5336 
5337 		/* Punt to txq */
5338 		ath_tx_handoff(sc, txq, bf);
5339 
5340 		/* Track outstanding buffer count to hardware */
5341 		/* aggregates are "one" buffer */
5342 		tid->hwq_depth++;
5343 
5344 		/*
5345 		 * Break out if ath_tx_form_aggr() indicated
5346 		 * there can't be any further progress (e.g., the BAW
5347 		 * is full.)  Checking for an empty txq is done above.
5348 		 *
5349 		 * XXX TXQ locking?
5350 		 */
5352 		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5353 		    (status == ATH_AGGR_BAW_CLOSED ||
5354 		     status == ATH_AGGR_LEAK_CLOSED))
5355 			break;
5356 	}
5357 }
5358 
5359 /*
5360  * Schedule some packets from the given node/TID to the hardware.
5361  *
5362  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5363  * It just dumps frames into the TXQ.  We should limit how deep
5364  * the transmit queue can grow for frames dispatched to the given
5365  * TXQ.
5366  *
5367  * To avoid locking issues, either we need to own the TXQ lock
5368  * at this point, or we need to pass in the maximum frame count
5369  * from the caller.
5370  */
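/*
 * A minimal sketch of such a limit, checked at the top of the loop
 * below (assuming a hypothetical "sc_hwq_limit_norm" tunable; no such
 * field exists today):
 *
 *	if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_norm)
 *		break;
 */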
5371 void
5372 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5373     struct ath_tid *tid)
5374 {
5375 	struct ath_buf *bf;
5376 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5377 
5378 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5379 	    __func__, an, tid->tid);
5380 
5381 	ATH_TX_LOCK_ASSERT(sc);
5382 
5383 	/* Sanity: AMPDU shouldn't be pending or running here; warn if it is */
5384 	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5385 		device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n",
5386 		    __func__, tid->tid);
5387 	if (ath_tx_ampdu_running(sc, an, tid->tid))
5388 		device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n",
5389 		    __func__, tid->tid);
5390 
5391 	for (;;) {
5392 
5393 		/*
5394 		 * If the upper layers have paused the TID, don't
5395 		 * queue any further packets.
5396 		 *
5397 		 * XXX if we are leaking frames, make sure we decrement
5398 		 * that counter _and_ we continue here.
5399 		 */
5400 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5401 			break;
5402 
5403 		bf = ATH_TID_FIRST(tid);
5404 		if (bf == NULL) {
5405 			break;
5406 		}
5407 
5408 		ATH_TID_REMOVE(tid, bf, bf_list);
5409 
5410 		/* Sanity check! */
5411 		if (tid->tid != bf->bf_state.bfs_tid) {
5412 			device_printf(sc->sc_dev, "%s: bfs_tid %d !="
5413 			    " tid %d\n",
5414 			    __func__, bf->bf_state.bfs_tid, tid->tid);
5415 		}
5416 		/* Normal completion handler */
5417 		bf->bf_comp = ath_tx_normal_comp;
5418 
5419 		/*
5420 		 * Override this for now, until the non-aggregate
5421 		 * completion handler correctly handles software retransmits.
5422 		 */
5423 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5424 
5425 		/* Update CLRDMASK just before this frame is queued */
5426 		ath_tx_update_clrdmask(sc, tid, bf);
5427 
5428 		/* Program descriptors + rate control */
5429 		ath_tx_do_ratelookup(sc, bf);
5430 		ath_tx_calc_duration(sc, bf);
5431 		ath_tx_calc_protection(sc, bf);
5432 		ath_tx_set_rtscts(sc, bf);
5433 		ath_tx_rate_fill_rcflags(sc, bf);
5434 		ath_tx_setds(sc, bf);
5435 
5436 		/*
5437 		 * Update the current leak count if
5438 		 * we're leaking frames, and set the
5439 		 * MORE flag as appropriate.
5440 		 */
5441 		ath_tx_leak_count_update(sc, tid, bf);
5442 
5443 		/* Track outstanding buffer count to hardware */
5444 		/* aggregates are "one" buffer */
5445 		tid->hwq_depth++;
5446 
5447 		/* Punt to hardware or software txq */
5448 		ath_tx_handoff(sc, txq, bf);
5449 	}
5450 }
5451 
5452 /*
5453  * Schedule some packets to the given hardware queue.
5454  *
5455  * This function walks the list of TIDs (ie, ath_node TIDs
5456  * with queued traffic) and attempts to schedule traffic
5457  * from them.
5458  *
5459  * TID scheduling is implemented as a FIFO, with TIDs being
5460  * added to the end of the queue after some frames have been
5461  * scheduled.
5462  */
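/*
 * In outline, the loop below does (simplified sketch; annotations in
 * parentheses):
 *
 *	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
 *	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
 *		ath_tx_tid_unsched(sc, tid);	(pop TID off the FIFO)
 *		(dispatch some frames from this TID)
 *		if (tid->axq_depth != 0)
 *			ath_tx_tid_sched(sc, tid);	(re-append at tail)
 *		if (tid == last)
 *			break;		(one full pass is done)
 *	}
 */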
5463 void
5464 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5465 {
5466 	struct ath_tid *tid, *next, *last;
5467 
5468 	ATH_TX_LOCK_ASSERT(sc);
5469 
5470 	/*
5471 	 * Don't schedule if the hardware queue is busy.
5472 	 * This (hopefully) gives some more time to aggregate
5473 	 * some packets in the aggregation queue.
5474 	 *
5475 	 * XXX It doesn't stop a parallel sender from sneaking
5476 	 * in transmitting a frame!
5477 	 */
5478 	/* XXX TXQ locking */
5479 	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5480 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5481 		return;
5482 	}
5483 	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5484 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5485 		return;
5486 	}
5487 
5488 	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5489 
5490 	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5491 		/*
5492 		 * Suspend paused queues here; they'll be resumed
5493 		 * once the addba completes or times out.
5494 		 */
5495 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5496 		    __func__, tid->tid, tid->paused);
5497 		ath_tx_tid_unsched(sc, tid);
5498 		/*
5499 		 * This node may be in power-save and we're leaking
5500 		 * a frame; be careful.
5501 		 */
5502 		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5503 			continue;
5504 		}
5505 		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5506 			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5507 		else
5508 			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5509 
5510 		/* Not empty? Re-schedule */
5511 		if (tid->axq_depth != 0)
5512 			ath_tx_tid_sched(sc, tid);
5513 
5514 		/*
5515 		 * Give the software queue time to aggregate more
5516 		 * packets.  If we aren't running aggregation then
5517 		 * we should still limit the hardware queue depth.
5518 		 */
5519 		/* XXX TXQ locking */
5520 		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5521 			break;
5522 		}
5523 		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5524 			break;
5525 		}
5526 
5527 		/*
5528 		 * If this was the last entry on the original list, stop.
5529 		 * Otherwise nodes that have been rescheduled onto the end
5530 		 * of the TID FIFO list will just keep being rescheduled.
5531 		 *
5532 		 * XXX What should we do about nodes that were paused
5533 		 * but are pending a leaking frame in response to a ps-poll?
5534 		 * They'll be put at the front of the list; so they'll
5535 		 * prematurely trigger this condition! Ew.
5536 		 */
5537 		if (tid == last)
5538 			break;
5539 	}
5540 }
5541 
5542 /*
5543  * TX addba handling
5544  */
5545 
5546 /*
5547  * Return net80211 TID struct pointer, or NULL for none
5548  */
5549 struct ieee80211_tx_ampdu *
5550 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5551 {
5552 	struct ieee80211_node *ni = &an->an_node;
5553 	struct ieee80211_tx_ampdu *tap;
5554 
5555 	if (tid == IEEE80211_NONQOS_TID)
5556 		return NULL;
5557 
5558 	tap = &ni->ni_tx_ampdu[tid];
5559 	return tap;
5560 }
5561 
5562 /*
5563  * Is AMPDU-TX running?
5564  */
5565 static int
5566 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5567 {
5568 	struct ieee80211_tx_ampdu *tap;
5569 
5570 	if (tid == IEEE80211_NONQOS_TID)
5571 		return 0;
5572 
5573 	tap = ath_tx_get_tx_tid(an, tid);
5574 	if (tap == NULL)
5575 		return 0;	/* Not valid; default to not running */
5576 
5577 	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5578 }
5579 
5580 /*
5581  * Is AMPDU-TX negotiation pending?
5582  */
5583 static int
5584 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5585 {
5586 	struct ieee80211_tx_ampdu *tap;
5587 
5588 	if (tid == IEEE80211_NONQOS_TID)
5589 		return 0;
5590 
5591 	tap = ath_tx_get_tx_tid(an, tid);
5592 	if (tap == NULL)
5593 		return 0;	/* Not valid; default to not pending */
5594 
5595 	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5596 }
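/*
 * Together, these two predicates drive the TX path split, e.g.:
 *
 *	if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
 *		ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
 *	else
 *		ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
 *
 * as done in ath_txq_sched() above.
 */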
5597 
5603 /*
5604  * Method to handle sending an ADDBA request.
5605  *
5606  * We tap this so the relevant flags can be set to pause the TID
5607  * whilst waiting for the response.
5608  *
5609  * XXX there's no timeout handler we can override?
5610  */
5611 int
5612 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5613     int dialogtoken, int baparamset, int batimeout)
5614 {
5615 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5616 	int tid = tap->txa_tid;
5617 	struct ath_node *an = ATH_NODE(ni);
5618 	struct ath_tid *atid = &an->an_tid[tid];
5619 
5620 	/*
5621 	 * XXX danger Will Robinson!
5622 	 *
5623 	 * Although the taskqueue may be running and scheduling some more
5624 	 * packets, these should all be _before_ the addba sequence number.
5625 	 * However, net80211 will keep self-assigning sequence numbers
5626 	 * until addba has been negotiated.
5627 	 *
5628 	 * In the past, these packets would be "paused" (which still works
5629 	 * fine, as they're being scheduled to the driver in the same
5630 	 * serialised method which is calling the addba request routine)
5631 	 * and when the aggregation session begins, they'll be dequeued
5632 	 * as aggregate packets and added to the BAW. However, now there's
5633 	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5634 	 * packets. Thus they never get included in the BAW tracking and
5635 	 * this can cause the initial burst of packets after the addba
5636 	 * negotiation to "hang", as they quickly fall outside the BAW.
5637 	 *
5638 	 * The "eventual" solution should be to tag these packets with
5639 	 * dobaw. Although net80211 has given us a sequence number,
5640 	 * it'll be "after" the left edge of the BAW and thus it'll
5641 	 * fall within it.
5642 	 */
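	/*
	 * As a worked example (illustrative numbers): with a BAW left
	 * edge of 100 and a window of 64, seqnos 100..163 (modulo 4096)
	 * fall inside the window.  Frames sent without bfs_dobaw never
	 * advance the left edge on completion, so once 64 subsequent
	 * seqnos have been handed out the window is exhausted and TX
	 * stalls - the "hang" described above.
	 */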
5643 	ATH_TX_LOCK(sc);
5644 	/*
5645 	 * This is a bit annoying.  Until net80211 HT code inherits some
5646 	 * (any) locking, we may have this called in parallel BUT only
5647 	 * one response/timeout will be called.  Grr.
5648 	 */
5649 	if (atid->addba_tx_pending == 0) {
5650 		ath_tx_tid_pause(sc, atid);
5651 		atid->addba_tx_pending = 1;
5652 	}
5653 	ATH_TX_UNLOCK(sc);
5654 
5655 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5656 	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5657 	    __func__,
5658 	    ni->ni_macaddr,
5659 	    ":",
5660 	    dialogtoken, baparamset, batimeout);
5661 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5662 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5663 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5664 
5665 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5666 	    batimeout);
5667 }
5668 
5669 /*
5670  * Handle an ADDBA response.
5671  *
5672  * We unpause the queue so TX'ing can resume.
5673  *
5674  * Any packets TX'ed from this point should be marked "aggregate"
5675  * (whether actually aggregated or not) so the BAW is updated.
5676  *
5677  * Note! net80211 keeps self-assigning sequence numbers until
5678  * ampdu is negotiated. This means the initially-negotiated BAW left
5679  * edge won't match ni->ni_txseqs[tid].
5680  *
5681  * So, being very dirty, the BAW left edge is "slid" here to match
5682  * ni->ni_txseqs[tid].
5683  *
5684  * What likely SHOULD happen is that all packets subsequent to the
5685  * addba request should be tagged as aggregate and queued as non-aggregate
5686  * frames; thus updating the BAW. For now though, I'll just slide the
5687  * window.
5688  */
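/*
 * For example (illustrative numbers): if the addba exchange recorded
 * txa_start = 100 but net80211 has since self-assigned seqnos up to
 * ni_txseqs[tid] = 105, the left edge is slid forward to 105 so the
 * first post-addba frame lands exactly on the window's left edge.
 */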
5689 int
5690 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5691     int status, int code, int batimeout)
5692 {
5693 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5694 	int tid = tap->txa_tid;
5695 	struct ath_node *an = ATH_NODE(ni);
5696 	struct ath_tid *atid = &an->an_tid[tid];
5697 	int r;
5698 
5699 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5700 	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5701 	    ni->ni_macaddr,
5702 	    ":",
5703 	    status, code, batimeout);
5704 
5705 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5706 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5707 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5708 
5709 	/*
5710 	 * Call this first, so the interface flags get updated
5711 	 * before the TID is unpaused. Otherwise a race condition
5712 	 * exists where the unpaused TID still doesn't yet have
5713 	 * IEEE80211_AGGR_RUNNING set.
5714 	 */
5715 	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5716 
5717 	ATH_TX_LOCK(sc);
5718 	atid->addba_tx_pending = 0;
5719 	/*
5720 	 * XXX dirty!
5721 	 * Slide the BAW left edge to wherever net80211 left it for us.
5722 	 * Read above for more information.
5723 	 */
5724 	tap->txa_start = ni->ni_txseqs[tid];
5725 	ath_tx_tid_resume(sc, atid);
5726 	ATH_TX_UNLOCK(sc);
5727 	return r;
5728 }
5729 
5730 
5731 /*
5732  * Stop ADDBA on a queue.
5733  *
5734  * This can be called whilst BAR TX is currently active on the queue,
5735  * so make sure this is unblocked before continuing.
5736  */
5737 void
5738 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5739 {
5740 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5741 	int tid = tap->txa_tid;
5742 	struct ath_node *an = ATH_NODE(ni);
5743 	struct ath_tid *atid = &an->an_tid[tid];
5744 	ath_bufhead bf_cq;
5745 	struct ath_buf *bf;
5746 
5747 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
5748 	    __func__,
5749 	    ni->ni_macaddr,
5750 	    ":");
5751 
5752 	/*
5753 	 * Pause TID traffic early, so there aren't any races.
5754 	 * Unblock the pending BAR-held traffic, if it's currently paused.
5755 	 */
5756 	ATH_TX_LOCK(sc);
5757 	ath_tx_tid_pause(sc, atid);
5758 	if (atid->bar_wait) {
5759 		/*
5760 		 * bar_unsuspend() expects bar_tx == 1, as it should be
5761 		 * called from the TX completion path.  This quietens
5762 		 * the warning.  It's cleared for us anyway.
5763 		 */
5764 		atid->bar_tx = 1;
5765 		ath_tx_tid_bar_unsuspend(sc, atid);
5766 	}
5767 	ATH_TX_UNLOCK(sc);
5768 
5769 	/* There's no need to hold the TXQ lock here */
5770 	sc->sc_addba_stop(ni, tap);
5771 
5772 	/*
5773 	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5774 	 * it'll set the cleanup flag, and it'll be unpaused once
5775 	 * things have been cleaned up.
5776 	 */
5777 	TAILQ_INIT(&bf_cq);
5778 	ATH_TX_LOCK(sc);
5779 	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5780 	/*
5781 	 * Unpause the TID if no cleanup is required.
5782 	 */
5783 	if (! atid->cleanup_inprogress)
5784 		ath_tx_tid_resume(sc, atid);
5785 	ATH_TX_UNLOCK(sc);
5786 
5787 	/* Handle completing frames and fail them */
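	/*
	 * Note: these failed frames are deliberately completed here,
	 * after ATH_TX_UNLOCK(), presumably so the completion handlers
	 * (which may call back into net80211) never run with the TX
	 * lock held.
	 */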
5788 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5789 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5790 		ath_tx_default_comp(sc, bf, 1);
5791 	}
5792 
5793 }
5794 
5795 /*
5796  * Handle a node reassociation.
5797  *
5798  * We may have a bunch of frames queued to the hardware; those need
5799  * to be marked as cleanup.
5800  */
5801 void
5802 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5803 {
5804 	struct ath_tid *tid;
5805 	int i;
5806 	ath_bufhead bf_cq;
5807 	struct ath_buf *bf;
5808 
5809 	TAILQ_INIT(&bf_cq);
5810 
5811 	ATH_TX_UNLOCK_ASSERT(sc);
5812 
5813 	ATH_TX_LOCK(sc);
5814 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5815 		tid = &an->an_tid[i];
5816 		if (tid->hwq_depth == 0)
5817 			continue;
5818 		ath_tx_tid_pause(sc, tid);
5819 		DPRINTF(sc, ATH_DEBUG_NODE,
5820 		    "%s: %6D: TID %d: cleaning up TID\n",
5821 		    __func__,
5822 		    an->an_node.ni_macaddr,
5823 		    ":",
5824 		    i);
5825 		ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5826 		/*
5827 		 * Unpause the TID if no cleanup is required.
5828 		 */
5829 		if (! tid->cleanup_inprogress)
5830 			ath_tx_tid_resume(sc, tid);
5831 	}
5832 	ATH_TX_UNLOCK(sc);
5833 
5834 	/* Handle completing frames and fail them */
5835 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5836 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5837 		ath_tx_default_comp(sc, bf, 1);
5838 	}
5839 }
5840 
5841 /*
5842  * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5843  * it simply tears down the aggregation session. Ew.
5844  *
5845  * It however will call ieee80211_ampdu_stop() which will call
5846  * ic->ic_addba_stop().
5847  *
5848  * XXX This uses a hard-coded max BAR count value; the whole
5849  * XXX BAR TX success or failure should be better handled!
5850  */
5851 void
5852 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5853     int status)
5854 {
5855 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5856 	int tid = tap->txa_tid;
5857 	struct ath_node *an = ATH_NODE(ni);
5858 	struct ath_tid *atid = &an->an_tid[tid];
5859 	int attempts = tap->txa_attempts;
5860 
5861 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5862 	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n",
5863 	    __func__,
5864 	    ni->ni_macaddr,
5865 	    ":",
5866 	    tap->txa_tid,
5867 	    atid->tid,
5868 	    status,
5869 	    attempts);
5870 
5871 	/* Note: This may update the BAW details */
5872 	sc->sc_bar_response(ni, tap, status);
5873 
5874 	/* Unpause the TID */
5875 	/*
5876 	 * XXX if this is attempt=50, the TID will be downgraded
5877 	 * XXX to a non-aggregate session. So we must unpause the
5878 	 * XXX TID here or it'll never be done.
5879 	 *
5880 	 * Also, don't call it if bar_tx/bar_wait are 0; something
5881 	 * has beaten us to the punch? (XXX figure out what?)
5882 	 */
5883 	if (status == 0 || attempts == 50) {
5884 		ATH_TX_LOCK(sc);
5885 		if (atid->bar_tx == 0 || atid->bar_wait == 0)
5886 			device_printf(sc->sc_dev,
5887 			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
5888 			    __func__,
5889 			    atid->bar_tx, atid->bar_wait);
5890 		else
5891 			ath_tx_tid_bar_unsuspend(sc, atid);
5892 		ATH_TX_UNLOCK(sc);
5893 	}
5894 }
5895 
5896 /*
5897  * This is called whenever the pending ADDBA request times out.
5898  * Unpause and reschedule the TID.
5899  */
5900 void
5901 ath_addba_response_timeout(struct ieee80211_node *ni,
5902     struct ieee80211_tx_ampdu *tap)
5903 {
5904 	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5905 	int tid = tap->txa_tid;
5906 	struct ath_node *an = ATH_NODE(ni);
5907 	struct ath_tid *atid = &an->an_tid[tid];
5908 
5909 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5910 	    "%s: %6D: TID=%d, called; resuming\n",
5911 	    __func__,
5912 	    ni->ni_macaddr,
5913 	    ":",
5914 	    tid);
5915 
5916 	ATH_TX_LOCK(sc);
5917 	atid->addba_tx_pending = 0;
5918 	ATH_TX_UNLOCK(sc);
5919 
5920 	/* Note: This updates the aggregate state to (again) pending */
5921 	sc->sc_addba_response_timeout(ni, tap);
5922 
5923 	/* Unpause the TID; which reschedules it */
5924 	ATH_TX_LOCK(sc);
5925 	ath_tx_tid_resume(sc, atid);
5926 	ATH_TX_UNLOCK(sc);
5927 }
5928 
5929 /*
5930  * Check if a node is asleep or not.
5931  */
5932 int
5933 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
5934 {
5935 
5936 	ATH_TX_LOCK_ASSERT(sc);
5937 
5938 	return (an->an_is_powersave);
5939 }
5940 
5941 /*
5942  * Mark a node as currently in power save.
5943  * This suspends all traffic on the node.
5944  *
5945  * This must be called with the node/tx locks free.
5946  *
5947  * XXX TODO: the locking silliness below is due to how the node
5948  * locking currently works.  Right now, the node lock is grabbed
5949  * to do rate control lookups and these are done with the TX
5950  * queue lock held.  This means the node lock can't be grabbed
5951  * first here or a LOR will occur.
5952  *
5953  * Eventually (hopefully!) the TX path code will only grab
5954  * the TXQ lock when transmitting and the ath_node lock when
5955  * doing node/TID operations.  There are other complications -
5956  * the sched/unsched operations involve walking the per-txq
5957  * 'active tid' list and this requires both locks to be held.
5958  */
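/*
 * A sketch of that eventual ordering (hypothetical - this is not what
 * the code below does today):
 *
 *	ATH_NODE_LOCK(an);		(node/TID state changes)
 *	ATH_TX_LOCK(sc);		(queue manipulation only)
 *	...
 *	ATH_TX_UNLOCK(sc);
 *	ATH_NODE_UNLOCK(an);
 */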
5959 void
5960 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
5961 {
5962 	struct ath_tid *atid;
5963 	struct ath_txq *txq;
5964 	int tid;
5965 
5966 	ATH_TX_UNLOCK_ASSERT(sc);
5967 
5968 	/* Suspend all traffic on the node */
5969 	ATH_TX_LOCK(sc);
5970 
5971 	if (an->an_is_powersave) {
5972 		device_printf(sc->sc_dev,
5973 		    "%s: %6D: node was already asleep!\n",
5974 		    __func__,
5975 		    an->an_node.ni_macaddr,
5976 		    ":");
5977 		ATH_TX_UNLOCK(sc);
5978 		return;
5979 	}
5980 
5981 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
5982 		atid = &an->an_tid[tid];
5983 		txq = sc->sc_ac2q[atid->ac];
5984 
5985 		ath_tx_tid_pause(sc, atid);
5986 	}
5987 
5988 	/* Mark node as in powersaving */
5989 	an->an_is_powersave = 1;
5990 
5991 	ATH_TX_UNLOCK(sc);
5992 }
5993 
5994 /*
5995  * Mark a node as currently "awake."
5996  * This resumes all traffic to the node.
5997  */
5998 void
5999 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6000 {
6001 	struct ath_tid *atid;
6002 	struct ath_txq *txq;
6003 	int tid;
6004 
6005 	ATH_TX_UNLOCK_ASSERT(sc);
6006 
6007 	ATH_TX_LOCK(sc);
6008 
6009 	/* !? */
6010 	if (an->an_is_powersave == 0) {
6011 		ATH_TX_UNLOCK(sc);
6012 		device_printf(sc->sc_dev,
6013 		    "%s: an=%p: node was already awake\n",
6014 		    __func__, an);
6015 		return;
6016 	}
6017 
6018 	/* Mark node as awake */
6019 	an->an_is_powersave = 0;
6020 	/*
6021 	 * Clear any pending leaked frame requests
6022 	 */
6023 	an->an_leak_count = 0;
6024 
6025 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6026 		atid = &an->an_tid[tid];
6027 		txq = sc->sc_ac2q[atid->ac];
6028 
6029 		ath_tx_tid_resume(sc, atid);
6030 	}
6031 	ATH_TX_UNLOCK(sc);
6032 }
6033 
6034 static int
6035 ath_legacy_dma_txsetup(struct ath_softc *sc)
6036 {
6037 
6038 	/* nothing new needed */
6039 	return (0);
6040 }
6041 
6042 static int
6043 ath_legacy_dma_txteardown(struct ath_softc *sc)
6044 {
6045 
6046 	/* nothing new needed */
6047 	return (0);
6048 }
6049 
6050 void
6051 ath_xmit_setup_legacy(struct ath_softc *sc)
6052 {
6053 	/*
6054 	 * For now, just set the descriptor length to sizeof(ath_desc);
6055 	 * worry about extracting the real length out of the HAL later.
6056 	 */
6057 	sc->sc_tx_desclen = sizeof(struct ath_desc);
6058 	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6059 	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6060 
6061 	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6062 	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6063 	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6064 
6065 	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6066 	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6067 
6068 	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6069 }
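/*
 * An EDMA TX path would install its own methods in the same fashion,
 * pulling the real descriptor sizes out of the HAL rather than
 * hard-coding sizeof(struct ath_desc); a rough sketch (see
 * if_ath_tx_edma.c for the actual implementation):
 *
 *	void
 *	ath_xmit_setup_edma(struct ath_softc *sc)
 *	{
 *		(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
 *		(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
 *		(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);
 *		...
 *	}
 */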
6070