/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/caps.h>
#include <sys/ktr.h>

#if defined(__DragonFly__)
/* empty */
#else
#include <machine/bus.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif
#include <netproto/802_11/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

#if defined(__DragonFly__)
extern const char *ath_hal_ether_sprintf(const uint8_t *mac);
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}
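
/*
 * NB: the two magic values checked above are believed to be the
 * AR5416-family and AR9300-family HAL magics (AR5416_MAGIC /
 * AR9300_MAGIC in the HAL sources); ie, the test is effectively
 * "is this an 11n-capable HAL".  Treat the names as informational.
 */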

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID).
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}
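
/*
 * Example (illustrative only): a QoS data frame classified as
 * WME_AC_VI maps via WME_AC_TO_TID() to TID 5, whilst a non-QoS
 * frame (eg a management frame) always lands in TID 16
 * (IEEE80211_NONQOS_TID), regardless of the AC it was classified to.
 */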

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QOS TID frames into the BE queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
    ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
    struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}
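
/*
 * Usage sketch (illustrative; assumes the caller has already split
 * m0 into an m_nextpkt fragment chain):
 *
 *	ath_bufhead frags;
 *
 *	TAILQ_INIT(&frags);
 *	if (m0->m_nextpkt != NULL &&
 *	    ath_txfrag_setup(sc, &frags, m0, ni) == 0) {
 *		// out of ath_bufs; caller must free the mbuf chain
 *	}
 *
 * Each ath_buf grabbed here holds an extra node reference, which is
 * either consumed by ath_tx_start() or released via
 * ath_txfrag_cleanup().
 */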

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
#if defined(__DragonFly__)
#else
	struct mbuf *m;
#endif
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, 1, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#endif
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ieee80211_free_mbuf(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_defrag(sc->sc_dmat,
		    bf->bf_dmamap, &m0,
		    bf->bf_segs, ATH_TXDESC,
		    &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ieee80211_free_mbuf(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ieee80211_free_mbuf(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ieee80211_free_mbuf(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , bf->bf_state.bfs_tx_queue
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}
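
/*
 * Illustrative example of the loop above: with numTxMaps = 4 (EDMA)
 * and a 6-segment mbuf chain, two descriptors are written - the first
 * carrying segments 0..3 and the second segments 4..5 - linked via
 * ath_hal_settxdesclink().  With numTxMaps = 1 (legacy chips) each
 * segment gets its own descriptor.
 */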

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will set up the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips.  These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it.  The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
		    , rc[1].ratecode, rc[1].tries
		    , rc[2].ratecode, rc[2].tries
		    , rc[3].ratecode, rc[3].tries
		);
	}
}
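
/*
 * Illustrative series layout (the rates here are made up): with MRR
 * enabled the array might look like rc[0]={MCS7, tries=4},
 * rc[1]={MCS4, 4}, rc[2]={MCS1, 4}, rc[3]={MCS0, 4}; with
 * bfs_ismrr clear only rc[0] is attempted and series 1..3 are
 * blanked above.
 */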

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
		    , bf->bf_state.bfs_pktlen	/* packet length */
		    , bf->bf_state.bfs_hdrlen	/* header length */
		    , bf->bf_state.bfs_atype	/* Atheros packet type */
		    , bf->bf_state.bfs_txpower	/* txpower */
		    , bf->bf_state.bfs_txrate0
		    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
		    , bf->bf_state.bfs_keyix	/* key cache index */
		    , bf->bf_state.bfs_txantenna	/* antenna mode */
		    , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
		    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		);

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}
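
/*
 * Resulting chain (illustrative) for a three-subframe aggregate:
 * bf_first -> bf2 -> bf3 via bf_next; the descriptors are linked
 * across subframe boundaries with ath_hal_settxdesclink(); and
 * bf_first->bf_lastds points at bf3's final descriptor, which is
 * where the hardware writes the completion status.
 */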

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX We should instead just verify that sc_txstart_cnt
	 * or ath_txproc_cnt > 0.  That would mean that
	 * the reset is going to be waiting for us to complete.
	 */
	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
		device_printf(sc->sc_dev,
		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
		    __func__);
	}

	/*
	 * XXX .. this is going to cause the hardware to get upset;
	 * so we really should find some way to drop or queue
	 * things.
	 */

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}
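
/*
 * Summary of the handoff above (descriptive only): if PUTRUNNING was
 * clear, the queue head is written to the TxDP and DMA starts fresh;
 * otherwise the new frame is only chained in via *axq_link /
 * ath_hal_settxdesclink(), and ath_hal_txstart() kicks the QCU to
 * re-read the previously fetched descriptor's link pointer.
 */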

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame.  The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}
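
/*
 * Example of the length twiddling above (values recalled from the
 * net80211 cipher definitions; treat as illustrative): for CCMP,
 * ic_header is 8 (IV + extended IV), ic_trailer is 8 (the MIC) and
 * ic_miclen is 0; for TKIP with a software Michael MIC, ic_header is
 * 8, ic_trailer is 4 (ICV), and the 8-byte MIC is already counted in
 * m_pkthdr.len, so pktlen isn't adjusted for it here.
 */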

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ieee80211com *ic = &sc->sc_ic;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		kprintf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}
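
/*
 * Worked example (long preamble, RTS enabled, ACK expected), matching
 * the else branch above:
 *
 *	ctsduration = lpAckDuration[cix]	(SIFS + CTS)
 *	            + txtime(pktlen @ rix)	(the frame itself)
 *	            + lpAckDuration[rix]	(SIFS + ACK)
 *
 * ie, the duration the RTS reserves covers everything that follows
 * it in the exchange.
 */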

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate.  Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal frame or a fast-frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
	    , bf->bf_state.bfs_pktlen	/* packet length */
	    , bf->bf_state.bfs_hdrlen	/* header length */
	    , bf->bf_state.bfs_atype	/* Atheros packet type */
	    , bf->bf_state.bfs_txpower	/* txpower */
	    , bf->bf_state.bfs_txrate0
	    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
	    , bf->bf_state.bfs_keyix	/* key cache index */
	    , bf->bf_state.bfs_txantenna	/* antenna mode */
	    , bf->bf_state.bfs_txflags	/* flags */
	    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
	    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;		/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}
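
/*
 * NB: the understanding here is that HAL_TXDESC_CLRDMASK clears the
 * per-destination TX filter in the MAC, so frames to a station that
 * was previously being filtered (eg after a failed TX to a sleeping
 * station) begin flowing again; see the HAL for the authoritative
 * behaviour.
 */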

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so it goes out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %s: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ath_hal_ether_sprintf(ni->ni_macaddr),
		    type, subtype);
#else
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
#endif
		return (0);
	} else {
		return (1);
	}
}
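
/*
 * Summary of the decision above (illustrative):
 *
 *	frame type		awake		asleep
 *	BAR			direct (0)	swq at head (1)
 *	other mgmt/ctrl		swq (1)		direct (0), for now
 *	data			swq (1)		swq (1)
 */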

/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal".  The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK.  ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both the sequence numbers and the CCMP PN
	 * handling are "correct", make sure that the relevant TID queue
	 * is locked.  Otherwise the CCMP PN and seqno may appear out of
	 * order, causing re-ordered frames to have out of order CCMP
	 * PN's, resulting in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
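
	/*
	 * Illustrative example (assumption about net80211's padding
	 * behaviour): a QoS data header is 26 bytes and the payload is
	 * padded out to a 4-byte boundary, so (hdrlen & 3) == 2 recovers
	 * the pad length that m_pkthdr.len includes but the frame on the
	 * air does not.
	 */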
1595
1596 /* Handle encryption twiddling if needed */
1597 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1598 &pktlen, &keyix)) {
1599 ieee80211_free_mbuf(m0);
1600 return EIO;
1601 }
1602
1603 /* packet header may have moved, reset our local pointer */
1604 wh = mtod(m0, struct ieee80211_frame *);
1605
1606 pktlen += IEEE80211_CRC_LEN;
1607
1608 /*
1609 * Load the DMA map so any coalescing is done. This
1610 * also calculates the number of descriptors we need.
1611 */
1612 error = ath_tx_dmasetup(sc, bf, m0);
1613 if (error != 0)
1614 return error;
1615 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1616 bf->bf_node = ni; /* NB: held reference */
1617 m0 = bf->bf_m; /* NB: may have changed */
1618 wh = mtod(m0, struct ieee80211_frame *);
1619
1620 /* setup descriptors */
1621 ds = bf->bf_desc;
1622 rt = sc->sc_currates;
1623 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1624
1625 /*
1626 * NB: the 802.11 layer marks whether or not we should
1627 * use short preamble based on the current mode and
1628 * negotiated parameters.
1629 */
1630 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1631 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1632 shortPreamble = AH_TRUE;
1633 sc->sc_stats.ast_tx_shortpre++;
1634 } else {
1635 shortPreamble = AH_FALSE;
1636 }
1637
1638 an = ATH_NODE(ni);
1639 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
1640 flags = 0;
1641 ismrr = 0; /* default no multi-rate retry*/
1642 pri = M_WME_GETAC(m0); /* honor classification */
1643 /* XXX use txparams instead of fixed values */
1644 /*
1645 * Calculate Atheros packet type from IEEE80211 packet header,
1646 * setup for rate calculations, and select h/w transmit queue.
1647 */
1648 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1649 case IEEE80211_FC0_TYPE_MGT:
1650 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1651 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1652 atype = HAL_PKT_TYPE_BEACON;
1653 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1654 atype = HAL_PKT_TYPE_PROBE_RESP;
1655 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1656 atype = HAL_PKT_TYPE_ATIM;
1657 else
1658 atype = HAL_PKT_TYPE_NORMAL; /* XXX */
1659 rix = an->an_mgmtrix;
1660 txrate = rt->info[rix].rateCode;
1661 if (shortPreamble)
1662 txrate |= rt->info[rix].shortPreamble;
1663 try0 = ATH_TXMGTTRY;
1664 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1665 break;
1666 case IEEE80211_FC0_TYPE_CTL:
1667 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
1668 rix = an->an_mgmtrix;
1669 txrate = rt->info[rix].rateCode;
1670 if (shortPreamble)
1671 txrate |= rt->info[rix].shortPreamble;
1672 try0 = ATH_TXMGTTRY;
1673 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1674 break;
1675 case IEEE80211_FC0_TYPE_DATA:
1676 atype = HAL_PKT_TYPE_NORMAL; /* default */
1677 /*
1678 * Data frames: multicast frames go out at a fixed rate,
1679 * EAPOL frames use the mgmt frame rate; otherwise consult
1680 * the rate control module for the rate to use.
1681 */
1682 if (ismcast) {
1683 rix = an->an_mcastrix;
1684 txrate = rt->info[rix].rateCode;
1685 if (shortPreamble)
1686 txrate |= rt->info[rix].shortPreamble;
1687 try0 = 1;
1688 } else if (m0->m_flags & M_EAPOL) {
1689 /* XXX? maybe always use long preamble? */
1690 rix = an->an_mgmtrix;
1691 txrate = rt->info[rix].rateCode;
1692 if (shortPreamble)
1693 txrate |= rt->info[rix].shortPreamble;
1694 try0 = ATH_TXMAXTRY; /* XXX?too many? */
1695 } else {
1696 /*
1697 * Do rate lookup on each TX, rather than using
1698 * the hard-coded TX information decided here.
1699 */
1700 ismrr = 1;
1701 bf->bf_state.bfs_doratelookup = 1;
1702 }
1703 if (cap->cap_wmeParams[pri].wmep_noackPolicy)
1704 flags |= HAL_TXDESC_NOACK;
1705 break;
1706 default:
1707 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1708 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1709 /* XXX statistic */
1710 /* XXX free tx dmamap */
1711 ieee80211_free_mbuf(m0);
1712 return EIO;
1713 }
1714
1715 /*
1716 * There are two known scenarios where the frame AC doesn't match
1717 * what the destination TXQ is.
1718 *
1719 * + non-QoS frames (eg management?) that the net80211 stack has
1720 * assigned a higher AC to, but since it's a non-QoS TID, it's
1721 * being thrown into TID 16. TID 16 gets the AC_BE queue.
1722 * It's quite possible that management frames should just be
1723 * direct dispatched to hardware rather than go via the software
1724 * queue; that should be investigated in the future. There are
1725 * some specific scenarios where this doesn't make sense, mostly
1726 * surrounding ADDBA request/response - hence why that is special
1727 * cased.
1728 *
1729 * + Multicast frames going into the VAP mcast queue. That shows up
1730 * as "TXQ 11".
1731 *
1732 * This driver should eventually support separate TID and TXQ locking,
1733 * allowing for arbitrary AC frames to appear on arbitrary software
1734 * queues, being queued to the "correct" hardware queue when needed.
1735 */
1736 #if 0
1737 if (txq != sc->sc_ac2q[pri]) {
1738 DPRINTF(sc, ATH_DEBUG_XMIT,
1739 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1740 __func__,
1741 txq,
1742 txq->axq_qnum,
1743 pri,
1744 sc->sc_ac2q[pri],
1745 sc->sc_ac2q[pri]->axq_qnum);
1746 }
1747 #endif
1748
1749 /*
1750 * Calculate miscellaneous flags.
1751 */
1752 if (ismcast) {
1753 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
1754 } else if (pktlen > vap->iv_rtsthreshold &&
1755 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1756 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
1757 sc->sc_stats.ast_tx_rts++;
1758 }
1759 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
1760 sc->sc_stats.ast_tx_noack++;
1761 #ifdef IEEE80211_SUPPORT_TDMA
1762 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1763 DPRINTF(sc, ATH_DEBUG_TDMA,
1764 "%s: discard frame, ACK required w/ TDMA\n", __func__);
1765 sc->sc_stats.ast_tdma_ack++;
1766 /* XXX free tx dmamap */
1767 ieee80211_free_mbuf(m0);
1768 return EIO;
1769 }
1770 #endif
1771
1772 /*
1773 * Determine if a tx interrupt should be generated for
1774 * this descriptor. We take a tx interrupt to reap
1775 * descriptors when the h/w hits an EOL condition or
1776 * when the descriptor is specifically marked to generate
1777 * an interrupt. We periodically mark descriptors in this
1778 * way to ensure timely replenishing of the supply needed
1779 * for sending frames. Deferring interrupts reduces system
1780 * load and potentially allows more concurrent work to be
1781 * done, but if done too aggressively it can cause senders
1782 * to back up.
1783 *
1784 * NB: use >= to deal with sc_txintrperiod changing
1785 * dynamically through sysctl.
1786 */
1787 if (flags & HAL_TXDESC_INTREQ) {
1788 txq->axq_intrcnt = 0;
1789 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1790 flags |= HAL_TXDESC_INTREQ;
1791 txq->axq_intrcnt = 0;
1792 }
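/*
 * Worked example of the mitigation above (an editorial sketch, not
 * from the original source): with sc_txintrperiod == 5 and no frame
 * already marked INTREQ, the first four descriptors queue silently
 * (axq_intrcnt counts 1..4) and the fifth sets HAL_TXDESC_INTREQ and
 * resets the counter, so the hardware raises roughly one TX interrupt
 * per five frames. A frame that arrives pre-marked INTREQ (e.g. the
 * management frames above) resets the counter early.
 */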
1793
1794 /* This point forward is actual TX bits */
1795
1796 /*
1797 * At this point we are committed to sending the frame
1798 * and we don't need to look at m_nextpkt; clear it in
1799 * case this frame is part of frag chain.
1800 */
1801 m0->m_nextpkt = NULL;
1802
1803 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1804 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1805 sc->sc_hwmap[rix].ieeerate, -1);
1806
1807 if (ieee80211_radiotap_active_vap(vap)) {
1808 u_int64_t tsf = ath_hal_gettsf64(ah);
1809
1810 sc->sc_tx_th.wt_tsf = htole64(tsf);
1811 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1812 if (iswep)
1813 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1814 if (isfrag)
1815 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1816 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1817 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1818 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1819
1820 ieee80211_radiotap_tx(vap, m0);
1821 }
1822
1823 /* Blank the legacy rate array */
1824 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1825
1826 /*
1827 * ath_buf_set_rate needs at least one rate/try to setup
1828 * the rate scenario.
1829 */
1830 bf->bf_state.bfs_rc[0].rix = rix;
1831 bf->bf_state.bfs_rc[0].tries = try0;
1832 bf->bf_state.bfs_rc[0].ratecode = txrate;
1833
1834 /* Store the decided rate index values away */
1835 bf->bf_state.bfs_pktlen = pktlen;
1836 bf->bf_state.bfs_hdrlen = hdrlen;
1837 bf->bf_state.bfs_atype = atype;
1838 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1839 bf->bf_state.bfs_txrate0 = txrate;
1840 bf->bf_state.bfs_try0 = try0;
1841 bf->bf_state.bfs_keyix = keyix;
1842 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1843 bf->bf_state.bfs_txflags = flags;
1844 bf->bf_state.bfs_shpream = shortPreamble;
1845
1846 /* XXX this should be done in ath_tx_setrate() */
1847 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1848 bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1849 bf->bf_state.bfs_ctsduration = 0;
1850 bf->bf_state.bfs_ismrr = ismrr;
1851
1852 return 0;
1853 }
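/*
 * Editorial note: at this point bf->bf_state carries the complete
 * rate/flag snapshot for the frame; the later descriptor setup path
 * (e.g. ath_tx_setds() / ath_tx_set_rtscts() below) consumes these
 * fields, and nothing has been handed to the hardware yet.
 */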
1854
1855 /*
1856 * Queue a frame to the hardware or software queue.
1857 *
1858 * This can be called by the net80211 code.
1859 *
1860 * XXX what about locking? Or, push the seqno assign into the
1861 * XXX aggregate scheduler so its serialised?
1862 *
1863 * XXX When sending management frames via ath_raw_xmit(),
1864 * should CLRDMASK be set unconditionally?
1865 */
1866 int
1867 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1868 struct ath_buf *bf, struct mbuf *m0)
1869 {
1870 struct ieee80211vap *vap = ni->ni_vap;
1871 struct ath_vap *avp = ATH_VAP(vap);
1872 int r = 0;
1873 u_int pri;
1874 int tid;
1875 struct ath_txq *txq;
1876 int ismcast;
1877 const struct ieee80211_frame *wh;
1878 int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1879 ieee80211_seq seqno;
1880 uint8_t type, subtype;
1881 int queue_to_head;
1882
1883 ATH_TX_LOCK_ASSERT(sc);
1884
1885 /*
1886 * Determine the target hardware queue.
1887 *
1888 * For multicast frames, the txq gets overridden appropriately
1889 * depending upon the state of PS.
1890 *
1891 * For any other frame, we do a TID/QoS lookup inside the frame
1892 * to see what the TID should be. If it's a non-QoS frame, the
1893 * AC and TID are overridden. The TID/TXQ code assumes the
1894 * TID is on a predictable hardware TXQ, so we don't support
1895 * having a node TID queued to multiple hardware TXQs.
1896 * This may change in the future but would require some locking
1897 * fudgery.
1898 */
1899 pri = ath_tx_getac(sc, m0);
1900 tid = ath_tx_gettid(sc, m0);
1901
1902 txq = sc->sc_ac2q[pri];
1903 wh = mtod(m0, struct ieee80211_frame *);
1904 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1905 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1906 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1907
1908 /*
1909 * Enforce how deep the multicast queue can grow.
1910 *
1911 * XXX duplicated in ath_raw_xmit().
1912 */
1913 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1914 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1915 > sc->sc_txq_mcastq_maxdepth) {
1916 sc->sc_stats.ast_tx_mcastq_overflow++;
1917 m_freem(m0);
1918 return (ENOBUFS);
1919 }
1920 }
1921
1922 /*
1923 * Enforce how deep the unicast queue can grow.
1924 *
1925 * If the node is in power save then we don't want
1926 * the software queue to grow too deep, or a node may
1927 * end up consuming all of the ath_buf entries.
1928 *
1929 * For now, only do this for DATA frames.
1930 *
1931 * We will want to cap how many management/control
1932 * frames get punted to the software queue so it doesn't
1933 * fill up. But the correct solution isn't yet obvious.
1934 * In any case, this check should at least let frames pass
1935 * that we are direct-dispatching.
1936 *
1937 * XXX TODO: duplicate this to the raw xmit path!
1938 */
1939 if (type == IEEE80211_FC0_TYPE_DATA &&
1940 ATH_NODE(ni)->an_is_powersave &&
1941 ATH_NODE(ni)->an_swq_depth >
1942 sc->sc_txq_node_psq_maxdepth) {
1943 sc->sc_stats.ast_tx_node_psq_overflow++;
1944 m_freem(m0);
1945 return (ENOBUFS);
1946 }
1947
1948 /* A-MPDU TX */
1949 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1950 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1951 is_ampdu = is_ampdu_tx | is_ampdu_pending;
1952
1953 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1954 __func__, tid, pri, is_ampdu);
1955
1956 /* Set local packet state, used to queue packets to hardware */
1957 bf->bf_state.bfs_tid = tid;
1958 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1959 bf->bf_state.bfs_pri = pri;
1960
1961 #if 1
1962 /*
1963 * When servicing one or more stations in power-save mode,
1964 * or if there is some mcast data waiting on the mcast
1965 * queue (to prevent out-of-order delivery), multicast frames
1966 * must be buffered until after the beacon.
1967 *
1968 * TODO: we should lock the mcastq before we check the length.
1969 */
1970 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1971 txq = &avp->av_mcastq;
1972 /*
1973 * Mark the frame as eventually belonging on the CAB
1974 * queue, so the descriptor setup functions will
1975 * correctly initialise the descriptor 'qcuId' field.
1976 */
1977 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1978 }
1979 #endif
1980
1981 /* Do the generic frame setup */
1982 /* XXX should just bzero the bf_state? */
1983 bf->bf_state.bfs_dobaw = 0;
1984
1985 /* A-MPDU TX? Manually set sequence number */
1986 /*
1987 * Don't do it whilst pending; the net80211 layer still
1988 * assigns them.
1989 */
1990 if (is_ampdu_tx) {
1991 /*
1992 * Always call; this function will
1993 * handle making sure that null data frames
1994 * don't get a sequence number from the current
1995 * TID and thus mess with the BAW.
1996 */
1997 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1998
1999 /*
2000 * Don't add QoS NULL frames to the BAW.
2001 */
2002 if (IEEE80211_QOS_HAS_SEQ(wh) &&
2003 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2004 bf->bf_state.bfs_dobaw = 1;
2005 }
2006 }
2007
2008 /*
2009 * If needed, the sequence number has been assigned.
2010 * Squirrel it away somewhere easy to get to.
2011 */
2012 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
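/*
 * Editorial note: bfs_seqno is stored pre-shifted; the SEQNO() macro
 * used throughout the BAW code below shifts it back down to the plain
 * 12-bit sequence number.
 */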
2013
2014 /* Is ampdu pending? fetch the seqno and print it out */
2015 if (is_ampdu_pending)
2016 DPRINTF(sc, ATH_DEBUG_SW_TX,
2017 "%s: tid %d: ampdu pending, seqno %d\n",
2018 __func__, tid, M_SEQNO_GET(m0));
2019
2020 /* This also sets up the DMA map */
2021 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2022
2023 if (r != 0)
2024 goto done;
2025
2026 /* At this point m0 could have changed! */
2027 m0 = bf->bf_m;
2028
2029 #if 1
2030 /*
2031 * If it's a multicast frame, do a direct-dispatch to the
2032 * destination hardware queue. Don't bother software
2033 * queuing it.
2034 */
2035 /*
2036 * If it's a BAR frame, do a direct dispatch to the
2037 * destination hardware queue. Don't bother software
2038 * queuing it, as the TID will now be paused.
2039 * Sending a BAR frame can occur from the net80211 txa timer
2040 * (ie, retries) or from the ath txtask (completion call.)
2041 * It queues directly to hardware because the TID is paused
2042 * at this point (and won't be unpaused until the BAR has
2043 * either been TXed successfully or max retries has been
2044 * reached.)
2045 */
2046 /*
2047 * Until things are better debugged - if this node is asleep
2048 * and we're sending it a non-BAR frame, direct dispatch it.
2049 * Why? Because we need to figure out what's actually being
2050 * sent - eg, during reassociation/reauthentication after
2051 * the node (last) disappeared whilst asleep, the driver should
2052 * have unpaused/unsleep'ed the node. So until that is
2053 * sorted out, use this workaround.
2054 */
2055 if (txq == &avp->av_mcastq) {
2056 DPRINTF(sc, ATH_DEBUG_SW_TX,
2057 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2058 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2059 ath_tx_xmit_normal(sc, txq, bf);
2060 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2061 &queue_to_head)) {
2062 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2063 } else {
2064 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2065 ath_tx_xmit_normal(sc, txq, bf);
2066 }
2067 #else
2068 /*
2069 * For now, since there's no software queue,
2070 * direct-dispatch to the hardware.
2071 */
2072 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2073 /*
2074 * Update the current leak count if
2075 * we're leaking frames; and set the
2076 * MORE flag as appropriate.
2077 */
2078 ath_tx_leak_count_update(sc, tid, bf);
2079 ath_tx_xmit_normal(sc, txq, bf);
2080 #endif
2081 done:
2082 return 0;
2083 }
2084
2085 static int
2086 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2087 struct ath_buf *bf, struct mbuf *m0,
2088 const struct ieee80211_bpf_params *params)
2089 {
2090 struct ieee80211com *ic = &sc->sc_ic;
2091 struct ath_hal *ah = sc->sc_ah;
2092 struct ieee80211vap *vap = ni->ni_vap;
2093 int error, ismcast, ismrr;
2094 int keyix, hdrlen, pktlen, try0, txantenna;
2095 u_int8_t rix, txrate;
2096 struct ieee80211_frame *wh;
2097 u_int flags;
2098 HAL_PKT_TYPE atype;
2099 const HAL_RATE_TABLE *rt;
2100 struct ath_desc *ds;
2101 u_int pri;
2102 int o_tid = -1;
2103 int do_override;
2104 uint8_t type, subtype;
2105 int queue_to_head;
2106 struct ath_node *an = ATH_NODE(ni);
2107
2108 ATH_TX_LOCK_ASSERT(sc);
2109
2110 wh = mtod(m0, struct ieee80211_frame *);
2111 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2112 hdrlen = ieee80211_anyhdrsize(wh);
2113 /*
2114 * Packet length must not include any
2115 * pad bytes; deduct them here.
2116 */
2117 /* XXX honor IEEE80211_BPF_DATAPAD */
2118 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
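/*
 * Example (editorial): a 26-byte QoS header is padded to 28 bytes in
 * the mbuf for 32-bit alignment, so hdrlen & 3 == 2 strips that pad
 * again; the 4-byte FCS is added back since the over-the-air length
 * includes it.
 */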
2119
2120 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2121 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2122
2123 ATH_KTR(sc, ATH_KTR_TX, 2,
2124 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2125
2126 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2127 __func__, ismcast);
2128
2129 pri = params->ibp_pri & 3;
2130 /* Override pri if the frame isn't a QoS one */
2131 if (! IEEE80211_QOS_HAS_SEQ(wh))
2132 pri = ath_tx_getac(sc, m0);
2133
2134 /* XXX If it's an ADDBA, override the correct queue */
2135 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2136
2137 /* Map ADDBA to the correct priority */
2138 if (do_override) {
2139 #if 0
2140 DPRINTF(sc, ATH_DEBUG_XMIT,
2141 "%s: overriding tid %d pri %d -> %d\n",
2142 __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2143 #endif
2144 pri = TID_TO_WME_AC(o_tid);
2145 }
2146
2147 /* Handle encryption twiddling if needed */
2148 if (! ath_tx_tag_crypto(sc, ni,
2149 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2150 &hdrlen, &pktlen, &keyix)) {
2151 ieee80211_free_mbuf(m0);
2152 return EIO;
2153 }
2154 /* packet header may have moved, reset our local pointer */
2155 wh = mtod(m0, struct ieee80211_frame *);
2156
2157 /* Do the generic frame setup */
2158 /* XXX should just bzero the bf_state? */
2159 bf->bf_state.bfs_dobaw = 0;
2160
2161 error = ath_tx_dmasetup(sc, bf, m0);
2162 if (error != 0)
2163 return error;
2164 m0 = bf->bf_m; /* NB: may have changed */
2165 wh = mtod(m0, struct ieee80211_frame *);
2166 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2167 bf->bf_node = ni; /* NB: held reference */
2168
2169 /* Always enable CLRDMASK for raw frames for now.. */
2170 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
2171 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
2172 if (params->ibp_flags & IEEE80211_BPF_RTS)
2173 flags |= HAL_TXDESC_RTSENA;
2174 else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2175 /* XXX assume 11g/11n protection? */
2176 bf->bf_state.bfs_doprot = 1;
2177 flags |= HAL_TXDESC_CTSENA;
2178 }
2179 /* XXX leave ismcast to injector? */
2180 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2181 flags |= HAL_TXDESC_NOACK;
2182
2183 rt = sc->sc_currates;
2184 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2185
2186 /* Fetch first rate information */
2187 rix = ath_tx_findrix(sc, params->ibp_rate0);
2188 try0 = params->ibp_try0;
2189
2190 /*
2191 * Override EAPOL rate as appropriate.
2192 */
2193 if (m0->m_flags & M_EAPOL) {
2194 /* XXX? maybe always use long preamble? */
2195 rix = an->an_mgmtrix;
2196 try0 = ATH_TXMAXTRY; /* XXX?too many? */
2197 }
2198
2199 txrate = rt->info[rix].rateCode;
2200 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2201 txrate |= rt->info[rix].shortPreamble;
2202 sc->sc_txrix = rix;
2203 ismrr = (params->ibp_try1 != 0);
2204 txantenna = params->ibp_pri >> 2;
2205 if (txantenna == 0) /* XXX? */
2206 txantenna = sc->sc_txantenna;
2207
2208 /*
2209 * Since ctsrate is fixed, store it away for later
2210 * use when the descriptor fields are being set.
2211 */
2212 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2213 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2214
2215 /*
2216 * NB: we mark all packets as type PSPOLL so the h/w won't
2217 * set the sequence number, duration, etc.
2218 */
2219 atype = HAL_PKT_TYPE_PSPOLL;
2220
2221 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2222 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2223 sc->sc_hwmap[rix].ieeerate, -1);
2224
2225 if (ieee80211_radiotap_active_vap(vap)) {
2226 u_int64_t tsf = ath_hal_gettsf64(ah);
2227
2228 sc->sc_tx_th.wt_tsf = htole64(tsf);
2229 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2230 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2231 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2232 if (m0->m_flags & M_FRAG)
2233 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2234 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2235 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2236 ieee80211_get_node_txpower(ni));
2237 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2238
2239 ieee80211_radiotap_tx(vap, m0);
2240 }
2241
2242 /*
2243 * Formulate first tx descriptor with tx controls.
2244 */
2245 ds = bf->bf_desc;
2246 /* XXX check return value? */
2247
2248 /* Store the decided rate index values away */
2249 bf->bf_state.bfs_pktlen = pktlen;
2250 bf->bf_state.bfs_hdrlen = hdrlen;
2251 bf->bf_state.bfs_atype = atype;
2252 bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2253 ieee80211_get_node_txpower(ni));
2254 bf->bf_state.bfs_txrate0 = txrate;
2255 bf->bf_state.bfs_try0 = try0;
2256 bf->bf_state.bfs_keyix = keyix;
2257 bf->bf_state.bfs_txantenna = txantenna;
2258 bf->bf_state.bfs_txflags = flags;
2259 bf->bf_state.bfs_shpream =
2260 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2261
2262 /* Set local packet state, used to queue packets to hardware */
2263 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2264 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2265 bf->bf_state.bfs_pri = pri;
2266
2267 /* XXX this should be done in ath_tx_setrate() */
2268 bf->bf_state.bfs_ctsrate = 0;
2269 bf->bf_state.bfs_ctsduration = 0;
2270 bf->bf_state.bfs_ismrr = ismrr;
2271
2272 /* Blank the legacy rate array */
2273 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2274
2275 bf->bf_state.bfs_rc[0].rix = rix;
2276 bf->bf_state.bfs_rc[0].tries = try0;
2277 bf->bf_state.bfs_rc[0].ratecode = txrate;
2278
2279 if (ismrr) {
2280 int rix;
2281
2282 rix = ath_tx_findrix(sc, params->ibp_rate1);
2283 bf->bf_state.bfs_rc[1].rix = rix;
2284 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2285
2286 rix = ath_tx_findrix(sc, params->ibp_rate2);
2287 bf->bf_state.bfs_rc[2].rix = rix;
2288 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2289
2290 rix = ath_tx_findrix(sc, params->ibp_rate3);
2291 bf->bf_state.bfs_rc[3].rix = rix;
2292 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2293 }
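/*
 * Example MRR schedule (editorial; hypothetical ibp_* values, rates
 * in 500 kb/s units): rate0=108/try0=4, rate1=48/try1=4,
 * rate2=12/try2=8 asks the hardware to try 54 Mb/s x4, then
 * 24 Mb/s x4, then 6 Mb/s x8 before giving up on the frame.
 */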
2294 /*
2295 * All the required rate control decisions have been made;
2296 * fill in the rc flags.
2297 */
2298 ath_tx_rate_fill_rcflags(sc, bf);
2299
2300 /* NB: no buffered multicast in power save support */
2301
2302 /*
2303 * If we're overriding the ADDBA destination, dump directly
2304 * into the hardware queue, right after any pending
2305 * frames to that node.
2306 */
2307 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2308 __func__, do_override);
2309
2310 #if 1
2311 /*
2312 * Put addba frames in the right place in the right TID/HWQ.
2313 */
2314 if (do_override) {
2315 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2316 /*
2317 * XXX if it's addba frames, should we be leaking
2318 * them out via the frame leak method?
2319 * XXX for now let's not risk it; but we may wish
2320 * to investigate this later.
2321 */
2322 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2323 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2324 &queue_to_head)) {
2325 /* Queue to software queue */
2326 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2327 } else {
2328 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2329 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2330 }
2331 #else
2332 /* Direct-dispatch to the hardware */
2333 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2334 /*
2335 * Update the current leak count if
2336 * we're leaking frames; and set the
2337 * MORE flag as appropriate.
2338 */
2339 ath_tx_leak_count_update(sc, tid, bf);
2340 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2341 #endif
2342 return 0;
2343 }
2344
2345 /*
2346 * Send a raw frame.
2347 *
2348 * This can be called by net80211.
2349 */
2350 int
2351 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2352 const struct ieee80211_bpf_params *params)
2353 {
2354 struct ieee80211com *ic = ni->ni_ic;
2355 struct ath_softc *sc = ic->ic_softc;
2356 struct ath_buf *bf;
2357 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2358 int error = 0;
2359
2360 ATH_PCU_LOCK(sc);
2361 if (sc->sc_inreset_cnt > 0) {
2362 DPRINTF(sc, ATH_DEBUG_XMIT,
2363 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2364 error = EIO;
2365 ATH_PCU_UNLOCK(sc);
2366 goto badbad;
2367 }
2368 sc->sc_txstart_cnt++;
2369 ATH_PCU_UNLOCK(sc);
2370
2371 /* Wake the hardware up already */
2372 ATH_LOCK(sc);
2373 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2374 ATH_UNLOCK(sc);
2375
2376 ATH_TX_LOCK(sc);
2377
2378 if (!sc->sc_running || sc->sc_invalid) {
2379 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2380 __func__, sc->sc_running, sc->sc_invalid);
2381 m_freem(m);
2382 error = ENETDOWN;
2383 goto bad;
2384 }
2385
2386 /*
2387 * Enforce how deep the multicast queue can grow.
2388 *
2389 * XXX duplicated in ath_tx_start().
2390 */
2391 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2392 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2393 > sc->sc_txq_mcastq_maxdepth) {
2394 sc->sc_stats.ast_tx_mcastq_overflow++;
2395 error = ENOBUFS;
2396 }
2397
2398 if (error != 0) {
2399 m_freem(m);
2400 goto bad;
2401 }
2402 }
2403
2404 /*
2405 * Grab a TX buffer and associated resources.
2406 */
2407 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2408 if (bf == NULL) {
2409 sc->sc_stats.ast_tx_nobuf++;
2410 m_freem(m);
2411 error = ENOBUFS;
2412 goto bad;
2413 }
2414 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2415 m, params, bf);
2416
2417 if (params == NULL) {
2418 /*
2419 * Legacy path; interpret frame contents to decide
2420 * precisely how to send the frame.
2421 */
2422 if (ath_tx_start(sc, ni, bf, m)) {
2423 error = EIO; /* XXX */
2424 goto bad2;
2425 }
2426 } else {
2427 /*
2428 * Caller supplied explicit parameters to use in
2429 * sending the frame.
2430 */
2431 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2432 error = EIO; /* XXX */
2433 goto bad2;
2434 }
2435 }
2436 sc->sc_wd_timer = 5;
2437 sc->sc_stats.ast_tx_raw++;
2438
2439 /*
2440 * Update the TIM - if there's anything queued to the
2441 * software queue and power save is enabled, we should
2442 * set the TIM.
2443 */
2444 ath_tx_update_tim(sc, ni, 1);
2445
2446 ATH_TX_UNLOCK(sc);
2447
2448 ATH_PCU_LOCK(sc);
2449 sc->sc_txstart_cnt--;
2450 ATH_PCU_UNLOCK(sc);
2451
2452
2453 /* Put the hardware back to sleep if required */
2454 ATH_LOCK(sc);
2455 ath_power_restore_power_state(sc);
2456 ATH_UNLOCK(sc);
2457
2458 return 0;
2459
2460 bad2:
2461 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2462 "bf=%p",
2463 m,
2464 params,
2465 bf);
2466 ATH_TXBUF_LOCK(sc);
2467 ath_returnbuf_head(sc, bf);
2468 ATH_TXBUF_UNLOCK(sc);
2469
2470 bad:
2471 ATH_TX_UNLOCK(sc);
2472
2473 ATH_PCU_LOCK(sc);
2474 sc->sc_txstart_cnt--;
2475 ATH_PCU_UNLOCK(sc);
2476
2477 /* Put the hardware back to sleep if required */
2478 ATH_LOCK(sc);
2479 ath_power_restore_power_state(sc);
2480 ATH_UNLOCK(sc);
2481
2482 badbad:
2483 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2484 m, params);
2485 sc->sc_stats.ast_tx_raw_fail++;
2486
2487 return error;
2488 }
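
#if 0
/*
 * Illustrative only (editorial sketch, never compiled): one way a
 * caller might feed ath_raw_xmit() explicit transmit parameters.
 * The function name and field values here are hypothetical; only
 * fields consumed by ath_tx_raw_start() above are filled in.
 */
static int
example_raw_send(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211_bpf_params params;

	memset(&params, 0, sizeof(params));
	params.ibp_pri = WME_AC_VO;		/* AC in the low 2 bits */
	params.ibp_rate0 = 12;			/* 6 Mb/s, in 500 kb/s units */
	params.ibp_try0 = ATH_TXMGTTRY;
	params.ibp_flags = IEEE80211_BPF_NOACK;	/* no ACK expected */

	return (ath_raw_xmit(ni, m, &params));
}
#endif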
2489
2490 /* Some helper functions */
2491
2492 /*
2493 * ADDBA (and potentially others) need to be placed in the same
2494 * hardware queue as the TID/node it's relating to. This is so
2495 * it goes out after any pending non-aggregate frames to the
2496 * same node/TID.
2497 *
2498 * If this isn't done, the ADDBA can go out before the frames
2499 * queued in hardware. Even though these frames have sequence
2500 * numbers -earlier- than the ADDBA (and no frames whose
2501 * sequence numbers are after the ADDBA should be transmitted
2502 * before it!), they'll arrive after the ADDBA - and the
2503 * receiving end will simply drop them as being out of the BAW.
2504 *
2505 * The frames can't be appended to the TID software queue - they'll
2506 * never be sent out. So these frames have to be directly
2507 * dispatched to the hardware, rather than queued in software.
2508 * So if this function returns true, the TXQ has to be
2509 * overridden and it has to be directly dispatched.
2510 *
2511 * It's a dirty hack, but someone's gotta do it.
2512 */
2513
2514 /*
2515 * XXX doesn't belong here!
2516 */
2517 static int
2518 ieee80211_is_action(struct ieee80211_frame *wh)
2519 {
2520 /* Type: Management frame? */
2521 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2522 IEEE80211_FC0_TYPE_MGT)
2523 return 0;
2524
2525 /* Subtype: Action frame? */
2526 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2527 IEEE80211_FC0_SUBTYPE_ACTION)
2528 return 0;
2529
2530 return 1;
2531 }
2532
2533 #define MS(_v, _f) (((_v) & _f) >> _f##_S)
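/*
 * Example (editorial): with the stock net80211 definitions
 * (IEEE80211_BAPS_TID == 0x003c, IEEE80211_BAPS_TID_S == 2),
 * MS(0x1014, IEEE80211_BAPS_TID) = (0x1014 & 0x003c) >> 2 = 5,
 * i.e. the ADDBA request is for TID 5.
 */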
2534 /*
2535 * Return an alternate TID for ADDBA request frames.
2536 *
2537 * Yes, this likely should be done in the net80211 layer.
2538 */
2539 static int
2540 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2541 struct ieee80211_node *ni,
2542 struct mbuf *m0, int *tid)
2543 {
2544 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2545 struct ieee80211_action_ba_addbarequest *ia;
2546 uint8_t *frm;
2547 uint16_t baparamset;
2548
2549 /* Not action frame? Bail */
2550 if (! ieee80211_is_action(wh))
2551 return 0;
2552
2553 /* XXX Not needed for frames we send? */
2554 #if 0
2555 /* Correct length? */
2556 if (! ieee80211_parse_action(ni, m))
2557 return 0;
2558 #endif
2559
2560 /* Extract out action frame */
2561 frm = (u_int8_t *)&wh[1];
2562 ia = (struct ieee80211_action_ba_addbarequest *) frm;
2563
2564 /* Not ADDBA? Bail */
2565 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2566 return 0;
2567 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2568 return 0;
2569
2570 /* Extract TID, return it */
2571 baparamset = le16toh(ia->rq_baparamset);
2572 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2573
2574 return 1;
2575 }
2576 #undef MS
2577
2578 /* Per-node software queue operations */
2579
2580 /*
2581 * Add the current packet to the given BAW.
2582 * It is assumed that the current packet
2583 *
2584 * + fits inside the BAW;
2585 * + already has had a sequence number allocated.
2586 *
2587 * Since the BAW status may be modified by both the ath task and
2588 * the net80211/ifnet contexts, the TID must be locked.
2589 */
2590 void
2591 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2592 struct ath_tid *tid, struct ath_buf *bf)
2593 {
2594 int index, cindex;
2595 struct ieee80211_tx_ampdu *tap;
2596
2597 ATH_TX_LOCK_ASSERT(sc);
2598
2599 if (bf->bf_state.bfs_isretried)
2600 return;
2601
2602 tap = ath_tx_get_tx_tid(an, tid->tid);
2603
2604 if (! bf->bf_state.bfs_dobaw) {
2605 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2606 "%s: dobaw=0, seqno=%d, window %d:%d\n",
2607 __func__, SEQNO(bf->bf_state.bfs_seqno),
2608 tap->txa_start, tap->txa_wnd);
2609 }
2610
2611 if (bf->bf_state.bfs_addedbaw)
2612 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2613 "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2614 "baw head=%d tail=%d\n",
2615 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2616 tap->txa_start, tap->txa_wnd, tid->baw_head,
2617 tid->baw_tail);
2618
2619 /*
2620 * Verify that the given sequence number is not outside of the
2621 * BAW. Complain loudly if that's the case.
2622 */
2623 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2624 SEQNO(bf->bf_state.bfs_seqno))) {
2625 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2626 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2627 "baw head=%d tail=%d\n",
2628 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2629 tap->txa_start, tap->txa_wnd, tid->baw_head,
2630 tid->baw_tail);
2631 }
2632
2633 /*
2634 * ni->ni_txseqs[] is the currently allocated seqno.
2635 * the txa state contains the current baw start.
2636 */
2637 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2638 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
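/*
 * Worked example (editorial; assumes a power-of-two ATH_TID_MAX_BUFS
 * of, say, 128): txa_start=4090 and seqno=5 give
 * index = (5 - 4090) mod 4096 = 11, since the sequence space wraps
 * at IEEE80211_SEQ_RANGE; with baw_head=120,
 * cindex = (120 + 11) & 127 = 3.
 */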
2639 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2640 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2641 "baw head=%d tail=%d\n",
2642 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2643 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2644 tid->baw_tail);
2645
2646
2647 #if 0
2648 assert(tid->tx_buf[cindex] == NULL);
2649 #endif
2650 if (tid->tx_buf[cindex] != NULL) {
2651 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2652 "%s: ba packet dup (index=%d, cindex=%d, "
2653 "head=%d, tail=%d)\n",
2654 __func__, index, cindex, tid->baw_head, tid->baw_tail);
2655 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2656 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2657 __func__,
2658 tid->tx_buf[cindex],
2659 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2660 bf,
2661 SEQNO(bf->bf_state.bfs_seqno)
2662 );
2663 }
2664 tid->tx_buf[cindex] = bf;
2665
2666 if (index >= ((tid->baw_tail - tid->baw_head) &
2667 (ATH_TID_MAX_BUFS - 1))) {
2668 tid->baw_tail = cindex;
2669 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2670 }
2671 }
2672
2673 /*
2674 * Flip the BAW buffer entry over from the existing one to the new one.
2675 *
2676 * When software retransmitting a (sub-)frame, it is entirely possible that
2677 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2678 * In that instance the buffer is cloned and the new buffer is used for
2679 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2680 * tracking array to maintain consistency.
2681 */
2682 static void
2683 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2684 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2685 {
2686 int index, cindex;
2687 struct ieee80211_tx_ampdu *tap;
2688 int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2689
2690 ATH_TX_LOCK_ASSERT(sc);
2691
2692 tap = ath_tx_get_tx_tid(an, tid->tid);
2693 index = ATH_BA_INDEX(tap->txa_start, seqno);
2694 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2695
2696 /*
2697 * Just warn for now; if it happens then we should find out
2698 * about it. It's highly likely the aggregation session will
2699 * soon hang.
2700 */
2701 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2702 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2703 "%s: retransmitted buffer"
2704 " has mismatching seqno's, BA session may hang.\n",
2705 __func__);
2706 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2707 "%s: old seqno=%d, new_seqno=%d\n", __func__,
2708 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2709 }
2710
2711 if (tid->tx_buf[cindex] != old_bf) {
2712 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2713 "%s: ath_buf pointer incorrect; "
2714 " has m BA session may hang.\n", __func__);
2715 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2716 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2717 }
2718
2719 tid->tx_buf[cindex] = new_bf;
2720 }
2721
2722 /*
2723 * seq_start - left edge of BAW
2724 * seq_next - current/next sequence number to allocate
2725 *
2726 * Since the BAW status may be modified by both the ath task and
2727 * the net80211/ifnet contexts, the TID must be locked.
2728 */
2729 static void
2730 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2731 struct ath_tid *tid, const struct ath_buf *bf)
2732 {
2733 int index, cindex;
2734 struct ieee80211_tx_ampdu *tap;
2735 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2736
2737 ATH_TX_LOCK_ASSERT(sc);
2738
2739 tap = ath_tx_get_tx_tid(an, tid->tid);
2740 index = ATH_BA_INDEX(tap->txa_start, seqno);
2741 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2742
2743 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2744 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2745 "baw head=%d, tail=%d\n",
2746 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2747 cindex, tid->baw_head, tid->baw_tail);
2748
2749 /*
2750 * If this occurs then we have a big problem - something else
2751 * has slid tap->txa_start along without updating the BAW
2752 * tracking start/end pointers. Thus the TX BAW state is now
2753 * completely busted.
2754 *
2755 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2756 * it's quite possible that a cloned buffer is making its way
2757 * here and causing it to fire off. Disable TDMA for now.
2758 */
2759 if (tid->tx_buf[cindex] != bf) {
2760 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2761 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2762 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2763 tid->tx_buf[cindex],
2764 (tid->tx_buf[cindex] != NULL) ?
2765 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2766 }
2767
2768 tid->tx_buf[cindex] = NULL;
2769
2770 while (tid->baw_head != tid->baw_tail &&
2771 !tid->tx_buf[tid->baw_head]) {
2772 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2773 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2774 }
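/*
 * Example (editorial): if the completion above emptied the slot at
 * baw_head and the next slot is also empty but baw_head+2 is still
 * occupied, the loop slides txa_start (and baw_head) forward by
 * exactly two positions and then stops.
 */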
2775 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2776 "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2777 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2778 }
2779
2780 static void
2781 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2782 struct ath_buf *bf)
2783 {
2784 struct ieee80211_frame *wh;
2785
2786 ATH_TX_LOCK_ASSERT(sc);
2787
2788 if (tid->an->an_leak_count > 0) {
2789 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2790
2791 /*
2792 * Update MORE based on the software/net80211 queue states.
2793 */
2794 if ((tid->an->an_stack_psq > 0)
2795 || (tid->an->an_swq_depth > 0))
2796 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2797 else
2798 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2799
2800 #if defined(__DragonFly__)
2801 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2802 "%s: %s: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2803 __func__,
2804 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
2805 tid->an->an_leak_count,
2806 tid->an->an_stack_psq,
2807 tid->an->an_swq_depth,
2808 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2809 #else
2810 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2811 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2812 __func__,
2813 tid->an->an_node.ni_macaddr,
2814 ":",
2815 tid->an->an_leak_count,
2816 tid->an->an_stack_psq,
2817 tid->an->an_swq_depth,
2818 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2819 #endif
2820
2821 /*
2822 * Re-sync the underlying buffer.
2823 */
2824 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2825 BUS_DMASYNC_PREWRITE);
2826
2827 tid->an->an_leak_count --;
2828 }
2829 }
2830
2831 static int
2832 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2833 {
2834
2835 ATH_TX_LOCK_ASSERT(sc);
2836
2837 if (tid->an->an_leak_count > 0) {
2838 return (1);
2839 }
2840 if (tid->paused)
2841 return (0);
2842 return (1);
2843 }
2844
2845 /*
2846 * Mark the current node/TID as ready to TX.
2847 *
2848 * This is done to make it easy for the software scheduler to
2849 * find which nodes have data to send.
2850 *
2851 * The TXQ lock must be held.
2852 */
2853 void
2854 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2855 {
2856 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2857
2858 ATH_TX_LOCK_ASSERT(sc);
2859
2860 /*
2861 * If we are leaking out a frame to this destination
2862 * for PS-POLL, ensure that we allow scheduling to
2863 * occur.
2864 */
2865 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2866 return; /* paused, can't schedule yet */
2867
2868 if (tid->sched)
2869 return; /* already scheduled */
2870
2871 tid->sched = 1;
2872
2873 #if 0
2874 /*
2875 * If this is a sleeping node we're leaking to, give
2876 * it a higher priority. This is so bad for QoS it hurts.
2877 */
2878 if (tid->an->an_leak_count) {
2879 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2880 } else {
2881 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2882 }
2883 #endif
2884
2885 /*
2886 * We can't do the above - it'll confuse the TXQ software
2887 * scheduler which will keep checking the _head_ TID
2888 * in the list to see if it has traffic. If we queue
2889 * a TID to the head of the list and it doesn't transmit,
2890 * we'll check it again.
2891 *
2892 * So, get the rest of this leaking frames support working
2893 * and reliable first and _then_ optimise it so they're
2894 * pushed out in front of any other pending software
2895 * queued nodes.
2896 */
2897 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2898 }
2899
2900 /*
2901 * Mark the current node as no longer needing to be polled for
2902 * TX packets.
2903 *
2904 * The TXQ lock must be held.
2905 */
2906 static void
2907 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2908 {
2909 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2910
2911 ATH_TX_LOCK_ASSERT(sc);
2912
2913 if (tid->sched == 0)
2914 return;
2915
2916 tid->sched = 0;
2917 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2918 }
2919
2920 /*
2921 * Assign a sequence number manually to the given frame.
2922 *
2923 * This should only be called for A-MPDU TX frames.
2924 */
2925 static ieee80211_seq
2926 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2927 struct ath_buf *bf, struct mbuf *m0)
2928 {
2929 struct ieee80211_frame *wh;
2930 int tid, pri;
2931 ieee80211_seq seqno;
2932 uint8_t subtype;
2933
2934 /* TID lookup */
2935 wh = mtod(m0, struct ieee80211_frame *);
2936 pri = M_WME_GETAC(m0); /* honor classification */
2937 tid = WME_AC_TO_TID(pri);
2938 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2939 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2940
2941 /* XXX Is it a control frame? Ignore */
2942
2943 /* Does the packet require a sequence number? */
2944 if (! IEEE80211_QOS_HAS_SEQ(wh))
2945 return -1;
2946
2947 ATH_TX_LOCK_ASSERT(sc);
2948
2949 /*
2950 * Is it a QOS NULL Data frame? Give it a sequence number from
2951 * the default TID (IEEE80211_NONQOS_TID.)
2952 *
2953 * The RX path of everything I've looked at doesn't include the NULL
2954 * data frame sequence number in the aggregation state updates, so
2955 * assigning it a sequence number there will cause a BAW hole on the
2956 * RX side.
2957 */
2958 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2959 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2960 /* XXX no locking for this TID? This is a bit of a problem. */
2961 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2962 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2963 } else {
2964 /* Manually assign sequence number */
2965 seqno = ni->ni_txseqs[tid];
2966 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2967 }
2968 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
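/*
 * Editorial note: i_seq is a little-endian 16-bit field with the
 * fragment number in the low 4 bits and the sequence number in the
 * upper 12 - hence the shift by IEEE80211_SEQ_SEQ_SHIFT (4).
 */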
2969 M_SEQNO_SET(m0, seqno);
2970
2971 /* Return so caller can do something with it if needed */
2972 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno);
2973 return seqno;
2974 }
2975
2976 /*
2977 * Attempt to direct dispatch an aggregate frame to hardware.
2978 * If the frame is out of BAW, queue.
2979 * Otherwise, schedule it as a single frame.
2980 */
2981 static void
2982 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2983 struct ath_txq *txq, struct ath_buf *bf)
2984 {
2985 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2986 struct ieee80211_tx_ampdu *tap;
2987
2988 ATH_TX_LOCK_ASSERT(sc);
2989
2990 tap = ath_tx_get_tx_tid(an, tid->tid);
2991
2992 /* paused? queue */
2993 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2994 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2995 /* XXX don't sched - we're paused! */
2996 return;
2997 }
2998
2999 /* outside baw? queue */
3000 if (bf->bf_state.bfs_dobaw &&
3001 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3002 SEQNO(bf->bf_state.bfs_seqno)))) {
3003 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3004 ath_tx_tid_sched(sc, tid);
3005 return;
3006 }
3007
3008 /*
3009 * This is a temporary check and should be removed once
3010 * all the relevant code paths have been fixed.
3011 *
3012 * During aggregate retries, it's possible that the head
3013 * frame will fail (which has the bfs_aggr and bfs_nframes
3014 * fields set for said aggregate) and will be retried as
3015 * a single frame. In this instance, the values should
3016 * be reset or the completion code will get upset with you.
3017 */
3018 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3019 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3020 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3021 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3022 bf->bf_state.bfs_aggr = 0;
3023 bf->bf_state.bfs_nframes = 1;
3024 }
3025
3026 /* Update CLRDMASK just before this frame is queued */
3027 ath_tx_update_clrdmask(sc, tid, bf);
3028
3029 /* Direct dispatch to hardware */
3030 ath_tx_do_ratelookup(sc, bf);
3031 ath_tx_calc_duration(sc, bf);
3032 ath_tx_calc_protection(sc, bf);
3033 ath_tx_set_rtscts(sc, bf);
3034 ath_tx_rate_fill_rcflags(sc, bf);
3035 ath_tx_setds(sc, bf);
3036
3037 /* Statistics */
3038 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3039
3040 /* Track per-TID hardware queue depth correctly */
3041 tid->hwq_depth++;
3042
3043 /* Add to BAW */
3044 if (bf->bf_state.bfs_dobaw) {
3045 ath_tx_addto_baw(sc, an, tid, bf);
3046 bf->bf_state.bfs_addedbaw = 1;
3047 }
3048
3049 /* Set completion handler, multi-frame aggregate or not */
3050 bf->bf_comp = ath_tx_aggr_comp;
3051
3052 /*
3053 * Update the current leak count if
3054 * we're leaking frames; and set the
3055 * MORE flag as appropriate.
3056 */
3057 ath_tx_leak_count_update(sc, tid, bf);
3058
3059 /* Hand off to hardware */
3060 ath_tx_handoff(sc, txq, bf);
3061 }
3062
3063 /*
3064 * Attempt to send the packet.
3065 * If the queue isn't busy, direct-dispatch.
3066 * If the queue is busy enough, queue the given packet on the
3067 * relevant software queue.
3068 */
3069 void
3070 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3071 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3072 {
3073 struct ath_node *an = ATH_NODE(ni);
3074 struct ieee80211_frame *wh;
3075 struct ath_tid *atid;
3076 int pri, tid;
3077 struct mbuf *m0 = bf->bf_m;
3078
3079 ATH_TX_LOCK_ASSERT(sc);
3080
3081 /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3082 wh = mtod(m0, struct ieee80211_frame *);
3083 pri = ath_tx_getac(sc, m0);
3084 tid = ath_tx_gettid(sc, m0);
3085 atid = &an->an_tid[tid];
3086
3087 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3088 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3089
3090 /* Set local packet state, used to queue packets to hardware */
3091 /* XXX potentially duplicate info, re-check */
3092 bf->bf_state.bfs_tid = tid;
3093 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3094 bf->bf_state.bfs_pri = pri;
3095
3096 /*
3097 * If the hardware queue isn't busy, dispatch it directly.
3098 * If the hardware queue is busy, software queue it.
3099 * If the TID is paused or the traffic is outside the BAW,
3100 * software queue it.
3101 *
3102 * If the node is in power-save and we're leaking a frame,
3103 * leak a single frame.
3104 */
3105 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3106 /* TID is paused, queue */
3107 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3108 /*
3109 * If the caller requested that it be sent at a high
3110 * priority, queue it at the head of the list.
3111 */
3112 if (queue_to_head)
3113 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3114 else
3115 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3116 } else if (ath_tx_ampdu_pending(sc, an, tid)) {
3117 /* AMPDU pending; queue */
3118 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3119 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3120 /* XXX sched? */
3121 } else if (ath_tx_ampdu_running(sc, an, tid)) {
3122 /* AMPDU running, attempt direct dispatch if possible */
3123
3124 /*
3125 * Always queue the frame to the tail of the list.
3126 */
3127 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3128
3129 /*
3130 * If the hardware queue isn't busy, direct dispatch
3131 * the head frame in the list. Don't schedule the
3132 * TID - let it build some more frames first?
3133 *
3134 * When running A-MPDU, always just check the hardware
3135 * queue depth against the aggregate frame limit.
3136 * We don't want to burst a large number of single frames
3137 * out to the hardware; we want to aggressively hold back.
3138 *
3139 * Otherwise, schedule the TID.
3140 */
3141 /* XXX TXQ locking */
3142 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3143 bf = ATH_TID_FIRST(atid);
3144 ATH_TID_REMOVE(atid, bf, bf_list);
3145
3146 /*
3147 * Ensure it's definitely treated as a non-AMPDU
3148 * frame - this information may have been left
3149 * over from a previous attempt.
3150 */
3151 bf->bf_state.bfs_aggr = 0;
3152 bf->bf_state.bfs_nframes = 1;
3153
3154 /* Queue to the hardware */
3155 ath_tx_xmit_aggr(sc, an, txq, bf);
3156 DPRINTF(sc, ATH_DEBUG_SW_TX,
3157 "%s: xmit_aggr\n",
3158 __func__);
3159 } else {
3160 DPRINTF(sc, ATH_DEBUG_SW_TX,
3161 "%s: ampdu; swq'ing\n",
3162 __func__);
3163
3164 ath_tx_tid_sched(sc, atid);
3165 }
3166 /*
3167 * If we're not doing A-MPDU, be prepared to direct dispatch
3168 * up to both limits if possible. This particular corner
3169 * case may end up with packet starvation between aggregate
3170 * traffic and non-aggregate traffic: we want to ensure
3171 * that non-aggregate stations get a few frames queued to the
3172 * hardware before the aggregate station(s) get their chance.
3173 *
3174 * So if you only ever see a couple of frames direct dispatched
3175 * to the hardware from a non-AMPDU client, check both here
3176 * and in the software queue dispatcher to ensure that those
3177 * non-AMPDU stations get a fair chance to transmit.
3178 */
3179 /* XXX TXQ locking */
3180 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3181 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3182 /* AMPDU not running, attempt direct dispatch */
3183 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3184 /* See if clrdmask needs to be set */
3185 ath_tx_update_clrdmask(sc, atid, bf);
3186
3187 /*
3188 * Update the current leak count if
3189 * we're leaking frames; and set the
3190 * MORE flag as appropriate.
3191 */
3192 ath_tx_leak_count_update(sc, atid, bf);
3193
3194 /*
3195 * Dispatch the frame.
3196 */
3197 ath_tx_xmit_normal(sc, txq, bf);
3198 } else {
3199 /* Busy; queue */
3200 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3201 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3202 ath_tx_tid_sched(sc, atid);
3203 }
3204 }
3205
3206 /*
3207 * Only set the clrdmask bit if none of the nodes are currently
3208 * filtered.
3209 *
3210 * XXX TODO: go through all the callers and check to see
3211 * which are being called in the context of looping over all
3212 * TIDs (eg, if all tids are being paused, resumed, etc.)
3213 * That'll avoid O(n^2) complexity here.
3214 */
3215 static void
3216 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3217 {
3218 int i;
3219
3220 ATH_TX_LOCK_ASSERT(sc);
3221
3222 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3223 if (an->an_tid[i].isfiltered == 1)
3224 return;
3225 }
3226 an->clrdmask = 1;
3227 }
3228
3229 /*
3230 * Configure the per-TID node state.
3231 *
3232 * This likely belongs in if_ath_node.c but I can't think of anywhere
3233 * else to put it just yet.
3234 *
3235 * This sets up the SLISTs and the mutex as appropriate.
3236 */
3237 void
3238 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3239 {
3240 int i, j;
3241 struct ath_tid *atid;
3242
3243 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3244 atid = &an->an_tid[i];
3245
3246 /* XXX now with this bzero(), is the field 0'ing needed? */
3247 bzero(atid, sizeof(*atid));
3248
3249 TAILQ_INIT(&atid->tid_q);
3250 TAILQ_INIT(&atid->filtq.tid_q);
3251 atid->tid = i;
3252 atid->an = an;
3253 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3254 atid->tx_buf[j] = NULL;
3255 atid->baw_head = atid->baw_tail = 0;
3256 atid->paused = 0;
3257 atid->sched = 0;
3258 atid->hwq_depth = 0;
3259 atid->cleanup_inprogress = 0;
3260 if (i == IEEE80211_NONQOS_TID)
3261 atid->ac = ATH_NONQOS_TID_AC;
3262 else
3263 atid->ac = TID_TO_WME_AC(i);
3264 }
3265 an->clrdmask = 1; /* Always start by setting this bit */
3266 }
3267
3268 /*
3269 * Pause the current TID. This stops packets from being transmitted
3270 * on it.
3271 *
3272 * Since this is also called from upper layers as well as the driver,
3273 * it will get the TID lock.
3274 */
3275 static void
3276 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3277 {
3278
3279 ATH_TX_LOCK_ASSERT(sc);
3280 tid->paused++;
3281 #if defined(__DragonFly__)
3282 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%s]: tid=%d, paused = %d\n",
3283 __func__,
3284 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3285 tid->tid,
3286 tid->paused);
3287 #else
3288 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3289 __func__,
3290 tid->an->an_node.ni_macaddr, ":",
3291 tid->tid,
3292 tid->paused);
3293 #endif
3294 }
3295
3296 /*
3297 * Unpause the current TID, and schedule it if needed.
3298 */
3299 static void
3300 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3301 {
3302 ATH_TX_LOCK_ASSERT(sc);
3303
3304 /*
3305 * There are some odd places where ath_tx_tid_resume() is called
3306 * when it shouldn't be; this works around that particular issue
3307 * until it's actually resolved.
3308 */
3309 if (tid->paused == 0) {
3310 #if defined(__DragonFly__)
3311 device_printf(sc->sc_dev,
3312 "%s: [%s]: tid=%d, paused=0?\n",
3313 __func__,
3314 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3315 tid->tid);
3316 #else
3317 device_printf(sc->sc_dev,
3318 "%s: [%6D]: tid=%d, paused=0?\n",
3319 __func__,
3320 tid->an->an_node.ni_macaddr, ":",
3321 tid->tid);
3322 #endif
3323 } else {
3324 tid->paused--;
3325 }
3326
3327 #if defined(__DragonFly__)
3328 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3329 "%s: [%s]: tid=%d, unpaused = %d\n",
3330 __func__,
3331 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3332 tid->tid,
3333 tid->paused);
3334 #else
3335 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3336 "%s: [%6D]: tid=%d, unpaused = %d\n",
3337 __func__,
3338 tid->an->an_node.ni_macaddr, ":",
3339 tid->tid,
3340 tid->paused);
3341 #endif
3342
3343 if (tid->paused)
3344 return;
3345
3346 /*
3347 * Override the clrdmask configuration for the next frame
3348 * from this TID, just to get the ball rolling.
3349 */
3350 ath_tx_set_clrdmask(sc, tid->an);
3351
3352 if (tid->axq_depth == 0)
3353 return;
3354
3355 /* XXX isfiltered shouldn't ever be 1 at this point */
3356 if (tid->isfiltered == 1) {
3357 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3358 __func__);
3359 return;
3360 }
3361
3362 ath_tx_tid_sched(sc, tid);
3363
3364 /*
3365 * Queue the software TX scheduler.
3366 */
3367 ath_tx_swq_kick(sc);
3368 }
3369
3370 /*
3371 * Add the given ath_buf to the TID filtered frame list.
3372 * This requires the TID be filtered.
3373 */
3374 static void
3375 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3376 struct ath_buf *bf)
3377 {
3378
3379 ATH_TX_LOCK_ASSERT(sc);
3380
3381 if (!tid->isfiltered)
3382 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3383 __func__);
3384
3385 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3386
3387 /* Set the retry bit and bump the retry counter */
3388 ath_tx_set_retry(sc, bf);
3389 sc->sc_stats.ast_tx_swfiltered++;
3390
3391 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3392 }
3393
3394 /*
3395 * Handle a completed filtered frame from the given TID.
3396 * This just enables/pauses the filtered frame state if required
3397 * and appends the filtered frame to the filtered queue.
3398 */
3399 static void
3400 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3401 struct ath_buf *bf)
3402 {
3403
3404 ATH_TX_LOCK_ASSERT(sc);
3405
3406 if (! tid->isfiltered) {
3407 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3408 __func__, tid->tid);
3409 tid->isfiltered = 1;
3410 ath_tx_tid_pause(sc, tid);
3411 }
3412
3413 /* Add the frame to the filter queue */
3414 ath_tx_tid_filt_addbuf(sc, tid, bf);
3415 }
3416
3417 /*
3418 * Complete the filtered frame TX completion.
3419 *
3420 * If there are no more frames in the hardware queue, unpause/unfilter
3421 * the TID if applicable. Otherwise we will wait for a node PS transition
3422 * to unfilter.
3423 */
3424 static void
3425 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3426 {
3427 struct ath_buf *bf;
3428 int do_resume = 0;
3429
3430 ATH_TX_LOCK_ASSERT(sc);
3431
3432 if (tid->hwq_depth != 0)
3433 return;
3434
3435 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3436 __func__, tid->tid);
3437 if (tid->isfiltered == 1) {
3438 tid->isfiltered = 0;
3439 do_resume = 1;
3440 }
3441
3442 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3443 ath_tx_set_clrdmask(sc, tid->an);
3444
3445 /* XXX this is really quite inefficient */
3446 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3447 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3448 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3449 }
3450
3451 /* And only resume if we had paused before */
3452 if (do_resume)
3453 ath_tx_tid_resume(sc, tid);
3454 }
3455
3456 /*
3457 * Called when a single (aggregate or otherwise) frame is completed.
3458 *
3459 * Returns 0 if the buffer could be added to the filtered list
3460 * (cloned or otherwise), 1 if the buffer couldn't be added to the
3461 * filtered list (failed clone; expired retry) and the caller should
3462 * free it and handle it like a failure (eg by sending a BAR.)
3463 *
3464 * Since the buffer may be cloned, bf must not be touched after
3465 * this call if the return value is 0.
3466 */
3467 static int
3468 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3469 struct ath_buf *bf)
3470 {
3471 struct ath_buf *nbf;
3472 int retval;
3473
3474 ATH_TX_LOCK_ASSERT(sc);
3475
3476 /*
3477 * Don't allow a filtered frame to live forever.
3478 */
3479 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3480 sc->sc_stats.ast_tx_swretrymax++;
3481 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3482 "%s: bf=%p, seqno=%d, exceeded retries\n",
3483 __func__,
3484 bf,
3485 SEQNO(bf->bf_state.bfs_seqno));
3486 retval = 1; /* error */
3487 goto finish;
3488 }
3489
3490 /*
3491 * A busy buffer can't be added to the retry list.
3492 * It needs to be cloned.
3493 */
3494 if (bf->bf_flags & ATH_BUF_BUSY) {
3495 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3496 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3497 "%s: busy buffer clone: %p -> %p\n",
3498 __func__, bf, nbf);
3499 } else {
3500 nbf = bf;
3501 }
3502
3503 if (nbf == NULL) {
3504 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3505 "%s: busy buffer couldn't be cloned (%p)!\n",
3506 __func__, bf);
3507 retval = 1; /* error */
3508 } else {
3509 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3510 retval = 0; /* ok */
3511 }
3512 finish:
3513 ath_tx_tid_filt_comp_complete(sc, tid);
3514
3515 return (retval);
3516 }
3517
3518 static void
3519 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3520 struct ath_buf *bf_first, ath_bufhead *bf_q)
3521 {
3522 struct ath_buf *bf, *bf_next, *nbf;
3523
3524 ATH_TX_LOCK_ASSERT(sc);
3525
3526 bf = bf_first;
3527 while (bf) {
3528 bf_next = bf->bf_next;
3529 bf->bf_next = NULL; /* Remove it from the aggr list */
3530
3531 /*
3532 * Don't allow a filtered frame to live forever.
3533 */
3534 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3535 sc->sc_stats.ast_tx_swretrymax++;
3536 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3537 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3538 __func__,
3539 tid->tid,
3540 bf,
3541 SEQNO(bf->bf_state.bfs_seqno));
3542 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3543 goto next;
3544 }
3545
3546 if (bf->bf_flags & ATH_BUF_BUSY) {
3547 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3548 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3549 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3550 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3551 } else {
3552 nbf = bf;
3553 }
3554
3555 /*
3556 * If the buffer couldn't be cloned, add it to bf_q;
3557 * the caller will free the buffer(s) as required.
3558 */
3559 if (nbf == NULL) {
3560 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3561 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3562 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3563 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3564 } else {
3565 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3566 }
3567 next:
3568 bf = bf_next;
3569 }
3570
3571 ath_tx_tid_filt_comp_complete(sc, tid);
3572 }
3573
3574 /*
3575 * Suspend the queue because we need to TX a BAR.
3576 */
3577 static void
3578 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3579 {
3580
3581 ATH_TX_LOCK_ASSERT(sc);
3582
3583 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3584 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3585 __func__,
3586 tid->tid,
3587 tid->bar_wait,
3588 tid->bar_tx);
3589
3590 /* We shouldn't be called when bar_tx is 1 */
3591 if (tid->bar_tx) {
3592 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3593 "%s: bar_tx is 1?!\n", __func__);
3594 }
3595
3596 /* If we've already been called, just be patient. */
3597 if (tid->bar_wait)
3598 return;
3599
3600 /* Wait! */
3601 tid->bar_wait = 1;
3602
3603 /* Only one pause, no matter how many frames fail */
3604 ath_tx_tid_pause(sc, tid);
3605 }
3606
3607 /*
3608 * We've finished with BAR handling - either we succeeded or
3609 * failed. Either way, unsuspend TX.
3610 */
3611 static void
3612 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3613 {
3614
3615 ATH_TX_LOCK_ASSERT(sc);
3616
3617 #if defined(__DragonFly__)
3618 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3619 "%s: %s: TID=%d, called\n",
3620 __func__,
3621 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3622 tid->tid);
3623 #else
3624 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3625 "%s: %6D: TID=%d, called\n",
3626 __func__,
3627 tid->an->an_node.ni_macaddr,
3628 ":",
3629 tid->tid);
3630 #endif
3631
3632 if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3633 #if defined(__DragonFly__)
3634 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3635 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3636 __func__,
3637 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3638 tid->tid, tid->bar_tx, tid->bar_wait);
3639 #else
3640 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3641 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3642 __func__, tid->an->an_node.ni_macaddr, ":",
3643 tid->tid, tid->bar_tx, tid->bar_wait);
3644 #endif
3645 }
3646
3647 tid->bar_tx = tid->bar_wait = 0;
3648 ath_tx_tid_resume(sc, tid);
3649 }
3650
3651 /*
3652 * Return whether we're ready to TX a BAR frame.
3653 *
3654 * Requires the TID lock be held.
3655 */
3656 static int
3657 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3658 {
3659
3660 ATH_TX_LOCK_ASSERT(sc);
3661
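	/* Not ready: no BAR is pending, or frames are still in flight */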
3662 if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3663 return (0);
3664
3665 #if defined(__DragonFly__)
3666 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3667 "%s: %s: TID=%d, bar ready\n",
3668 __func__,
3669 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3670 tid->tid);
3671 #else
3672 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3673 "%s: %6D: TID=%d, bar ready\n",
3674 __func__,
3675 tid->an->an_node.ni_macaddr,
3676 ":",
3677 tid->tid);
3678 #endif
3679
3680 return (1);
3681 }
3682
3683 /*
3684 * Check whether the current TID is ready to have a BAR
3685 * TXed and if so, do the TX.
3686 *
3687 * Since the TID/TXQ lock can't be held during a call to
3688 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3689 * sending the BAR and locking it again.
3690 *
3691 * Eventually, the code to send the BAR should be broken out
3692 * from this routine so the lock doesn't have to be reacquired
3693 * just to be immediately dropped by the caller.
3694 */
3695 static void
3696 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3697 {
3698 struct ieee80211_tx_ampdu *tap;
3699
3700 ATH_TX_LOCK_ASSERT(sc);
3701
3702 #if defined(__DragonFly__)
3703 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3704 "%s: %s: TID=%d, called\n",
3705 __func__,
3706 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3707 tid->tid);
3708 #else
3709 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3710 "%s: %6D: TID=%d, called\n",
3711 __func__,
3712 tid->an->an_node.ni_macaddr,
3713 ":",
3714 tid->tid);
3715 #endif
3716
3717 tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3718
3719 /*
3720 * This is an error condition!
3721 */
3722 if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3723 #if defined(__DragonFly__)
3724 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3725 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3726 __func__,
3727 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3728 tid->tid, tid->bar_tx, tid->bar_wait);
3729 #else
3730 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3731 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3732 __func__, tid->an->an_node.ni_macaddr, ":",
3733 tid->tid, tid->bar_tx, tid->bar_wait);
3734 #endif
3735 return;
3736 }
3737
3738 /* Don't do anything if we still have pending frames */
3739 if (tid->hwq_depth > 0) {
3740 #if defined(__DragonFly__)
3741 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3742 "%s: %s: TID=%d, hwq_depth=%d, waiting\n",
3743 __func__,
3744 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3745 tid->tid,
3746 tid->hwq_depth);
3747 #else
3748 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3749 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3750 __func__,
3751 tid->an->an_node.ni_macaddr,
3752 ":",
3753 tid->tid,
3754 tid->hwq_depth);
3755 #endif
3756 return;
3757 }
3758
3759 /* We're now about to TX */
3760 tid->bar_tx = 1;
3761
3762 /*
3763 * Override the clrdmask configuration for the next frame,
3764 * just to get the ball rolling.
3765 */
3766 ath_tx_set_clrdmask(sc, tid->an);
3767
3768 /*
3769 * Calculate new BAW left edge, now that all frames have either
3770 * succeeded or failed.
3771 *
3772 * XXX verify this is _actually_ the valid value to begin at!
3773 */
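	/*
	 * (tap->txa_start has been advanced by ath_tx_update_baw() as
	 * frames completed; the BAR below asks the receiver to move
	 * its reorder window up to this sequence number.)
	 */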
3774 #if defined(__DragonFly__)
3775 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3776 "%s: %s: TID=%d, new BAW left edge=%d\n",
3777 __func__,
3778 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3779 tid->tid,
3780 tap->txa_start);
3781 #else
3782 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3783 "%s: %6D: TID=%d, new BAW left edge=%d\n",
3784 __func__,
3785 tid->an->an_node.ni_macaddr,
3786 ":",
3787 tid->tid,
3788 tap->txa_start);
3789 #endif
3790
3791 /* Try sending the BAR frame */
3792 /* We can't hold the lock here! */
3793
3794 ATH_TX_UNLOCK(sc);
3795 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3796 /* Success? Now we wait for notification that it's done */
3797 ATH_TX_LOCK(sc);
3798 return;
3799 }
3800
3801 /* Failure? For now, warn loudly and continue */
3802 ATH_TX_LOCK(sc);
3803 #if defined(__DragonFly__)
3804 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3805 "%s: %s: TID=%d, failed to TX BAR, continue!\n",
3806 __func__,
3807 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3808 tid->tid);
3809 #else
3810 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3811 "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3812 __func__, tid->an->an_node.ni_macaddr, ":",
3813 tid->tid);
3814 #endif
3815 ath_tx_tid_bar_unsuspend(sc, tid);
3816 }
3817
3818 static void
3819 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3820 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3821 {
3822
3823 ATH_TX_LOCK_ASSERT(sc);
3824
3825 /*
3826 * If the current TID is running AMPDU, update
3827 * the BAW.
3828 */
3829 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3830 bf->bf_state.bfs_dobaw) {
3831 /*
3832 * Only remove the frame from the BAW if it's
3833 * been transmitted at least once; this means
3834 * the frame was in the BAW to begin with.
3835 */
3836 if (bf->bf_state.bfs_retries > 0) {
3837 ath_tx_update_baw(sc, an, tid, bf);
3838 bf->bf_state.bfs_dobaw = 0;
3839 }
3840 #if 0
3841 /*
3842 * This has become a non-fatal error now
3843 */
3844 if (! bf->bf_state.bfs_addedbaw)
3845 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3846 "%s: wasn't added: seqno %d\n",
3847 __func__, SEQNO(bf->bf_state.bfs_seqno));
3848 #endif
3849 }
3850
3851 /* Strip it out of an aggregate list if it was in one */
3852 bf->bf_next = NULL;
3853
3854 /* Insert on the free queue to be freed by the caller */
3855 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3856 }
3857
3858 static void
3859 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3860 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3861 {
3862 struct ieee80211_node *ni = &an->an_node;
3863 struct ath_txq *txq;
3864 struct ieee80211_tx_ampdu *tap;
3865
3866 txq = sc->sc_ac2q[tid->ac];
3867 tap = ath_tx_get_tx_tid(an, tid->tid);
3868
3869 #if defined(__DragonFly__)
3870 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3871 "%s: %s: %s: bf=%p: addbaw=%d, dobaw=%d, "
3872 "seqno=%d, retry=%d\n",
3873 __func__,
3874 pfx,
3875 ath_hal_ether_sprintf(ni->ni_macaddr),
3876 bf,
3877 bf->bf_state.bfs_addedbaw,
3878 bf->bf_state.bfs_dobaw,
3879 SEQNO(bf->bf_state.bfs_seqno),
3880 bf->bf_state.bfs_retries);
3881 #else
3882 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3883 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3884 "seqno=%d, retry=%d\n",
3885 __func__,
3886 pfx,
3887 ni->ni_macaddr,
3888 ":",
3889 bf,
3890 bf->bf_state.bfs_addedbaw,
3891 bf->bf_state.bfs_dobaw,
3892 SEQNO(bf->bf_state.bfs_seqno),
3893 bf->bf_state.bfs_retries);
3894 #endif
3895 #if defined(__DragonFly__)
3896 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3897 "%s: %s: %s: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3898 __func__,
3899 pfx,
3900 ath_hal_ether_sprintf(ni->ni_macaddr),
3901 bf,
3902 txq->axq_qnum,
3903 txq->axq_depth,
3904 txq->axq_aggr_depth);
3905 #else
3906 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3907 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3908 __func__,
3909 pfx,
3910 ni->ni_macaddr,
3911 ":",
3912 bf,
3913 txq->axq_qnum,
3914 txq->axq_depth,
3915 txq->axq_aggr_depth);
3916 #endif
3917 #if defined(__DragonFly__)
3918 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3919 "%s: %s: %s: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3920 "isfiltered=%d\n",
3921 __func__,
3922 pfx,
3923 ath_hal_ether_sprintf(ni->ni_macaddr),
3924 bf,
3925 tid->axq_depth,
3926 tid->hwq_depth,
3927 tid->bar_wait,
3928 tid->isfiltered);
3929 #else
3930 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3931 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3932 "isfiltered=%d\n",
3933 __func__,
3934 pfx,
3935 ni->ni_macaddr,
3936 ":",
3937 bf,
3938 tid->axq_depth,
3939 tid->hwq_depth,
3940 tid->bar_wait,
3941 tid->isfiltered);
3942 #endif
3943 #if defined(__DragonFly__)
3944 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3945 "%s: %s: %s: tid %d: "
3946 "sched=%d, paused=%d, "
3947 "incomp=%d, baw_head=%d, "
3948 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3949 __func__,
3950 pfx,
3951 ath_hal_ether_sprintf(ni->ni_macaddr),
3952 tid->tid,
3953 tid->sched, tid->paused,
3954 tid->incomp, tid->baw_head,
3955 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3956 ni->ni_txseqs[tid->tid]);
3957 #else
3958 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3959 "%s: %s: %6D: tid %d: "
3960 "sched=%d, paused=%d, "
3961 "incomp=%d, baw_head=%d, "
3962 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3963 __func__,
3964 pfx,
3965 ni->ni_macaddr,
3966 ":",
3967 tid->tid,
3968 tid->sched, tid->paused,
3969 tid->incomp, tid->baw_head,
3970 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3971 ni->ni_txseqs[tid->tid]);
3972 #endif
3973 /* XXX Dump the frame, see what it is? */
3974 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3975 ieee80211_dump_pkt(ni->ni_ic,
3976 mtod(bf->bf_m, const uint8_t *),
3977 bf->bf_m->m_len, 0, -1);
3978 }
3979
3980 /*
3981 * Free any packets currently pending in the software TX queue.
3982 *
3983 * This will be called when a node is being deleted.
3984 *
3985 * It can also be called on an active node during an interface
3986 * reset or state transition.
3987 *
3988 * (From Linux/reference):
3989 *
3990 * TODO: For frame(s) that are in the retry state, we will reuse the
3991 * sequence number(s) without setting the retry bit. The
3992 * alternative is to give up on these and BAR the receiver's window
3993 * forward.
3994 */
3995 static void
3996 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3997 struct ath_tid *tid, ath_bufhead *bf_cq)
3998 {
3999 struct ath_buf *bf;
4000 struct ieee80211_tx_ampdu *tap;
4001 struct ieee80211_node *ni = &an->an_node;
4002 int t;
4003
4004 tap = ath_tx_get_tx_tid(an, tid->tid);
4005
4006 ATH_TX_LOCK_ASSERT(sc);
4007
4008 /* Walk the queue, free frames */
4009 t = 0;
4010 for (;;) {
4011 bf = ATH_TID_FIRST(tid);
4012 if (bf == NULL) {
4013 break;
4014 }
4015
4016 if (t == 0) {
4017 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
4018 // t = 1;
4019 }
4020
4021 ATH_TID_REMOVE(tid, bf, bf_list);
4022 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
4023 }
4024
4025 /* And now, drain the filtered frame queue */
4026 t = 0;
4027 for (;;) {
4028 bf = ATH_TID_FILT_FIRST(tid);
4029 if (bf == NULL)
4030 break;
4031
4032 if (t == 0) {
4033 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
4034 // t = 1;
4035 }
4036
4037 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
4038 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
4039 }
4040
4041 /*
4042 * Override the clrdmask configuration for the next frame
4043 * in case there is some future transmission, just to get
4044 * the ball rolling.
4045 *
4046 * This won't hurt things if the TID is about to be freed.
4047 */
4048 ath_tx_set_clrdmask(sc, tid->an);
4049
4050 /*
4051 * Now that it's completed, grab the TID lock and update
4052 * the sequence number and BAW window.
4053 * Because sequence numbers have been assigned to frames
4054 * that haven't been sent yet, it's entirely possible
4055 * we'll be called with some pending frames that have not
4056 * been transmitted.
4057 *
4058 * The cleaner solution is to do the sequence number allocation
4059 * when the packet is first transmitted - and thus the "retries"
4060 * check above would be enough to update the BAW/seqno.
4061 */
4062
4063 /* But don't do it for non-QoS TIDs */
4064 if (tap) {
4065 #if 1
4066 #if defined(__DragonFly__)
4067 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4068 "%s: %s: node %p: TID %d: sliding BAW left edge to %d\n",
4069 __func__,
4070 ath_hal_ether_sprintf(ni->ni_macaddr),
4071 an,
4072 tid->tid,
4073 tap->txa_start);
4074 #else
4075 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4076 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
4077 __func__,
4078 ni->ni_macaddr,
4079 ":",
4080 an,
4081 tid->tid,
4082 tap->txa_start);
4083 #endif
4084 #endif
4085 ni->ni_txseqs[tid->tid] = tap->txa_start;
4086 tid->baw_tail = tid->baw_head;
4087 }
4088 }
4089
4090 /*
4091 * Reset the TID state. This must be only called once the node has
4092 * had its frames flushed from this TID, to ensure that no other
4093 * pause / unpause logic can kick in.
4094 */
4095 static void
4096 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
4097 {
4098
4099 #if 0
4100 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
4101 tid->paused = tid->sched = tid->addba_tx_pending = 0;
4102 tid->incomp = tid->cleanup_inprogress = 0;
4103 #endif
4104
4105 /*
4106 * If we have a bar_wait set, we need to unpause the TID
4107 * here. Otherwise once cleanup has finished, the TID won't
4108 * have the right paused counter.
4109 *
4110 * XXX I'm not going through resume here - I don't want the
4111 	 * node to be rescheduled just yet. This however should be
4112 * methodized!
4113 */
4114 if (tid->bar_wait) {
4115 if (tid->paused > 0) {
4116 			tid->paused--;
4117 }
4118 }
4119
4120 /*
4121 * XXX same with a currently filtered TID.
4122 *
4123 * Since this is being called during a flush, we assume that
4124 * the filtered frame list is actually empty.
4125 *
4126 * XXX TODO: add in a check to ensure that the filtered queue
4127 * depth is actually 0!
4128 */
4129 if (tid->isfiltered) {
4130 if (tid->paused > 0) {
4131 			tid->paused--;
4132 }
4133 }
4134
4135 /*
4136 * Clear BAR, filtered frames, scheduled and ADDBA pending.
4137 * The TID may be going through cleanup from the last association
4138 * where things in the BAW are still in the hardware queue.
4139 */
4140 tid->bar_wait = 0;
4141 tid->bar_tx = 0;
4142 tid->isfiltered = 0;
4143 tid->sched = 0;
4144 tid->addba_tx_pending = 0;
4145
4146 /*
4147 * XXX TODO: it may just be enough to walk the HWQs and mark
4148 * frames for that node as non-aggregate; or mark the ath_node
4149 * with something that indicates that aggregation is no longer
4150 * occurring. Then we can just toss the BAW complaints and
4151 * do a complete hard reset of state here - no pause, no
4152 * complete counter, etc.
4153 */
4154
4155 }
4156
4157 /*
4158 * Flush all software queued packets for the given node.
4159 *
4160 * This occurs when a completion handler frees the last buffer
4161 * for a node, and the node is thus freed. This causes the node
4162 * to be cleaned up, which ends up calling ath_tx_node_flush.
4163 */
4164 void
4165 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4166 {
4167 int tid;
4168 ath_bufhead bf_cq;
4169 struct ath_buf *bf;
4170
4171 TAILQ_INIT(&bf_cq);
4172
4173 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4174 &an->an_node);
4175
4176 ATH_TX_LOCK(sc);
4177 #if defined(__DragonFly__)
4178 DPRINTF(sc, ATH_DEBUG_NODE,
4179 "%s: %s: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4180 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4181 __func__,
4182 ath_hal_ether_sprintf(an->an_node.ni_macaddr),
4183 an->an_is_powersave,
4184 an->an_stack_psq,
4185 an->an_tim_set,
4186 an->an_swq_depth,
4187 an->clrdmask,
4188 an->an_leak_count);
4189 #else
4190 DPRINTF(sc, ATH_DEBUG_NODE,
4191 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4192 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4193 __func__,
4194 an->an_node.ni_macaddr,
4195 ":",
4196 an->an_is_powersave,
4197 an->an_stack_psq,
4198 an->an_tim_set,
4199 an->an_swq_depth,
4200 an->clrdmask,
4201 an->an_leak_count);
4202 #endif
4203
4204 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4205 struct ath_tid *atid = &an->an_tid[tid];
4206
4207 /* Free packets */
4208 ath_tx_tid_drain(sc, an, atid, &bf_cq);
4209
4210 /* Remove this tid from the list of active tids */
4211 ath_tx_tid_unsched(sc, atid);
4212
4213 /* Reset the per-TID pause, BAR, etc state */
4214 ath_tx_tid_reset(sc, atid);
4215 }
4216
4217 /*
4218 * Clear global leak count
4219 */
4220 an->an_leak_count = 0;
4221 ATH_TX_UNLOCK(sc);
4222
4223 /* Handle completed frames */
4224 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4225 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4226 ath_tx_default_comp(sc, bf, 0);
4227 }
4228 }
4229
4230 /*
4231 * Drain all the software TXQs currently with traffic queued.
4232 */
4233 void
4234 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4235 {
4236 struct ath_tid *tid;
4237 ath_bufhead bf_cq;
4238 struct ath_buf *bf;
4239
4240 TAILQ_INIT(&bf_cq);
4241 ATH_TX_LOCK(sc);
4242
4243 /*
4244 * Iterate over all active tids for the given txq,
4245 * flushing and unsched'ing them
4246 */
4247 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4248 tid = TAILQ_FIRST(&txq->axq_tidq);
4249 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4250 ath_tx_tid_unsched(sc, tid);
4251 }
4252
4253 ATH_TX_UNLOCK(sc);
4254
4255 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4256 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4257 ath_tx_default_comp(sc, bf, 0);
4258 }
4259 }
4260
4261 /*
4262 * Handle completion of non-aggregate session frames.
4263 *
4264 * This (currently) doesn't implement software retransmission of
4265 * non-aggregate frames!
4266 *
4267 * Software retransmission of non-aggregate frames needs to obey
4268 * the strict sequence number ordering, and drop any frames that
4269 * will fail this.
4270 *
4271 * For now, filtered frames and frame transmission will cause
4272 * all kinds of issues. So we don't support them.
4273 *
4274 * So anyone queuing frames via ath_tx_normal_xmit() or
4275 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4276 */
4277 void
4278 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4279 {
4280 struct ieee80211_node *ni = bf->bf_node;
4281 struct ath_node *an = ATH_NODE(ni);
4282 int tid = bf->bf_state.bfs_tid;
4283 struct ath_tid *atid = &an->an_tid[tid];
4284 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4285
4286 /* The TID state is protected behind the TXQ lock */
4287 ATH_TX_LOCK(sc);
4288
4289 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4290 __func__, bf, fail, atid->hwq_depth - 1);
4291
4292 atid->hwq_depth--;
4293
4294 #if 0
4295 /*
4296 * If the frame was filtered, stick it on the filter frame
4297 * queue and complain about it. It shouldn't happen!
4298 */
4299 if ((ts->ts_status & HAL_TXERR_FILT) ||
4300 (ts->ts_status != 0 && atid->isfiltered)) {
4301 DPRINTF(sc, ATH_DEBUG_SW_TX,
4302 "%s: isfiltered=%d, ts_status=%d: huh?\n",
4303 __func__,
4304 atid->isfiltered,
4305 ts->ts_status);
4306 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4307 }
4308 #endif
4309 if (atid->isfiltered)
4310 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4311 if (atid->hwq_depth < 0)
4312 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4313 __func__, atid->hwq_depth);
4314
4315 /* If the TID is being cleaned up, track things */
4316 /* XXX refactor! */
4317 if (atid->cleanup_inprogress) {
4318 atid->incomp--;
4319 if (atid->incomp == 0) {
4320 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4321 "%s: TID %d: cleaned up! resume!\n",
4322 __func__, tid);
4323 atid->cleanup_inprogress = 0;
4324 ath_tx_tid_resume(sc, atid);
4325 }
4326 }
4327
4328 /*
4329 * If the queue is filtered, potentially mark it as complete
4330 * and reschedule it as needed.
4331 *
4332 * This is required as there may be a subsequent TX descriptor
4333 * for this end-node that has CLRDMASK set, so it's quite possible
4334 * that a filtered frame will be followed by a non-filtered
4335 * (complete or otherwise) frame.
4336 *
4337 * XXX should we do this before we complete the frame?
4338 */
4339 if (atid->isfiltered)
4340 ath_tx_tid_filt_comp_complete(sc, atid);
4341 ATH_TX_UNLOCK(sc);
4342
4343 /*
4344 * punt to rate control if we're not being cleaned up
4345 * during a hw queue drain and the frame wanted an ACK.
4346 */
4347 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4348 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4349 ts, bf->bf_state.bfs_pktlen,
4350 1, (ts->ts_status == 0) ? 0 : 1);
4351
4352 ath_tx_default_comp(sc, bf, fail);
4353 }
4354
4355 /*
4356 * Handle cleanup of aggregate session packets that aren't
4357 * an A-MPDU.
4358 *
4359 * There's no need to update the BAW here - the session is being
4360 * torn down.
4361 */
4362 static void
4363 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4364 {
4365 struct ieee80211_node *ni = bf->bf_node;
4366 struct ath_node *an = ATH_NODE(ni);
4367 int tid = bf->bf_state.bfs_tid;
4368 struct ath_tid *atid = &an->an_tid[tid];
4369
4370 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4371 __func__, tid, atid->incomp);
4372
4373 ATH_TX_LOCK(sc);
4374 atid->incomp--;
4375
4376 /* XXX refactor! */
4377 if (bf->bf_state.bfs_dobaw) {
4378 ath_tx_update_baw(sc, an, atid, bf);
4379 if (!bf->bf_state.bfs_addedbaw)
4380 DPRINTF(sc, ATH_DEBUG_SW_TX,
4381 "%s: wasn't added: seqno %d\n",
4382 __func__, SEQNO(bf->bf_state.bfs_seqno));
4383 }
4384
4385 if (atid->incomp == 0) {
4386 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4387 "%s: TID %d: cleaned up! resume!\n",
4388 __func__, tid);
4389 atid->cleanup_inprogress = 0;
4390 ath_tx_tid_resume(sc, atid);
4391 }
4392 ATH_TX_UNLOCK(sc);
4393
4394 ath_tx_default_comp(sc, bf, 0);
4395 }
4396
4397
4398 /*
4399 * This as it currently stands is a bit dumb. Ideally we'd just
4400 * fail the frame the normal way and have it permanently fail
4401 * via the normal aggregate completion path.
4402 */
4403 static void
4404 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4405 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4406 {
4407 struct ath_tid *atid = &an->an_tid[tid];
4408 struct ath_buf *bf, *bf_next;
4409
4410 ATH_TX_LOCK_ASSERT(sc);
4411
4412 /*
4413 * Remove this frame from the queue.
4414 */
4415 ATH_TID_REMOVE(atid, bf_head, bf_list);
4416
4417 /*
4418 * Loop over all the frames in the aggregate.
4419 */
4420 bf = bf_head;
4421 while (bf != NULL) {
4422 bf_next = bf->bf_next; /* next aggregate frame, or NULL */
4423
4424 /*
4425 * If it's been added to the BAW we need to kick
4426 * it out of the BAW before we continue.
4427 *
4428 * XXX if it's an aggregate, assert that it's in the
4429 * BAW - we shouldn't have it be in an aggregate
4430 * otherwise!
4431 */
4432 if (bf->bf_state.bfs_addedbaw) {
4433 ath_tx_update_baw(sc, an, atid, bf);
4434 bf->bf_state.bfs_dobaw = 0;
4435 }
4436
4437 /*
4438 * Give it the default completion handler.
4439 */
4440 bf->bf_comp = ath_tx_normal_comp;
4441 bf->bf_next = NULL;
4442
4443 /*
4444 * Add it to the list to free.
4445 */
4446 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4447
4448 /*
4449 * Now advance to the next frame in the aggregate.
4450 */
4451 bf = bf_next;
4452 }
4453 }
4454
4455 /*
4456 * Performs transmit side cleanup when TID changes from aggregated to
4457 * unaggregated and during reassociation.
4458 *
4459 * For now, this just tosses everything from the TID software queue
4460 * whether or not it has been retried and marks the TID as
4461 * pending completion if there's anything for this TID queued to
4462 * the hardware.
4463 *
4464 * The caller is responsible for pausing the TID and unpausing the
4465 * TID if no cleanup was required. Otherwise the cleanup path will
4466 * unpause the TID once the last hardware queued frame is completed.
4467 */
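/*
 * A sketch of that caller contract (hypothetical caller, eg an
 * ADDBA teardown path):
 *
 *	ath_tx_tid_pause(sc, atid);
 *	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
 *	if (! atid->cleanup_inprogress)
 *		ath_tx_tid_resume(sc, atid);
 *	... then complete/free everything queued on bf_cq ...
 */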
4468 static void
4469 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4470 ath_bufhead *bf_cq)
4471 {
4472 struct ath_tid *atid = &an->an_tid[tid];
4473 struct ath_buf *bf, *bf_next;
4474
4475 ATH_TX_LOCK_ASSERT(sc);
4476
4477 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4478 "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4479 atid->cleanup_inprogress);
4480
4481 /*
4482 * Move the filtered frames to the TX queue, before
4483 * we run off and discard/process things.
4484 */
4485
4486 /* XXX this is really quite inefficient */
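	/* (Tail-to-head movement keeps the frames in their original order.) */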
4487 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4488 ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4489 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4490 }
4491
4492 /*
4493 * Update the frames in the software TX queue:
4494 *
4495 * + Discard retry frames in the queue
4496 * + Fix the completion function to be non-aggregate
4497 */
4498 bf = ATH_TID_FIRST(atid);
4499 while (bf) {
4500 /*
4501 * Grab the next frame in the list, we may
4502 * be fiddling with the list.
4503 */
4504 bf_next = TAILQ_NEXT(bf, bf_list);
4505
4506 /*
4507 * Free the frame and all subframes.
4508 */
4509 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4510
4511 /*
4512 * Next frame!
4513 */
4514 bf = bf_next;
4515 }
4516
4517 /*
4518 * If there's anything in the hardware queue we wait
4519 * for the TID HWQ to empty.
4520 */
4521 if (atid->hwq_depth > 0) {
4522 /*
4523 * XXX how about we kill atid->incomp, and instead
4524 * replace it with a macro that checks that atid->hwq_depth
4525 * is 0?
4526 */
4527 atid->incomp = atid->hwq_depth;
4528 atid->cleanup_inprogress = 1;
4529 }
4530
4531 if (atid->cleanup_inprogress)
4532 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4533 "%s: TID %d: cleanup needed: %d packets\n",
4534 __func__, tid, atid->incomp);
4535
4536 /* Owner now must free completed frames */
4537 }
4538
4539 static struct ath_buf *
4540 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4541 struct ath_tid *tid, struct ath_buf *bf)
4542 {
4543 struct ath_buf *nbf;
4544 int error;
4545
4546 /*
4547 * Clone the buffer. This will handle the dma unmap and
4548 * copy the node reference to the new buffer. If this
4549 * works out, 'bf' will have no DMA mapping, no mbuf
4550 * pointer and no node reference.
4551 */
4552 nbf = ath_buf_clone(sc, bf);
4553
4554 #if 0
4555 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4556 __func__);
4557 #endif
4558
4559 if (nbf == NULL) {
4560 /* Failed to clone */
4561 DPRINTF(sc, ATH_DEBUG_XMIT,
4562 "%s: failed to clone a busy buffer\n",
4563 __func__);
4564 return NULL;
4565 }
4566
4567 /* Setup the dma for the new buffer */
4568 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4569 if (error != 0) {
4570 DPRINTF(sc, ATH_DEBUG_XMIT,
4571 "%s: failed to setup dma for clone\n",
4572 __func__);
4573 /*
4574 * Put this at the head of the list, not tail;
4575 * that way it doesn't interfere with the
4576 * busy buffer logic (which uses the tail of
4577 * the list.)
4578 */
4579 ATH_TXBUF_LOCK(sc);
4580 ath_returnbuf_head(sc, nbf);
4581 ATH_TXBUF_UNLOCK(sc);
4582 return NULL;
4583 }
4584
4585 /* Update BAW if required, before we free the original buf */
4586 if (bf->bf_state.bfs_dobaw)
4587 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4588
4589 /* Free original buffer; return new buffer */
4590 ath_freebuf(sc, bf);
4591
4592 return nbf;
4593 }
4594
4595 /*
4596 * Handle retrying an unaggregate frame in an aggregate
4597 * session.
4598 *
4599 * If too many retries occur, pause the TID, wait for
4600 * any further retransmits (as there's no reason why
4601 * non-aggregate frames in an aggregate session are
4602 * transmitted in-order; they just have to be in-BAW)
4603 * and then queue a BAR.
4604 */
4605 static void
4606 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4607 {
4608 struct ieee80211_node *ni = bf->bf_node;
4609 struct ath_node *an = ATH_NODE(ni);
4610 int tid = bf->bf_state.bfs_tid;
4611 struct ath_tid *atid = &an->an_tid[tid];
4612 struct ieee80211_tx_ampdu *tap;
4613
4614 ATH_TX_LOCK(sc);
4615
4616 tap = ath_tx_get_tx_tid(an, tid);
4617
4618 /*
4619 * If the buffer is marked as busy, we can't directly
4620 * reuse it. Instead, try to clone the buffer.
4621 * If the clone is successful, recycle the old buffer.
4622 * If the clone is unsuccessful, set bfs_retries to max
4623 * to force the next bit of code to free the buffer
4624 * for us.
4625 */
4626 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4627 (bf->bf_flags & ATH_BUF_BUSY)) {
4628 struct ath_buf *nbf;
4629 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4630 if (nbf)
4631 /* bf has been freed at this point */
4632 bf = nbf;
4633 else
4634 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4635 }
4636
4637 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4638 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4639 "%s: exceeded retries; seqno %d\n",
4640 __func__, SEQNO(bf->bf_state.bfs_seqno));
4641 sc->sc_stats.ast_tx_swretrymax++;
4642
4643 /* Update BAW anyway */
4644 if (bf->bf_state.bfs_dobaw) {
4645 ath_tx_update_baw(sc, an, atid, bf);
4646 if (! bf->bf_state.bfs_addedbaw)
4647 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4648 "%s: wasn't added: seqno %d\n",
4649 __func__, SEQNO(bf->bf_state.bfs_seqno));
4650 }
4651 bf->bf_state.bfs_dobaw = 0;
4652
4653 /* Suspend the TX queue and get ready to send the BAR */
4654 ath_tx_tid_bar_suspend(sc, atid);
4655
4656 /* Send the BAR if there are no other frames waiting */
4657 if (ath_tx_tid_bar_tx_ready(sc, atid))
4658 ath_tx_tid_bar_tx(sc, atid);
4659
4660 ATH_TX_UNLOCK(sc);
4661
4662 /* Free buffer, bf is free after this call */
4663 ath_tx_default_comp(sc, bf, 0);
4664 return;
4665 }
4666
4667 /*
4668 * This increments the retry counter as well as
4669 * sets the retry flag in the ath_buf and packet
4670 * body.
4671 */
4672 ath_tx_set_retry(sc, bf);
4673 sc->sc_stats.ast_tx_swretries++;
4674
4675 /*
4676 * Insert this at the head of the queue, so it's
4677 * retried before any current/subsequent frames.
4678 */
4679 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4680 ath_tx_tid_sched(sc, atid);
4681 /* Send the BAR if there are no other frames waiting */
4682 if (ath_tx_tid_bar_tx_ready(sc, atid))
4683 ath_tx_tid_bar_tx(sc, atid);
4684
4685 ATH_TX_UNLOCK(sc);
4686 }
4687
4688 /*
4689 * Common code for aggregate excessive retry/subframe retry.
4690 * If retrying, queues buffers to bf_q. If not, frees the
4691 * buffers.
4692 *
4693 * XXX should unify this with ath_tx_aggr_retry_unaggr()
4694 */
4695 static int
4696 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4697 ath_bufhead *bf_q)
4698 {
4699 struct ieee80211_node *ni = bf->bf_node;
4700 struct ath_node *an = ATH_NODE(ni);
4701 int tid = bf->bf_state.bfs_tid;
4702 struct ath_tid *atid = &an->an_tid[tid];
4703
4704 ATH_TX_LOCK_ASSERT(sc);
4705
4706 /* XXX clr11naggr should be done for all subframes */
4707 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4708 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4709
4710 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4711
4712 /*
4713 * If the buffer is marked as busy, we can't directly
4714 * reuse it. Instead, try to clone the buffer.
4715 * If the clone is successful, recycle the old buffer.
4716 * If the clone is unsuccessful, set bfs_retries to max
4717 * to force the next bit of code to free the buffer
4718 * for us.
4719 */
4720 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4721 (bf->bf_flags & ATH_BUF_BUSY)) {
4722 struct ath_buf *nbf;
4723 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4724 if (nbf)
4725 /* bf has been freed at this point */
4726 bf = nbf;
4727 else
4728 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4729 }
4730
4731 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4732 sc->sc_stats.ast_tx_swretrymax++;
4733 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4734 "%s: max retries: seqno %d\n",
4735 __func__, SEQNO(bf->bf_state.bfs_seqno));
4736 ath_tx_update_baw(sc, an, atid, bf);
4737 if (!bf->bf_state.bfs_addedbaw)
4738 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4739 "%s: wasn't added: seqno %d\n",
4740 __func__, SEQNO(bf->bf_state.bfs_seqno));
4741 bf->bf_state.bfs_dobaw = 0;
4742 return 1;
4743 }
4744
4745 ath_tx_set_retry(sc, bf);
4746 sc->sc_stats.ast_tx_swretries++;
4747 bf->bf_next = NULL; /* Just to make sure */
4748
4749 /* Clear the aggregate state */
4750 bf->bf_state.bfs_aggr = 0;
4751 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
4752 bf->bf_state.bfs_nframes = 1;
4753
4754 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4755 return 0;
4756 }
4757
4758 /*
4759 * error pkt completion for an aggregate destination
4760 */
4761 static void
4762 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4763 struct ath_tid *tid)
4764 {
4765 struct ieee80211_node *ni = bf_first->bf_node;
4766 struct ath_node *an = ATH_NODE(ni);
4767 struct ath_buf *bf_next, *bf;
4768 ath_bufhead bf_q;
4769 int drops = 0;
4770 struct ieee80211_tx_ampdu *tap;
4771 ath_bufhead bf_cq;
4772
4773 TAILQ_INIT(&bf_q);
4774 TAILQ_INIT(&bf_cq);
4775
4776 /*
4777 * Update rate control - all frames have failed.
4778 *
4779 * XXX use the length in the first frame in the series;
4780 * XXX just so things are consistent for now.
4781 */
4782 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4783 &bf_first->bf_status.ds_txstat,
4784 bf_first->bf_state.bfs_pktlen,
4785 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4786
4787 ATH_TX_LOCK(sc);
4788 tap = ath_tx_get_tx_tid(an, tid->tid);
4789 sc->sc_stats.ast_tx_aggr_failall++;
4790
4791 /* Retry all subframes */
4792 bf = bf_first;
4793 while (bf) {
4794 bf_next = bf->bf_next;
4795 bf->bf_next = NULL; /* Remove it from the aggr list */
4796 sc->sc_stats.ast_tx_aggr_fail++;
4797 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4798 drops++;
4799 bf->bf_next = NULL;
4800 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4801 }
4802 bf = bf_next;
4803 }
4804
4805 /* Prepend all frames to the beginning of the queue */
4806 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4807 TAILQ_REMOVE(&bf_q, bf, bf_list);
4808 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4809 }
4810
4811 /*
4812 * Schedule the TID to be re-tried.
4813 */
4814 ath_tx_tid_sched(sc, tid);
4815
4816 /*
4817 * send bar if we dropped any frames
4818 *
4819 * Keep the txq lock held for now, as we need to ensure
4820 * that ni_txseqs[] is consistent (as it's being updated
4821 * in the ifnet TX context or raw TX context.)
4822 */
4823 if (drops) {
4824 /* Suspend the TX queue and get ready to send the BAR */
4825 ath_tx_tid_bar_suspend(sc, tid);
4826 }
4827
4828 /*
4829 * Send BAR if required
4830 */
4831 if (ath_tx_tid_bar_tx_ready(sc, tid))
4832 ath_tx_tid_bar_tx(sc, tid);
4833
4834 ATH_TX_UNLOCK(sc);
4835
4836 /* Complete frames which errored out */
4837 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4838 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4839 ath_tx_default_comp(sc, bf, 0);
4840 }
4841 }
4842
4843 /*
4844 * Handle clean-up of packets from an aggregate list.
4845 *
4846 * There's no need to update the BAW here - the session is being
4847 * torn down.
4848 */
4849 static void
4850 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4851 {
4852 struct ath_buf *bf, *bf_next;
4853 struct ieee80211_node *ni = bf_first->bf_node;
4854 struct ath_node *an = ATH_NODE(ni);
4855 int tid = bf_first->bf_state.bfs_tid;
4856 struct ath_tid *atid = &an->an_tid[tid];
4857
4858 ATH_TX_LOCK(sc);
4859
4860 /* update incomp */
4861 atid->incomp--;
4862
4863 /* Update the BAW */
4864 bf = bf_first;
4865 while (bf) {
4866 /* XXX refactor! */
4867 if (bf->bf_state.bfs_dobaw) {
4868 ath_tx_update_baw(sc, an, atid, bf);
4869 if (!bf->bf_state.bfs_addedbaw)
4870 DPRINTF(sc, ATH_DEBUG_SW_TX,
4871 "%s: wasn't added: seqno %d\n",
4872 __func__, SEQNO(bf->bf_state.bfs_seqno));
4873 }
4874 bf = bf->bf_next;
4875 }
4876
4877 if (atid->incomp == 0) {
4878 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4879 "%s: TID %d: cleaned up! resume!\n",
4880 __func__, tid);
4881 atid->cleanup_inprogress = 0;
4882 ath_tx_tid_resume(sc, atid);
4883 }
4884
4885 /* Send BAR if required */
4886 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4887 /*
4888 * XXX TODO: we should likely just tear down the BAR state here,
4889 * rather than sending a BAR.
4890 */
4891 if (ath_tx_tid_bar_tx_ready(sc, atid))
4892 ath_tx_tid_bar_tx(sc, atid);
4893
4894 ATH_TX_UNLOCK(sc);
4895
4896 /* Handle frame completion as individual frames */
4897 bf = bf_first;
4898 while (bf) {
4899 bf_next = bf->bf_next;
4900 bf->bf_next = NULL;
4901 ath_tx_default_comp(sc, bf, 1);
4902 bf = bf_next;
4903 }
4904 }
4905
4906 /*
4907  * Handle completion of a set of aggregate frames.
4908 *
4909 * Note: the completion handler is the last descriptor in the aggregate,
4910 * not the last descriptor in the first frame.
4911 */
4912 static void
4913 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4914 int fail)
4915 {
4916 //struct ath_desc *ds = bf->bf_lastds;
4917 struct ieee80211_node *ni = bf_first->bf_node;
4918 struct ath_node *an = ATH_NODE(ni);
4919 int tid = bf_first->bf_state.bfs_tid;
4920 struct ath_tid *atid = &an->an_tid[tid];
4921 struct ath_tx_status ts;
4922 struct ieee80211_tx_ampdu *tap;
4923 ath_bufhead bf_q;
4924 ath_bufhead bf_cq;
4925 int seq_st, tx_ok;
4926 int hasba, isaggr;
4927 uint32_t ba[2];
4928 struct ath_buf *bf, *bf_next;
4929 int ba_index;
4930 int drops = 0;
4931 int nframes = 0, nbad = 0, nf;
4932 int pktlen;
4933 /* XXX there's too much on the stack? */
4934 struct ath_rc_series rc[ATH_RC_NUM];
4935 int txseq;
4936
4937 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4938 __func__, atid->hwq_depth);
4939
4940 /*
4941 * Take a copy; this may be needed -after- bf_first
4942 * has been completed and freed.
4943 */
4944 ts = bf_first->bf_status.ds_txstat;
4945
4946 TAILQ_INIT(&bf_q);
4947 TAILQ_INIT(&bf_cq);
4948
4949 /* The TID state is kept behind the TXQ lock */
4950 ATH_TX_LOCK(sc);
4951
4952 atid->hwq_depth--;
4953 if (atid->hwq_depth < 0)
4954 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4955 __func__, atid->hwq_depth);
4956
4957 /*
4958 * If the TID is filtered, handle completing the filter
4959 * transition before potentially kicking it to the cleanup
4960 * function.
4961 *
4962 * XXX this is duplicate work, ew.
4963 */
4964 if (atid->isfiltered)
4965 ath_tx_tid_filt_comp_complete(sc, atid);
4966
4967 /*
4968 * Punt cleanup to the relevant function, not our problem now
4969 */
4970 if (atid->cleanup_inprogress) {
4971 if (atid->isfiltered)
4972 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4973 "%s: isfiltered=1, normal_comp?\n",
4974 __func__);
4975 ATH_TX_UNLOCK(sc);
4976 ath_tx_comp_cleanup_aggr(sc, bf_first);
4977 return;
4978 }
4979
4980 /*
4981 * If the frame is filtered, transition to filtered frame
4982 * mode and add this to the filtered frame list.
4983 *
4984 * XXX TODO: figure out how this interoperates with
4985 * BAR, pause and cleanup states.
4986 */
4987 if ((ts.ts_status & HAL_TXERR_FILT) ||
4988 (ts.ts_status != 0 && atid->isfiltered)) {
4989 if (fail != 0)
4990 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4991 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4992 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4993
4994 /* Remove from BAW */
4995 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4996 if (bf->bf_state.bfs_addedbaw)
4997 drops++;
4998 if (bf->bf_state.bfs_dobaw) {
4999 ath_tx_update_baw(sc, an, atid, bf);
5000 if (!bf->bf_state.bfs_addedbaw)
5001 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5002 "%s: wasn't added: seqno %d\n",
5003 __func__,
5004 SEQNO(bf->bf_state.bfs_seqno));
5005 }
5006 bf->bf_state.bfs_dobaw = 0;
5007 }
5008 /*
5009 * If any intermediate frames in the BAW were dropped when
5010 * handling filtering things, send a BAR.
5011 */
5012 if (drops)
5013 ath_tx_tid_bar_suspend(sc, atid);
5014
5015 /*
5016 * Finish up by sending a BAR if required and freeing
5017 * the frames outside of the TX lock.
5018 */
5019 goto finish_send_bar;
5020 }
5021
5022 /*
5023 * XXX for now, use the first frame in the aggregate for
5024 * XXX rate control completion; it's at least consistent.
5025 */
5026 pktlen = bf_first->bf_state.bfs_pktlen;
5027
5028 /*
5029 * Handle errors first!
5030 *
5031 	 * Here, handle _any_ error as an "exceeded retries" error.
5032 * Later on (when filtered frames are to be specially handled)
5033 * it'll have to be expanded.
5034 */
5035 #if 0
5036 if (ts.ts_status & HAL_TXERR_XRETRY) {
5037 #endif
5038 if (ts.ts_status != 0) {
5039 ATH_TX_UNLOCK(sc);
5040 ath_tx_comp_aggr_error(sc, bf_first, atid);
5041 return;
5042 }
5043
5044 tap = ath_tx_get_tx_tid(an, tid);
5045
5046 /*
5047 * extract starting sequence and block-ack bitmap
5048 */
5049 /* XXX endian-ness of seq_st, ba? */
5050 seq_st = ts.ts_seqnum;
5051 hasba = !! (ts.ts_flags & HAL_TX_BA);
5052 tx_ok = (ts.ts_status == 0);
5053 isaggr = bf_first->bf_state.bfs_aggr;
5054 ba[0] = ts.ts_ba_low;
5055 ba[1] = ts.ts_ba_high;
5056
5057 /*
5058 * Copy the TX completion status and the rate control
5059 * series from the first descriptor, as it may be freed
5060 * before the rate control code can get its grubby fingers
5061 * into things.
5062 */
5063 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
5064
5065 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5066 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
5067 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
5068 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
5069 isaggr, seq_st, hasba, ba[0], ba[1]);
5070
5071 /*
5072 * The reference driver doesn't do this; it simply ignores
5073 * this check in its entirety.
5074 *
5075 * I've seen this occur when using iperf to send traffic
5076 * out tid 1 - the aggregate frames are all marked as TID 1,
5077 * but the TXSTATUS has TID=0. So, let's just ignore this
5078 * check.
5079 */
5080 #if 0
5081 /* Occasionally, the MAC sends a tx status for the wrong TID. */
5082 if (tid != ts.ts_tid) {
5083 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
5084 __func__, tid, ts.ts_tid);
5085 tx_ok = 0;
5086 }
5087 #endif
5088
5089 /* AR5416 BA bug; this requires an interface reset */
5090 if (isaggr && tx_ok && (! hasba)) {
5091 device_printf(sc->sc_dev,
5092 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
5093 "seq_st=%d\n",
5094 __func__, hasba, tx_ok, isaggr, seq_st);
5095 /* XXX TODO: schedule an interface reset */
5096 #ifdef ATH_DEBUG
5097 ath_printtxbuf(sc, bf_first,
5098 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5099 #endif
5100 }
5101
5102 /*
5103 * Walk the list of frames, figure out which ones were correctly
5104 * sent and which weren't.
5105 */
5106 bf = bf_first;
5107 nf = bf_first->bf_state.bfs_nframes;
5108
5109 /* bf_first is going to be invalid once this list is walked */
5110 bf_first = NULL;
5111
5112 /*
5113 * Walk the list of completed frames and determine
5114 * which need to be completed and which need to be
5115 * retransmitted.
5116 *
5117 * For completed frames, the completion functions need
5118 * to be called at the end of this function as the last
5119 * node reference may free the node.
5120 *
5121 * Finally, since the TXQ lock can't be held during the
5122 * completion callback (to avoid lock recursion),
5123 * the completion calls have to be done outside of the
5124 * lock.
5125 */
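	/*
	 * ATH_BA_INDEX() gives each subframe's offset from the
	 * block-ack starting sequence (seq_st); ATH_BA_ISSET() then
	 * tests that bit in the 64-bit BA bitmap (ba[0]/ba[1]) to see
	 * whether the receiver ACKed that subframe.
	 */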
5126 while (bf) {
5127 nframes++;
5128 ba_index = ATH_BA_INDEX(seq_st,
5129 SEQNO(bf->bf_state.bfs_seqno));
5130 bf_next = bf->bf_next;
5131 bf->bf_next = NULL; /* Remove it from the aggr list */
5132
5133 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5134 "%s: checking bf=%p seqno=%d; ack=%d\n",
5135 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5136 ATH_BA_ISSET(ba, ba_index));
5137
5138 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
5139 sc->sc_stats.ast_tx_aggr_ok++;
5140 ath_tx_update_baw(sc, an, atid, bf);
5141 bf->bf_state.bfs_dobaw = 0;
5142 if (!bf->bf_state.bfs_addedbaw)
5143 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5144 "%s: wasn't added: seqno %d\n",
5145 __func__, SEQNO(bf->bf_state.bfs_seqno));
5146 bf->bf_next = NULL;
5147 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5148 } else {
5149 sc->sc_stats.ast_tx_aggr_fail++;
5150 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5151 drops++;
5152 bf->bf_next = NULL;
5153 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5154 }
5155 nbad++;
5156 }
5157 bf = bf_next;
5158 }
5159
5160 /*
5161 * Now that the BAW updates have been done, unlock
5162 *
5163 * txseq is grabbed before the lock is released so we
5164 * have a consistent view of what -was- in the BAW.
5165 * Anything after this point will not yet have been
5166 * TXed.
5167 */
5168 txseq = tap->txa_start;
5169 ATH_TX_UNLOCK(sc);
5170
5171 if (nframes != nf)
5172 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5173 "%s: num frames seen=%d; bf nframes=%d\n",
5174 __func__, nframes, nf);
5175
5176 /*
5177 * Now we know how many frames were bad, call the rate
5178 * control code.
5179 */
5180 if (fail == 0)
5181 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
5182 nbad);
5183
5184 /*
5185 * send bar if we dropped any frames
5186 */
5187 if (drops) {
5188 /* Suspend the TX queue and get ready to send the BAR */
5189 ATH_TX_LOCK(sc);
5190 ath_tx_tid_bar_suspend(sc, atid);
5191 ATH_TX_UNLOCK(sc);
5192 }
5193
5194 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5195 "%s: txa_start now %d\n", __func__, tap->txa_start);
5196
5197 ATH_TX_LOCK(sc);
5198
5199 /* Prepend all frames to the beginning of the queue */
5200 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
5201 TAILQ_REMOVE(&bf_q, bf, bf_list);
5202 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5203 }
5204
5205 /*
5206 * Reschedule to grab some further frames.
5207 */
5208 ath_tx_tid_sched(sc, atid);
5209
5210 /*
5211 * If the queue is filtered, re-schedule as required.
5212 *
5213 * This is required as there may be a subsequent TX descriptor
5214 * for this end-node that has CLRDMASK set, so it's quite possible
5215 * that a filtered frame will be followed by a non-filtered
5216 * (complete or otherwise) frame.
5217 *
5218 * XXX should we do this before we complete the frame?
5219 */
5220 if (atid->isfiltered)
5221 ath_tx_tid_filt_comp_complete(sc, atid);
5222
5223 finish_send_bar:
5224
5225 /*
5226 * Send BAR if required
5227 */
5228 if (ath_tx_tid_bar_tx_ready(sc, atid))
5229 ath_tx_tid_bar_tx(sc, atid);
5230
5231 ATH_TX_UNLOCK(sc);
5232
5233 /* Do deferred completion */
5234 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5235 TAILQ_REMOVE(&bf_cq, bf, bf_list);
5236 ath_tx_default_comp(sc, bf, 0);
5237 }
5238 }
5239
5240 /*
5241 * Handle completion of unaggregated frames in an ADDBA
5242 * session.
5243 *
5244 * Fail is set to 1 if the entry is being freed via a call to
5245 * ath_tx_draintxq().
5246 */
5247 static void
5248 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5249 {
5250 struct ieee80211_node *ni = bf->bf_node;
5251 struct ath_node *an = ATH_NODE(ni);
5252 int tid = bf->bf_state.bfs_tid;
5253 struct ath_tid *atid = &an->an_tid[tid];
5254 struct ath_tx_status ts;
5255 int drops = 0;
5256
5257 /*
5258 * Take a copy of this; filtering/cloning the frame may free the
5259 * bf pointer.
5260 */
5261 ts = bf->bf_status.ds_txstat;
5262
5263 /*
5264 * Update rate control status here, before we possibly
5265 * punt to retry or cleanup.
5266 *
5267 * Do it outside of the TXQ lock.
5268 */
5269 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5270 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5271 &bf->bf_status.ds_txstat,
5272 bf->bf_state.bfs_pktlen,
5273 1, (ts.ts_status == 0) ? 0 : 1);
5274
5275 /*
5276 * This is called early so atid->hwq_depth can be tracked.
5277 	 * This unfortunately means the lock is released and regrabbed
5278 	 * during retry and cleanup. That's rather inefficient.
5279 */
5280 ATH_TX_LOCK(sc);
5281
5282 if (tid == IEEE80211_NONQOS_TID)
5283 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5284
5285 DPRINTF(sc, ATH_DEBUG_SW_TX,
5286 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5287 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5288 SEQNO(bf->bf_state.bfs_seqno));
5289
5290 atid->hwq_depth--;
5291 if (atid->hwq_depth < 0)
5292 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5293 __func__, atid->hwq_depth);
5294
5295 /*
5296 * If the TID is filtered, handle completing the filter
5297 * transition before potentially kicking it to the cleanup
5298 * function.
5299 */
5300 if (atid->isfiltered)
5301 ath_tx_tid_filt_comp_complete(sc, atid);
5302
        /*
         * If a cleanup is in progress, punt to comp_cleanup rather
         * than handling it here.  It is then responsible for cleaning
         * up, calling the completion function in net80211, etc.
         */
        if (atid->cleanup_inprogress) {
                if (atid->isfiltered)
                        DPRINTF(sc, ATH_DEBUG_SW_TX,
                            "%s: isfiltered=1, normal_comp?\n",
                            __func__);
                ATH_TX_UNLOCK(sc);
                DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
                    __func__);
                ath_tx_comp_cleanup_unaggr(sc, bf);
                return;
        }

        /*
         * XXX TODO: how do cleanup, BAR and filtered frame handling
         * overlap?
         *
         * If the frame is filtered OR if it's any failure but
         * the TID is filtered, the frame must be added to the
         * filtered frame list.
         *
         * However - a busy buffer can't be added to the filtered
         * list as it will end up being recycled without having
         * been made available for the hardware.
         */
        if ((ts.ts_status & HAL_TXERR_FILT) ||
            (ts.ts_status != 0 && atid->isfiltered)) {
                int freeframe;

                if (fail != 0)
                        DPRINTF(sc, ATH_DEBUG_SW_TX,
                            "%s: isfiltered=1, fail=%d\n",
                            __func__, fail);
                freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
                /*
                 * If freeframe=0 then bf is no longer ours; don't
                 * touch it.
                 */
                if (freeframe) {
                        /* Remove from BAW */
                        if (bf->bf_state.bfs_addedbaw)
                                drops++;
                        if (bf->bf_state.bfs_dobaw) {
                                ath_tx_update_baw(sc, an, atid, bf);
                                if (!bf->bf_state.bfs_addedbaw)
                                        DPRINTF(sc, ATH_DEBUG_SW_TX,
                                            "%s: wasn't added: seqno %d\n",
                                            __func__, SEQNO(bf->bf_state.bfs_seqno));
                        }
                        bf->bf_state.bfs_dobaw = 0;
                }

                /*
                 * If the frame couldn't be filtered, treat it as a drop and
                 * prepare to send a BAR.
                 */
                if (freeframe && drops)
                        ath_tx_tid_bar_suspend(sc, atid);

                /*
                 * Send BAR if required
                 */
                if (ath_tx_tid_bar_tx_ready(sc, atid))
                        ath_tx_tid_bar_tx(sc, atid);

                ATH_TX_UNLOCK(sc);
                /*
                 * If freeframe is set, then the frame couldn't be
                 * cloned and bf is still valid.  Just complete/free it.
                 */
                if (freeframe)
                        ath_tx_default_comp(sc, bf, fail);

                return;
        }
        /*
         * Don't bother with the retry check if all frames
         * are being failed (e.g. during queue deletion.)
         */
#if 0
        if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
#endif
        if (fail == 0 && ts.ts_status != 0) {
                ATH_TX_UNLOCK(sc);
                DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
                    __func__);
                ath_tx_aggr_retry_unaggr(sc, bf);
                return;
        }

        /* Success?  Complete */
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
            __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
        if (bf->bf_state.bfs_dobaw) {
                ath_tx_update_baw(sc, an, atid, bf);
                bf->bf_state.bfs_dobaw = 0;
                if (!bf->bf_state.bfs_addedbaw)
                        DPRINTF(sc, ATH_DEBUG_SW_TX,
                            "%s: wasn't added: seqno %d\n",
                            __func__, SEQNO(bf->bf_state.bfs_seqno));
        }

        /*
         * If the queue is filtered, re-schedule as required.
         *
         * This is required as there may be a subsequent TX descriptor
         * for this end-node that has CLRDMASK set, so it's quite possible
         * that a filtered frame will be followed by a non-filtered
         * (complete or otherwise) frame.
         *
         * XXX should we do this before we complete the frame?
         */
        if (atid->isfiltered)
                ath_tx_tid_filt_comp_complete(sc, atid);

        /*
         * Send BAR if required
         */
        if (ath_tx_tid_bar_tx_ready(sc, atid))
                ath_tx_tid_bar_tx(sc, atid);

        ATH_TX_UNLOCK(sc);

        ath_tx_default_comp(sc, bf, fail);
        /* bf is freed at this point */
}

void
ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
        if (bf->bf_state.bfs_aggr)
                ath_tx_aggr_comp_aggr(sc, bf, fail);
        else
                ath_tx_aggr_comp_unaggr(sc, bf, fail);
}
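
/*
 * For illustration: ath_tx_aggr_comp() is not called directly; it is
 * stashed in bf->bf_comp when a frame is queued (see "queuepkt" below)
 * and later invoked from the TX completion path.  A minimal sketch of
 * that consumer follows; the actual processing loop lives elsewhere in
 * the driver, so treat this as an assumption about its shape, not the
 * real code.
 */
#if 0
        if (bf->bf_comp == NULL)
                ath_tx_default_comp(sc, bf, 0);
        else
                bf->bf_comp(sc, bf, 0);
#endif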

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * This is the aggregate version.
 */
void
ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
        struct ath_buf *bf;
        struct ath_txq *txq = sc->sc_ac2q[tid->ac];
        struct ieee80211_tx_ampdu *tap;
        ATH_AGGR_STATUS status;
        ath_bufhead bf_q;

        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
        ATH_TX_LOCK_ASSERT(sc);

        /*
         * XXX TODO: If we're called for a queue that we're leaking frames to,
         * ensure we only leak one.
         */

        tap = ath_tx_get_tx_tid(an, tid->tid);

        if (tid->tid == IEEE80211_NONQOS_TID)
                DPRINTF(sc, ATH_DEBUG_SW_TX,
                    "%s: called for TID=NONQOS_TID?\n", __func__);

        for (;;) {
                status = ATH_AGGR_DONE;

                /*
                 * If the upper layer has paused the TID, don't
                 * queue any further packets.
                 *
                 * This can also occur from the completion task because
                 * of packet loss; but as it's serialised with this code,
                 * it won't "appear" half way through queuing packets.
                 */
                if (! ath_tx_tid_can_tx_or_sched(sc, tid))
                        break;

                bf = ATH_TID_FIRST(tid);
                if (bf == NULL) {
                        break;
                }

                /*
                 * If the packet doesn't fall within the BAW (e.g. a NULL
                 * data frame), schedule it directly; continue.
                 */
                if (! bf->bf_state.bfs_dobaw) {
                        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                            "%s: non-baw packet\n",
                            __func__);
                        ATH_TID_REMOVE(tid, bf, bf_list);

                        if (bf->bf_state.bfs_nframes > 1)
                                DPRINTF(sc, ATH_DEBUG_SW_TX,
                                    "%s: aggr=%d, nframes=%d\n",
                                    __func__,
                                    bf->bf_state.bfs_aggr,
                                    bf->bf_state.bfs_nframes);

                        /*
                         * This shouldn't happen - such frames shouldn't
                         * ever have been queued as an aggregate in the
                         * first place.  However, make sure the fields
                         * are correctly set up, just to be totally sure.
                         */
                        bf->bf_state.bfs_aggr = 0;
                        bf->bf_state.bfs_nframes = 1;

                        /* Update CLRDMASK just before this frame is queued */
                        ath_tx_update_clrdmask(sc, tid, bf);

                        ath_tx_do_ratelookup(sc, bf);
                        ath_tx_calc_duration(sc, bf);
                        ath_tx_calc_protection(sc, bf);
                        ath_tx_set_rtscts(sc, bf);
                        ath_tx_rate_fill_rcflags(sc, bf);
                        ath_tx_setds(sc, bf);
                        ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);

                        sc->sc_aggr_stats.aggr_nonbaw_pkt++;

                        /* Queue the packet; continue */
                        goto queuepkt;
                }

                TAILQ_INIT(&bf_q);

                /*
                 * Do a rate control lookup on the first frame in the
                 * list.  The rate control code needs that to occur
                 * before it can determine whether to TX.
                 * It's inaccurate because the rate control code doesn't
                 * really "do" aggregate lookups, so it only considers
                 * the size of the first frame.
                 */
                ath_tx_do_ratelookup(sc, bf);
                bf->bf_state.bfs_rc[3].rix = 0;
                bf->bf_state.bfs_rc[3].tries = 0;

                ath_tx_calc_duration(sc, bf);
                ath_tx_calc_protection(sc, bf);

                ath_tx_set_rtscts(sc, bf);
                ath_tx_rate_fill_rcflags(sc, bf);

                status = ath_tx_form_aggr(sc, an, tid, &bf_q);

                DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);

                /*
                 * No frames to be picked up - out of BAW
                 */
                if (TAILQ_EMPTY(&bf_q))
                        break;

                /*
                 * This assumes that the descriptors in the ath_bufhead
                 * are already linked together via bf_next pointers.
                 */
                bf = TAILQ_FIRST(&bf_q);

                if (status == ATH_AGGR_8K_LIMITED)
                        sc->sc_aggr_stats.aggr_rts_aggr_limited++;

                /*
                 * If it's the only frame, send it as a non-aggregate;
                 * assume ath_tx_form_aggr() has checked whether it's
                 * within the BAW and added it appropriately.
                 */
                if (bf->bf_state.bfs_nframes == 1) {
                        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                            "%s: single-frame aggregate\n", __func__);

                        /* Update CLRDMASK just before this frame is queued */
                        ath_tx_update_clrdmask(sc, tid, bf);

                        bf->bf_state.bfs_aggr = 0;
                        bf->bf_state.bfs_ndelim = 0;
                        ath_tx_setds(sc, bf);
                        ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        if (status == ATH_AGGR_BAW_CLOSED)
                                sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
                        else
                                sc->sc_aggr_stats.aggr_single_pkt++;
                } else {
                        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                            "%s: multi-frame aggregate: %d frames, "
                            "length %d\n",
                            __func__, bf->bf_state.bfs_nframes,
                            bf->bf_state.bfs_al);
                        bf->bf_state.bfs_aggr = 1;
                        sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
                        sc->sc_aggr_stats.aggr_aggr_pkt++;

                        /* Update CLRDMASK just before this frame is queued */
                        ath_tx_update_clrdmask(sc, tid, bf);

                        /*
                         * Calculate the duration/protection as required.
                         */
                        ath_tx_calc_duration(sc, bf);
                        ath_tx_calc_protection(sc, bf);

                        /*
                         * Update the rate and rtscts information based on the
                         * rate decision made by the rate control code;
                         * the first frame in the aggregate needs it.
                         */
                        ath_tx_set_rtscts(sc, bf);

                        /*
                         * Setup the relevant descriptor fields
                         * for aggregation.  The first descriptor
                         * already points to the rest in the chain.
                         */
                        ath_tx_setds_11n(sc, bf);
                }
        queuepkt:
                /* Set completion handler, multi-frame aggregate or not */
                bf->bf_comp = ath_tx_aggr_comp;

                if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
                        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);

                /*
                 * Update leak count and frame config if we're leaking frames.
                 *
                 * XXX TODO: it should update all frames in an aggregate
                 * correctly!
                 */
                ath_tx_leak_count_update(sc, tid, bf);

                /* Punt to txq */
                ath_tx_handoff(sc, txq, bf);

                /* Track outstanding buffer count to hardware */
                /* aggregates are "one" buffer */
                tid->hwq_depth++;

                /*
                 * Break out if ath_tx_form_aggr() indicated
                 * there can't be any further progress (e.g. BAW is full.)
                 * Checking for an empty txq is done above.
                 *
                 * XXX locking on txq here?
                 */
                /* XXX TXQ locking */
                if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
                    (status == ATH_AGGR_BAW_CLOSED ||
                    status == ATH_AGGR_LEAK_CLOSED))
                        break;
        }
}
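
/*
 * For illustration: the "already linked together via bf_next" assumption
 * above means an aggregate is handed off as a single head ath_buf whose
 * member frames are reachable by following bf_next.  A minimal sketch of
 * walking such a chain (assumed shape only, not driver code):
 */
#if 0
        struct ath_buf *bf_walk;
        int nframes = 0;

        for (bf_walk = TAILQ_FIRST(&bf_q); bf_walk != NULL;
            bf_walk = bf_walk->bf_next)
                nframes++;      /* should end up equal to bfs_nframes */
#endif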

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
 * It just dumps frames into the TXQ.  We should limit how deep
 * the transmit queue can grow for frames dispatched to the given
 * TXQ.  A sketch of one possible guard follows this function.
 *
 * To avoid locking issues, either we need to own the TXQ lock
 * at this point, or we need to pass in the maximum frame count
 * from the caller.
 */
void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
        struct ath_buf *bf;
        struct ath_txq *txq = sc->sc_ac2q[tid->ac];

        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
            __func__, an, tid->tid);

        ATH_TX_LOCK_ASSERT(sc);

        /* If AMPDU is pending or running on this TID, note it */
        if (ath_tx_ampdu_pending(sc, an, tid->tid))
                DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
                    __func__, tid->tid);
        if (ath_tx_ampdu_running(sc, an, tid->tid))
                DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
                    __func__, tid->tid);

        for (;;) {

                /*
                 * If the upper layers have paused the TID, don't
                 * queue any further packets.
                 *
                 * XXX if we are leaking frames, make sure we decrement
                 * that counter _and_ we continue here.
                 */
                if (! ath_tx_tid_can_tx_or_sched(sc, tid))
                        break;

                bf = ATH_TID_FIRST(tid);
                if (bf == NULL) {
                        break;
                }

                ATH_TID_REMOVE(tid, bf, bf_list);

                /* Sanity check! */
                if (tid->tid != bf->bf_state.bfs_tid) {
                        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
                            " tid %d\n", __func__, bf->bf_state.bfs_tid,
                            tid->tid);
                }
                /* Normal completion handler */
                bf->bf_comp = ath_tx_normal_comp;

                /*
                 * Override this for now, until the non-aggregate
                 * completion handler correctly handles software retransmits.
                 */
                bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

                /* Update CLRDMASK just before this frame is queued */
                ath_tx_update_clrdmask(sc, tid, bf);

                /* Program descriptors + rate control */
                ath_tx_do_ratelookup(sc, bf);
                ath_tx_calc_duration(sc, bf);
                ath_tx_calc_protection(sc, bf);
                ath_tx_set_rtscts(sc, bf);
                ath_tx_rate_fill_rcflags(sc, bf);
                ath_tx_setds(sc, bf);

                /*
                 * Update the current leak count if
                 * we're leaking frames; and set the
                 * MORE flag as appropriate.
                 */
                ath_tx_leak_count_update(sc, tid, bf);

                /* Track outstanding buffer count to hardware */
                /* aggregates are "one" buffer */
                tid->hwq_depth++;

                /* Punt to hardware or software txq */
                ath_tx_handoff(sc, txq, bf);
        }
}
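
/*
 * For illustration: one possible shape of the missing depth limit noted
 * in the XXX TODO above, reusing the existing non-aggregate hardware
 * queue limit already consulted by ath_txq_sched().  This is a sketch
 * of an assumed fix, not something the routine currently does:
 */
#if 0
                /* Inside the dispatch loop, before the handoff */
                if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr)
                        break;
#endif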

/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (i.e., ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_tid *tid, *next, *last;

        ATH_TX_LOCK_ASSERT(sc);

        /*
         * Don't schedule if the hardware queue is busy.
         * This (hopefully) gives some more time to aggregate
         * some packets in the aggregation queue.
         *
         * XXX It doesn't stop a parallel sender from sneaking
         * in and transmitting a frame!
         */
        /* XXX TXQ locking */
        if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
                sc->sc_aggr_stats.aggr_sched_nopkt++;
                return;
        }
        if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
                sc->sc_aggr_stats.aggr_sched_nopkt++;
                return;
        }

        last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

        TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
                /*
                 * Suspend paused queues here; they'll be resumed
                 * once the addba completes or times out.
                 */
                DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
                    __func__, tid->tid, tid->paused);
                ath_tx_tid_unsched(sc, tid);
                /*
                 * This node may be in power-save and we're leaking
                 * a frame; be careful.
                 */
                if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
                        goto loop_done;
                }
                if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
                        ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
                else
                        ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

                /* Not empty? Re-schedule */
                if (tid->axq_depth != 0)
                        ath_tx_tid_sched(sc, tid);

                /*
                 * Give the software queue time to aggregate more
                 * packets.  If we aren't running aggregation then
                 * we should still limit the hardware queue depth.
                 */
                /* XXX TXQ locking */
                if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
                        break;
                }
                if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
                        break;
                }
        loop_done:
                /*
                 * If this was the last entry on the original list, stop.
                 * Otherwise nodes that have been rescheduled onto the end
                 * of the TID FIFO list will just keep being rescheduled.
                 *
                 * XXX What should we do about nodes that were paused
                 * but are pending a leaking frame in response to a ps-poll?
                 * They'll be put at the front of the list; so they'll
                 * prematurely trigger this condition! Ew.
                 */
                if (tid == last)
                        break;
        }
}
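
/*
 * For illustration: the FIFO described above is the per-TXQ axq_tidq
 * tail queue, with TIDs linked via axq_qelem.  A minimal sketch of the
 * sched/unsched pair used by ath_txq_sched() (assumed shape; the real
 * routines live elsewhere in the driver and also maintain a "sched"
 * flag to avoid double-insertion):
 */
#if 0
        /* Append a TID to the scheduler FIFO */
        TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);

        /* Remove a TID from the scheduler FIFO */
        TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
#endif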

/*
 * TX addba handling
 */

/*
 * Return net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
        struct ieee80211_node *ni = &an->an_node;
        struct ieee80211_tx_ampdu *tap;

        if (tid == IEEE80211_NONQOS_TID)
                return NULL;

        tap = &ni->ni_tx_ampdu[tid];
        return tap;
}

/*
 * Is AMPDU-TX running?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
        struct ieee80211_tx_ampdu *tap;

        if (tid == IEEE80211_NONQOS_TID)
                return 0;

        tap = ath_tx_get_tx_tid(an, tid);
        if (tap == NULL)
                return 0;       /* Not valid; default to not running */

        return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
        struct ieee80211_tx_ampdu *tap;

        if (tid == IEEE80211_NONQOS_TID)
                return 0;

        tap = ath_tx_get_tx_tid(an, tid);
        if (tap == NULL)
                return 0;       /* Not valid; default to not pending */

        return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}
/*
 * Method to handle sending an ADDBA request.
 *
 * We tap this so the relevant flags can be set to pause the TID
 * whilst waiting for the response.
 *
 * XXX there's no timeout handler we can override?
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
        struct ath_softc *sc = ni->ni_ic->ic_softc;
        int tid = tap->txa_tid;
        struct ath_node *an = ATH_NODE(ni);
        struct ath_tid *atid = &an->an_tid[tid];

        /*
         * XXX danger Will Robinson!
         *
         * Although the taskqueue may be running and scheduling some more
         * packets, these should all be _before_ the addba sequence number.
         * However, net80211 will keep self-assigning sequence numbers
         * until addba has been negotiated.
         *
         * In the past, these packets would be "paused" (which still works
         * fine, as they're being scheduled to the driver in the same
         * serialised method which is calling the addba request routine)
         * and when the aggregation session begins, they'll be dequeued
         * as aggregate packets and added to the BAW.  However, now there's
         * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
         * packets.  Thus they never get included in the BAW tracking and
         * this can cause the initial burst of packets after the addba
         * negotiation to "hang", as they quickly fall outside the BAW.
         *
         * The "eventual" solution should be to tag these packets with
         * dobaw.  Although net80211 has given us a sequence number,
         * it'll be "after" the left edge of the BAW and thus it'll
         * fall within it.
         */
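#if 0
        /*
         * Sketch of the "eventual" fix described above (not implemented):
         * walk the frames already queued on this TID and tag them for
         * BAW tracking.  The iteration shape is an assumption, based on
         * the bf_list linkage used elsewhere in this file.
         */
        struct ath_buf *bf;

        for (bf = ATH_TID_FIRST(atid); bf != NULL;
            bf = TAILQ_NEXT(bf, bf_list))
                bf->bf_state.bfs_dobaw = 1;
#endif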
        ATH_TX_LOCK(sc);
        /*
         * This is a bit annoying.  Until net80211 HT code inherits some
         * (any) locking, we may have this called in parallel BUT only
         * one response/timeout will be called.  Grr.
         */
        if (atid->addba_tx_pending == 0) {
                ath_tx_tid_pause(sc, atid);
                atid->addba_tx_pending = 1;
        }
        ATH_TX_UNLOCK(sc);

#if defined(__DragonFly__)
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: %s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
            __func__,
            ath_hal_ether_sprintf(ni->ni_macaddr),
            dialogtoken, baparamset, batimeout);
#else
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
            __func__,
            ni->ni_macaddr,
            ":",
            dialogtoken, baparamset, batimeout);
#endif
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: txa_start=%d, ni_txseqs=%d\n",
            __func__, tap->txa_start, ni->ni_txseqs[tid]);

        return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
            batimeout);
}
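
/*
 * For illustration: ath_addba_request() and friends only run because the
 * driver interposes them on the net80211 methods at attach time, keeping
 * the originals in sc->sc_addba_* and so on for chaining (as seen in the
 * tail call above).  A minimal sketch of that wiring (assumption: the
 * actual assignments live in the attach code elsewhere in the driver):
 */
#if 0
        sc->sc_addba_request = ic->ic_addba_request;
        sc->sc_addba_response = ic->ic_addba_response;
        sc->sc_addba_stop = ic->ic_addba_stop;
        sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
        sc->sc_bar_response = ic->ic_bar_response;

        ic->ic_addba_request = ath_addba_request;
        ic->ic_addba_response = ath_addba_response;
        ic->ic_addba_stop = ath_addba_stop;
        ic->ic_addba_response_timeout = ath_addba_response_timeout;
        ic->ic_bar_response = ath_bar_response;
#endif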

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should be "aggregate" (whether
 * aggregate or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated.  This means the initially-negotiated BAW left
 * edge won't match the ni->ni_txseq.
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseq.
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW.  For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
        struct ath_softc *sc = ni->ni_ic->ic_softc;
        int tid = tap->txa_tid;
        struct ath_node *an = ATH_NODE(ni);
        struct ath_tid *atid = &an->an_tid[tid];
        int r;

#if defined(__DragonFly__)
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: %s: called; status=%d, code=%d, batimeout=%d\n", __func__,
            ath_hal_ether_sprintf(ni->ni_macaddr),
            status, code, batimeout);
#else
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
            ni->ni_macaddr,
            ":",
            status, code, batimeout);
#endif

        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: txa_start=%d, ni_txseqs=%d\n",
            __func__, tap->txa_start, ni->ni_txseqs[tid]);

        /*
         * Call this first, so the interface flags get updated
         * before the TID is unpaused.  Otherwise a race condition
         * exists where the unpaused TID doesn't yet have
         * IEEE80211_AGGR_RUNNING set.
         */
        r = sc->sc_addba_response(ni, tap, status, code, batimeout);

        ATH_TX_LOCK(sc);
        atid->addba_tx_pending = 0;
        /*
         * XXX dirty!
         * Slide the BAW left edge to wherever net80211 left it for us.
         * Read above for more information.
         */
        tap->txa_start = ni->ni_txseqs[tid];
        ath_tx_tid_resume(sc, atid);
        ATH_TX_UNLOCK(sc);
        return r;
}


/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
        struct ath_softc *sc = ni->ni_ic->ic_softc;
        int tid = tap->txa_tid;
        struct ath_node *an = ATH_NODE(ni);
        struct ath_tid *atid = &an->an_tid[tid];
        ath_bufhead bf_cq;
        struct ath_buf *bf;

#if defined(__DragonFly__)
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %s: called\n",
            __func__,
            ath_hal_ether_sprintf(ni->ni_macaddr));
#else
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
            __func__,
            ni->ni_macaddr,
            ":");
#endif

        /*
         * Pause TID traffic early, so there aren't any races.
         * Unblock the pending BAR-held traffic, if it's currently paused.
         */
        ATH_TX_LOCK(sc);
        ath_tx_tid_pause(sc, atid);
        if (atid->bar_wait) {
                /*
                 * bar_unsuspend() expects bar_tx == 1, as it should be
                 * called from the TX completion path.  This quietens
                 * the warning.  It's cleared for us anyway.
                 */
                atid->bar_tx = 1;
                ath_tx_tid_bar_unsuspend(sc, atid);
        }
        ATH_TX_UNLOCK(sc);

        /* There's no need to hold the TXQ lock here */
        sc->sc_addba_stop(ni, tap);

        /*
         * ath_tx_tid_cleanup will resume the TID if possible, otherwise
         * it'll set the cleanup flag, and it'll be unpaused once
         * things have been cleaned up.
         */
        TAILQ_INIT(&bf_cq);
        ATH_TX_LOCK(sc);

        /*
         * In case there's a followup call to this, only call it
         * if we don't have a cleanup in progress.
         *
         * Since we've paused the queue above, we need to make
         * sure we unpause if there's already a cleanup in
         * progress - it means something else is also doing
         * this stuff, so we don't need to also keep it paused.
         */
        if (atid->cleanup_inprogress) {
                ath_tx_tid_resume(sc, atid);
        } else {
                ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
                /*
                 * Unpause the TID if no cleanup is required.
                 */
                if (! atid->cleanup_inprogress)
                        ath_tx_tid_resume(sc, atid);
        }
        ATH_TX_UNLOCK(sc);

        /* Handle completing frames and fail them */
        while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
                TAILQ_REMOVE(&bf_cq, bf, bf_list);
                ath_tx_default_comp(sc, bf, 1);
        }
}
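
/*
 * For illustration: the "complete and fail" drain at the end of
 * ath_addba_stop() is repeated verbatim in ath_tx_node_reassoc() below.
 * A hypothetical helper that both could call (not present in the
 * driver; shown only to make the shared pattern explicit):
 */
#if 0
static void
ath_tx_drain_failed(struct ath_softc *sc, ath_bufhead *bf_cq)
{
        struct ath_buf *bf;

        while ((bf = TAILQ_FIRST(bf_cq)) != NULL) {
                TAILQ_REMOVE(bf_cq, bf, bf_list);
                ath_tx_default_comp(sc, bf, 1);
        }
}
#endif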

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_tid *tid;
        int i;
        ath_bufhead bf_cq;
        struct ath_buf *bf;

        TAILQ_INIT(&bf_cq);

        ATH_TX_UNLOCK_ASSERT(sc);

        ATH_TX_LOCK(sc);
        for (i = 0; i < IEEE80211_TID_SIZE; i++) {
                tid = &an->an_tid[i];
                if (tid->hwq_depth == 0)
                        continue;
#if defined(__DragonFly__)
                DPRINTF(sc, ATH_DEBUG_NODE,
                    "%s: %s: TID %d: cleaning up TID\n",
                    __func__,
                    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
                    i);
#else
                DPRINTF(sc, ATH_DEBUG_NODE,
                    "%s: %6D: TID %d: cleaning up TID\n",
                    __func__,
                    an->an_node.ni_macaddr,
                    ":",
                    i);
#endif
                /*
                 * In case there's a followup call to this, only call it
                 * if we don't have a cleanup in progress.
                 */
                if (! tid->cleanup_inprogress) {
                        ath_tx_tid_pause(sc, tid);
                        ath_tx_tid_cleanup(sc, an, i, &bf_cq);
                        /*
                         * Unpause the TID if no cleanup is required.
                         */
                        if (! tid->cleanup_inprogress)
                                ath_tx_tid_resume(sc, tid);
                }
        }
        ATH_TX_UNLOCK(sc);

        /* Handle completing frames and fail them */
        while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
                TAILQ_REMOVE(&bf_cq, bf, bf_list);
                ath_tx_default_comp(sc, bf, 1);
        }
}

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
        struct ath_softc *sc = ni->ni_ic->ic_softc;
        int tid = tap->txa_tid;
        struct ath_node *an = ATH_NODE(ni);
        struct ath_tid *atid = &an->an_tid[tid];
        int attempts = tap->txa_attempts;
        int old_txa_start;

#if defined(__DragonFly__)
        DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
            "%s: %s: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
            __func__,
            ath_hal_ether_sprintf(ni->ni_macaddr),
            tap->txa_tid,
            atid->tid,
            status,
            attempts,
            tap->txa_start,
            tap->txa_seqpending);
#else
        DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
            "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
            __func__,
            ni->ni_macaddr,
            ":",
            tap->txa_tid,
            atid->tid,
            status,
            attempts,
            tap->txa_start,
            tap->txa_seqpending);
#endif

        /* Note: This may update the BAW details */
        /*
         * XXX What if this does slide the BAW along?  We need to somehow
         * XXX either fix things when it does happen, or prevent the
         * XXX seqpending value from being anything other than exactly what
         * XXX the hell we want!
         *
         * XXX So for now, how about I do this inside the TX lock
         * XXX and just correct it afterwards?  The below condition should
         * XXX never happen and if it does I need to fix all kinds of things.
         */
        ATH_TX_LOCK(sc);
        old_txa_start = tap->txa_start;
        sc->sc_bar_response(ni, tap, status);
        if (tap->txa_start != old_txa_start) {
                device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
                    __func__,
                    tid,
                    tap->txa_start,
                    old_txa_start);
        }
        tap->txa_start = old_txa_start;
        ATH_TX_UNLOCK(sc);

        /* Unpause the TID */
        /*
         * XXX if this is attempt=50, the TID will be downgraded
         * XXX to a non-aggregate session.  So we must unpause the
         * XXX TID here or it'll never be done.
         *
         * Also, don't call it if bar_tx/bar_wait are 0; something
         * has beaten us to the punch? (XXX figure out what?)
         */
        if (status == 0 || attempts == 50) {
                ATH_TX_LOCK(sc);
                if (atid->bar_tx == 0 || atid->bar_wait == 0)
                        DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
                            "%s: huh? bar_tx=%d, bar_wait=%d\n",
                            __func__,
                            atid->bar_tx, atid->bar_wait);
                else
                        ath_tx_tid_bar_unsuspend(sc, atid);
                ATH_TX_UNLOCK(sc);
        }
}
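
/*
 * For illustration: the hard-coded "50" above appears to track
 * net80211's default BAR retry limit, after which the session is
 * downgraded.  One possible shape of the cleanup flagged by the XXX
 * (the named constant below is hypothetical; the driver does not
 * currently define it):
 */
#if 0
#define ATH_BAR_MAX_ATTEMPTS    50      /* hypothetical named limit */

        if (status == 0 || attempts >= ATH_BAR_MAX_ATTEMPTS) {
                /* ... unsuspend as above ... */
        }
#endif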

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
        struct ath_softc *sc = ni->ni_ic->ic_softc;
        int tid = tap->txa_tid;
        struct ath_node *an = ATH_NODE(ni);
        struct ath_tid *atid = &an->an_tid[tid];

#if defined(__DragonFly__)
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: %s: TID=%d, called; resuming\n",
            __func__,
            ath_hal_ether_sprintf(ni->ni_macaddr),
            tid);
#else
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: %6D: TID=%d, called; resuming\n",
            __func__,
            ni->ni_macaddr,
            ":",
            tid);
#endif

        ATH_TX_LOCK(sc);
        atid->addba_tx_pending = 0;
        ATH_TX_UNLOCK(sc);

        /* Note: This updates the aggregate state to (again) pending */
        sc->sc_addba_response_timeout(ni, tap);

        /* Unpause the TID, which reschedules it */
        ATH_TX_LOCK(sc);
        ath_tx_tid_resume(sc, atid);
        ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

        ATH_TX_LOCK_ASSERT(sc);

        return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works.  Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held.  This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations.  There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_tid *atid;
        struct ath_txq *txq;
        int tid;

        ATH_TX_UNLOCK_ASSERT(sc);

        /* Suspend all traffic on the node */
        ATH_TX_LOCK(sc);

        if (an->an_is_powersave) {
#if defined(__DragonFly__)
                DPRINTF(sc, ATH_DEBUG_XMIT,
                    "%s: %s: node was already asleep!\n",
                    __func__, ath_hal_ether_sprintf(an->an_node.ni_macaddr));
#else
                DPRINTF(sc, ATH_DEBUG_XMIT,
                    "%s: %6D: node was already asleep!\n",
                    __func__, an->an_node.ni_macaddr, ":");
#endif
                ATH_TX_UNLOCK(sc);
                return;
        }

        for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
                atid = &an->an_tid[tid];
                txq = sc->sc_ac2q[atid->ac];

                ath_tx_tid_pause(sc, atid);
        }

        /* Mark node as in powersaving */
        an->an_is_powersave = 1;

        ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_tid *atid;
        struct ath_txq *txq;
        int tid;

        ATH_TX_UNLOCK_ASSERT(sc);

        ATH_TX_LOCK(sc);

        /* !? */
        if (an->an_is_powersave == 0) {
                ATH_TX_UNLOCK(sc);
                DPRINTF(sc, ATH_DEBUG_XMIT,
                    "%s: an=%p: node was already awake\n",
                    __func__, an);
                return;
        }

        /* Mark node as awake */
        an->an_is_powersave = 0;
        /*
         * Clear any pending leaked frame requests
         */
        an->an_leak_count = 0;

        for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
                atid = &an->an_tid[tid];
                txq = sc->sc_ac2q[atid->ac];

                ath_tx_tid_resume(sc, atid);
        }
        ATH_TX_UNLOCK(sc);
}
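
/*
 * For illustration: these two routines are the TX-side halves of a node
 * power-save transition; the driver's power-save handler (elsewhere) is
 * assumed to toggle between them roughly like this (a sketch, not the
 * actual handler):
 */
#if 0
        if (enable)
                ath_tx_node_sleep(sc, ATH_NODE(ni));
        else
                ath_tx_node_wakeup(sc, ATH_NODE(ni));
#endif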

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

        /* nothing new needed */
        return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

        /* nothing new needed */
        return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
        /*
         * For now, just set the descriptor length to sizeof(ath_desc);
         * worry about extracting the real length out of the HAL later.
         */
        sc->sc_tx_desclen = sizeof(struct ath_desc);
        sc->sc_tx_statuslen = sizeof(struct ath_desc);
        sc->sc_tx_nmaps = 1;    /* only one buffer per TX desc */

        sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
        sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
        sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

        sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
        sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

        sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
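/*
 * For illustration: ath_xmit_setup_legacy() populates the sc_tx method
 * table for the pre-11n DMA scheme.  EDMA-capable chips are assumed to
 * install their own routines into the same table from a parallel setup
 * path; the names below follow the driver's naming pattern but are
 * assumptions, not code from this file:
 */
#if 0
        sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
        sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
#endif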