/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#if defined(__DragonFly__)
/* empty */
#else
#include <machine/bus.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif
#include <netproto/802_11/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

#if defined(__DragonFly__)
extern const char* ath_hal_ether_sprintf(const uint8_t *mac);
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
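		/*
		 * Each TX descriptor holds up to sc_tx_nmaps buffer
		 * pointers, so a buffer with bf_nseg segments spans
		 * ((bf_nseg - 1) / sc_tx_nmaps) + 1 descriptors;
		 * post each of those descriptors to the ALQ log.
		 */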
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and put
 * non-QoS TID frames onto the ATH_NONQOS_TID_AC queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame. Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
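		/*
		 * Each fragment past the first gets its own ath_buf
		 * and node reference here; if an allocation fails
		 * partway through, ath_txfrag_cleanup() unwinds the
		 * whole list and the references taken so far.
		 */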
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
#if defined(__DragonFly__)
#else
	struct mbuf *m;
#endif
	int error;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, 1, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#endif
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ieee80211_free_mbuf(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors. We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_defrag(sc->sc_dmat,
		    bf->bf_dmamap, &m0,
		    bf->bf_segs, ATH_TXDESC,
		    &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ieee80211_free_mbuf(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ieee80211_free_mbuf(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ieee80211_free_mbuf(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
	struct ath_buf *bf, int is_aggr, int is_first_subframe,
	int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
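	/*
	 * For example, on an EDMA chip (numTxMaps == 4) a frame with
	 * six DMA segments fills one descriptor with segments 0-3 and
	 * a second descriptor with segments 4-5; a pre-EDMA chip uses
	 * one descriptor per segment.
	 */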
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point. It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id. These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , bf->bf_state.bfs_tx_queue
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames. Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
		    , rc[1].ratecode, rc[1].tries
		    , rc[2].ratecode, rc[2].tries
		    , rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11naggrmiddle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
		    , bf->bf_state.bfs_pktlen	/* packet length */
		    , bf->bf_state.bfs_hdrlen	/* header length */
		    , bf->bf_state.bfs_atype	/* Atheros packet type */
		    , bf->bf_state.bfs_txpower	/* txpower */
		    , bf->bf_state.bfs_txrate0
		    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
		    , bf->bf_state.bfs_keyix	/* key cache index */
		    , bf->bf_state.bfs_txantenna	/* antenna mode */
		    , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
		    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		);

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true. For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info. But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor? Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion. That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
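	/*
	 * (Note this check only warns via DPRINTF; the frame is still
	 * queued below, with bfs_tx_queue left unchanged.)
	 */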
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware. Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX We should instead just verify that sc_txstart_cnt
	 * or ath_txproc_cnt > 0. That would mean that
	 * the reset is going to be waiting for us to complete.
	 */
	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
		device_printf(sc->sc_dev,
		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
		    __func__);
	}

	/*
	 * XXX .. this is going to cause the hardware to get upset;
	 * so we really should find some way to drop or queue
	 * things.
	 */

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
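/*
 * (If the queue turns out to be empty this simply returns; otherwise
 * the TxDP is re-pointed at the head buffer and axq_link is re-derived
 * from the tail buffer's last descriptor. It is a bug to call this
 * while ATH_TXQ_PUTRUNNING is still set - see the KASSERT below.)
 */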
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued. Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index. When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ieee80211com *ic = &sc->sc_ic;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS. But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
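	/*
	 * (The value written below is the NAV: the time for a SIFS +
	 * ACK at the chosen rate and, for all but the last fragment
	 * of a burst, a further SIFS + ACK plus the airtime of the
	 * next fragment so the NAV covers the whole exchange.)
	 */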
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly. The last fragment uses only
			 * the ACK duration
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		kprintf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame. We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct. Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
	    , bf->bf_state.bfs_pktlen	/* packet length */
	    , bf->bf_state.bfs_hdrlen	/* header length */
	    , bf->bf_state.bfs_atype	/* Atheros packet type */
	    , bf->bf_state.bfs_txpower	/* txpower */
	    , bf->bf_state.bfs_txrate0
	    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
	    , bf->bf_state.bfs_keyix	/* key cache index */
	    , bf->bf_state.bfs_txantenna	/* antenna mode */
	    , bf->bf_state.bfs_txflags	/* flags */
	    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
	    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets.)
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;	/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so they go out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
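/*
 * In short: a BAR to an awake node is direct-dispatched; a BAR to a
 * sleeping node is queued (to the head of the software queue); any
 * other management/control frame to a sleeping node is
 * direct-dispatched; everything else is software queued.
 */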
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %s: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ath_hal_ether_sprintf(ni->ni_macaddr),
		    type, subtype);
#else
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
#endif
		return (0);
	} else {
		return (1);
	}
}


/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * is "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ieee80211_free_mbuf(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry */
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
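			/*
			 * (Setting bfs_doratelookup defers the lookup
			 * to ath_tx_do_ratelookup() at transmit time,
			 * so it runs against the rate control module's
			 * current state rather than the state at setup
			 * time.)
			 */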
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ieee80211_free_mbuf(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16. TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future. There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue. That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ieee80211_free_mbuf(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor. We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt. We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames. Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of a frag chain.
	 */
1801 */ 1802 m0->m_nextpkt = NULL; 1803 1804 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1805 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 1806 sc->sc_hwmap[rix].ieeerate, -1); 1807 1808 if (ieee80211_radiotap_active_vap(vap)) { 1809 u_int64_t tsf = ath_hal_gettsf64(ah); 1810 1811 sc->sc_tx_th.wt_tsf = htole64(tsf); 1812 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1813 if (iswep) 1814 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1815 if (isfrag) 1816 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1817 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1818 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); 1819 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1820 1821 ieee80211_radiotap_tx(vap, m0); 1822 } 1823 1824 /* Blank the legacy rate array */ 1825 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1826 1827 /* 1828 * ath_buf_set_rate needs at least one rate/try to setup 1829 * the rate scenario. 1830 */ 1831 bf->bf_state.bfs_rc[0].rix = rix; 1832 bf->bf_state.bfs_rc[0].tries = try0; 1833 bf->bf_state.bfs_rc[0].ratecode = txrate; 1834 1835 /* Store the decided rate index values away */ 1836 bf->bf_state.bfs_pktlen = pktlen; 1837 bf->bf_state.bfs_hdrlen = hdrlen; 1838 bf->bf_state.bfs_atype = atype; 1839 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); 1840 bf->bf_state.bfs_txrate0 = txrate; 1841 bf->bf_state.bfs_try0 = try0; 1842 bf->bf_state.bfs_keyix = keyix; 1843 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1844 bf->bf_state.bfs_txflags = flags; 1845 bf->bf_state.bfs_shpream = shortPreamble; 1846 1847 /* XXX this should be done in ath_tx_setrate() */ 1848 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1849 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1850 bf->bf_state.bfs_ctsduration = 0; 1851 bf->bf_state.bfs_ismrr = ismrr; 1852 1853 return 0; 1854 } 1855 1856 /* 1857 * Queue a frame to the hardware or software queue. 1858 * 1859 * This can be called by the net80211 code. 1860 * 1861 * XXX what about locking? Or, push the seqno assign into the 1862 * XXX aggregate scheduler so it's serialised? 1863 * 1864 * XXX When sending management frames via ath_raw_xmit(), 1865 * should CLRDMASK be set unconditionally? 1866 */ 1867 int 1868 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1869 struct ath_buf *bf, struct mbuf *m0) 1870 { 1871 struct ieee80211vap *vap = ni->ni_vap; 1872 struct ath_vap *avp = ATH_VAP(vap); 1873 int r = 0; 1874 u_int pri; 1875 int tid; 1876 struct ath_txq *txq; 1877 int ismcast; 1878 const struct ieee80211_frame *wh; 1879 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1880 ieee80211_seq seqno; 1881 uint8_t type, subtype; 1882 int queue_to_head; 1883 1884 ATH_TX_LOCK_ASSERT(sc); 1885 1886 /* 1887 * Determine the target hardware queue. 1888 * 1889 * For multicast frames, the txq gets overridden appropriately 1890 * depending upon the state of PS. 1891 * 1892 * For any other frame, we do a TID/QoS lookup inside the frame 1893 * to see what the TID should be. If it's a non-QoS frame, the 1894 * AC and TID are overridden. The TID/TXQ code assumes the 1895 * TID is on a predictable hardware TXQ, so we don't support 1896 * having a node TID queued to multiple hardware TXQs. 1897 * This may change in the future but would require some locking 1898 * fudgery.
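 *
 * As a worked example (assuming the usual net80211 WME mapping):
 * a QoS data frame with user priority 0 classifies as WME_AC_BE,
 * so tid = 0 and txq = sc->sc_ac2q[WME_AC_BE]; a non-QoS frame
 * instead lands in TID 16 (IEEE80211_NONQOS_TID), which this
 * driver services via ATH_NONQOS_TID_AC.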
1899 */ 1900 pri = ath_tx_getac(sc, m0); 1901 tid = ath_tx_gettid(sc, m0); 1902 1903 txq = sc->sc_ac2q[pri]; 1904 wh = mtod(m0, struct ieee80211_frame *); 1905 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1906 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1907 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1908 1909 /* 1910 * Enforce how deep the multicast queue can grow. 1911 * 1912 * XXX duplicated in ath_raw_xmit(). 1913 */ 1914 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1915 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1916 > sc->sc_txq_mcastq_maxdepth) { 1917 sc->sc_stats.ast_tx_mcastq_overflow++; 1918 m_freem(m0); 1919 return (ENOBUFS); 1920 } 1921 } 1922 1923 /* 1924 * Enforce how deep the unicast queue can grow. 1925 * 1926 * If the node is in power save then we don't want 1927 * the software queue to grow too deep, or a node may 1928 * end up consuming all of the ath_buf entries. 1929 * 1930 * For now, only do this for DATA frames. 1931 * 1932 * We will want to cap how many management/control 1933 * frames get punted to the software queue so it doesn't 1934 * fill up. But the correct solution isn't yet obvious. 1935 * In any case, this check should at least let frames pass 1936 * that we are direct-dispatching. 1937 * 1938 * XXX TODO: duplicate this to the raw xmit path! 1939 */ 1940 if (type == IEEE80211_FC0_TYPE_DATA && 1941 ATH_NODE(ni)->an_is_powersave && 1942 ATH_NODE(ni)->an_swq_depth > 1943 sc->sc_txq_node_psq_maxdepth) { 1944 sc->sc_stats.ast_tx_node_psq_overflow++; 1945 m_freem(m0); 1946 return (ENOBUFS); 1947 } 1948 1949 /* A-MPDU TX */ 1950 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); 1951 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); 1952 is_ampdu = is_ampdu_tx | is_ampdu_pending; 1953 1954 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", 1955 __func__, tid, pri, is_ampdu); 1956 1957 /* Set local packet state, used to queue packets to hardware */ 1958 bf->bf_state.bfs_tid = tid; 1959 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 1960 bf->bf_state.bfs_pri = pri; 1961 1962 #if 1 1963 /* 1964 * When servicing one or more stations in power-save mode 1965 * (or if there is some mcast data waiting on the mcast queue, 1966 * to prevent out-of-order delivery) multicast frames must 1967 * be buffered until after the beacon. 1968 * 1969 * TODO: we should lock the mcastq before we check the length. 1970 */ 1971 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { 1972 txq = &avp->av_mcastq; 1973 /* 1974 * Mark the frame as eventually belonging on the CAB 1975 * queue, so the descriptor setup functions will 1976 * correctly initialise the descriptor 'qcuId' field. 1977 */ 1978 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; 1979 } 1980 #endif 1981 1982 /* Do the generic frame setup */ 1983 /* XXX should just bzero the bf_state? */ 1984 bf->bf_state.bfs_dobaw = 0; 1985 1986 /* A-MPDU TX? Manually set sequence number */ 1987 /* 1988 * Don't do it whilst pending; the net80211 layer still 1989 * assigns them. 1990 */ 1991 if (is_ampdu_tx) { 1992 /* 1993 * Always call; this function will 1994 * handle making sure that null data frames 1995 * don't get a sequence number from the current 1996 * TID and thus mess with the BAW. 1997 */ 1998 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 1999 2000 /* 2001 * Don't add QoS NULL frames to the BAW.
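 * (QoS NULL frames take their sequence number from the
 * IEEE80211_NONQOS_TID counter - see ath_tx_tid_seqno_assign()
 * below - so tracking them in this TID's BAW would punch a
 * permanent hole in the window.)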
2002 */ 2003 if (IEEE80211_QOS_HAS_SEQ(wh) && 2004 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2005 bf->bf_state.bfs_dobaw = 1; 2006 } 2007 } 2008 2009 /* 2010 * If needed, the sequence number has been assigned. 2011 * Squirrel it away somewhere easy to get to. 2012 */ 2013 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 2014 2015 /* Is ampdu pending? fetch the seqno and print it out */ 2016 if (is_ampdu_pending) 2017 DPRINTF(sc, ATH_DEBUG_SW_TX, 2018 "%s: tid %d: ampdu pending, seqno %d\n", 2019 __func__, tid, M_SEQNO_GET(m0)); 2020 2021 /* This also sets up the DMA map */ 2022 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2023 2024 if (r != 0) 2025 goto done; 2026 2027 /* At this point m0 could have changed! */ 2028 m0 = bf->bf_m; 2029 2030 #if 1 2031 /* 2032 * If it's a multicast frame, do a direct-dispatch to the 2033 * destination hardware queue. Don't bother software 2034 * queuing it. 2035 */ 2036 /* 2037 * If it's a BAR frame, do a direct dispatch to the 2038 * destination hardware queue. Don't bother software 2039 * queuing it, as the TID will now be paused. 2040 * Sending a BAR frame can occur from the net80211 txa timer 2041 * (ie, retries) or from the ath txtask (completion call.) 2042 * It queues directly to hardware because the TID is paused 2043 * at this point (and won't be unpaused until the BAR has 2044 * either been TXed successfully or max retries has been 2045 * reached.) 2046 */ 2047 /* 2048 * Until things are better debugged - if this node is asleep 2049 * and we're sending it a non-BAR frame, direct dispatch it. 2050 * Why? Because we need to figure out what's actually being 2051 * sent - eg, during reassociation/reauthentication after 2052 * the node (last) disappeared whilst asleep, the driver should 2053 * have unpaused/unsleep'ed the node. So until that is 2054 * sorted out, use this workaround. 2055 */ 2056 if (txq == &avp->av_mcastq) { 2057 DPRINTF(sc, ATH_DEBUG_SW_TX, 2058 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2059 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2060 ath_tx_xmit_normal(sc, txq, bf); 2061 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2062 &queue_to_head)) { 2063 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2064 } else { 2065 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2066 ath_tx_xmit_normal(sc, txq, bf); 2067 } 2068 #else 2069 /* 2070 * For now, since there's no software queue, 2071 * direct-dispatch to the hardware. 2072 */ 2073 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2074 /* 2075 * Update the current leak count if 2076 * we're leaking frames; and set the 2077 * MORE flag as appropriate. 
2078 */ 2079 ath_tx_leak_count_update(sc, tid, bf); 2080 ath_tx_xmit_normal(sc, txq, bf); 2081 #endif 2082 done: 2083 return 0; 2084 } 2085 2086 static int 2087 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2088 struct ath_buf *bf, struct mbuf *m0, 2089 const struct ieee80211_bpf_params *params) 2090 { 2091 struct ieee80211com *ic = &sc->sc_ic; 2092 struct ath_hal *ah = sc->sc_ah; 2093 struct ieee80211vap *vap = ni->ni_vap; 2094 int error, ismcast, ismrr; 2095 int keyix, hdrlen, pktlen, try0, txantenna; 2096 u_int8_t rix, txrate; 2097 struct ieee80211_frame *wh; 2098 u_int flags; 2099 HAL_PKT_TYPE atype; 2100 const HAL_RATE_TABLE *rt; 2101 struct ath_desc *ds; 2102 u_int pri; 2103 int o_tid = -1; 2104 int do_override; 2105 uint8_t type, subtype; 2106 int queue_to_head; 2107 struct ath_node *an = ATH_NODE(ni); 2108 2109 ATH_TX_LOCK_ASSERT(sc); 2110 2111 wh = mtod(m0, struct ieee80211_frame *); 2112 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2113 hdrlen = ieee80211_anyhdrsize(wh); 2114 /* 2115 * Packet length must not include any 2116 * pad bytes; deduct them here. 2117 */ 2118 /* XXX honor IEEE80211_BPF_DATAPAD */ 2119 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2120 2121 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2122 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2123 2124 ATH_KTR(sc, ATH_KTR_TX, 2, 2125 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2126 2127 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2128 __func__, ismcast); 2129 2130 pri = params->ibp_pri & 3; 2131 /* Override pri if the frame isn't a QoS one */ 2132 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2133 pri = ath_tx_getac(sc, m0); 2134 2135 /* XXX If it's an ADDBA, override the correct queue */ 2136 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2137 2138 /* Map ADDBA to the correct priority */ 2139 if (do_override) { 2140 #if 0 2141 DPRINTF(sc, ATH_DEBUG_XMIT, 2142 "%s: overriding tid %d pri %d -> %d\n", 2143 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2144 #endif 2145 pri = TID_TO_WME_AC(o_tid); 2146 } 2147 2148 /* Handle encryption twiddling if needed */ 2149 if (! ath_tx_tag_crypto(sc, ni, 2150 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2151 &hdrlen, &pktlen, &keyix)) { 2152 ieee80211_free_mbuf(m0); 2153 return EIO; 2154 } 2155 /* packet header may have moved, reset our local pointer */ 2156 wh = mtod(m0, struct ieee80211_frame *); 2157 2158 /* Do the generic frame setup */ 2159 /* XXX should just bzero the bf_state? */ 2160 bf->bf_state.bfs_dobaw = 0; 2161 2162 error = ath_tx_dmasetup(sc, bf, m0); 2163 if (error != 0) 2164 return error; 2165 m0 = bf->bf_m; /* NB: may have changed */ 2166 wh = mtod(m0, struct ieee80211_frame *); 2167 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2168 bf->bf_node = ni; /* NB: held reference */ 2169 2170 /* Always enable CLRDMASK for raw frames for now.. */ 2171 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2172 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2173 if (params->ibp_flags & IEEE80211_BPF_RTS) 2174 flags |= HAL_TXDESC_RTSENA; 2175 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2176 /* XXX assume 11g/11n protection? */ 2177 bf->bf_state.bfs_doprot = 1; 2178 flags |= HAL_TXDESC_CTSENA; 2179 } 2180 /* XXX leave ismcast to injector? 
*/ 2181 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2182 flags |= HAL_TXDESC_NOACK; 2183 2184 rt = sc->sc_currates; 2185 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2186 2187 /* Fetch first rate information */ 2188 rix = ath_tx_findrix(sc, params->ibp_rate0); 2189 try0 = params->ibp_try0; 2190 2191 /* 2192 * Override EAPOL rate as appropriate. 2193 */ 2194 if (m0->m_flags & M_EAPOL) { 2195 /* XXX? maybe always use long preamble? */ 2196 rix = an->an_mgmtrix; 2197 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 2198 } 2199 2200 txrate = rt->info[rix].rateCode; 2201 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2202 txrate |= rt->info[rix].shortPreamble; 2203 sc->sc_txrix = rix; 2204 ismrr = (params->ibp_try1 != 0); 2205 txantenna = params->ibp_pri >> 2; 2206 if (txantenna == 0) /* XXX? */ 2207 txantenna = sc->sc_txantenna; 2208 2209 /* 2210 * Since ctsrate is fixed, store it away for later 2211 * use when the descriptor fields are being set. 2212 */ 2213 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2214 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2215 2216 /* 2217 * NB: we mark all packets as type PSPOLL so the h/w won't 2218 * set the sequence number, duration, etc. 2219 */ 2220 atype = HAL_PKT_TYPE_PSPOLL; 2221 2222 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2223 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2224 sc->sc_hwmap[rix].ieeerate, -1); 2225 2226 if (ieee80211_radiotap_active_vap(vap)) { 2227 u_int64_t tsf = ath_hal_gettsf64(ah); 2228 2229 sc->sc_tx_th.wt_tsf = htole64(tsf); 2230 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2231 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2232 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2233 if (m0->m_flags & M_FRAG) 2234 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2235 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2236 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2237 ieee80211_get_node_txpower(ni)); 2238 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2239 2240 ieee80211_radiotap_tx(vap, m0); 2241 } 2242 2243 /* 2244 * Formulate first tx descriptor with tx controls. 2245 */ 2246 ds = bf->bf_desc; 2247 /* XXX check return value? */ 2248 2249 /* Store the decided rate index values away */ 2250 bf->bf_state.bfs_pktlen = pktlen; 2251 bf->bf_state.bfs_hdrlen = hdrlen; 2252 bf->bf_state.bfs_atype = atype; 2253 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2254 ieee80211_get_node_txpower(ni)); 2255 bf->bf_state.bfs_txrate0 = txrate; 2256 bf->bf_state.bfs_try0 = try0; 2257 bf->bf_state.bfs_keyix = keyix; 2258 bf->bf_state.bfs_txantenna = txantenna; 2259 bf->bf_state.bfs_txflags = flags; 2260 bf->bf_state.bfs_shpream = 2261 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2262 2263 /* Set local packet state, used to queue packets to hardware */ 2264 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2265 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2266 bf->bf_state.bfs_pri = pri; 2267 2268 /* XXX this should be done in ath_tx_setrate() */ 2269 bf->bf_state.bfs_ctsrate = 0; 2270 bf->bf_state.bfs_ctsduration = 0; 2271 bf->bf_state.bfs_ismrr = ismrr; 2272 2273 /* Blank the legacy rate array */ 2274 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2275 2276 bf->bf_state.bfs_rc[0].rix = rix; 2277 bf->bf_state.bfs_rc[0].tries = try0; 2278 bf->bf_state.bfs_rc[0].ratecode = txrate; 2279 2280 if (ismrr) { 2281 int rix; 2282 2283 rix = ath_tx_findrix(sc, params->ibp_rate1); 2284 bf->bf_state.bfs_rc[1].rix = rix; 2285 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2286 2287 rix = ath_tx_findrix(sc, params->ibp_rate2); 2288 bf->bf_state.bfs_rc[2].rix = rix; 2289 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2290 2291 rix = ath_tx_findrix(sc, params->ibp_rate3); 2292 bf->bf_state.bfs_rc[3].rix = rix; 2293 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2294 } 2295 /* 2296 * All the required rate control decisions have been made; 2297 * fill in the rc flags. 2298 */ 2299 ath_tx_rate_fill_rcflags(sc, bf); 2300 2301 /* NB: no buffered multicast in power save support */ 2302 2303 /* 2304 * If we're overriding the ADDBA destination, dump directly 2305 * into the hardware queue, right after any pending 2306 * frames to that node. 2307 */ 2308 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2309 __func__, do_override); 2310 2311 #if 1 2312 /* 2313 * Put addba frames in the right place in the right TID/HWQ. 2314 */ 2315 if (do_override) { 2316 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2317 /* 2318 * XXX if it's addba frames, should we be leaking 2319 * them out via the frame leak method? 2320 * XXX for now let's not risk it; but we may wish 2321 * to investigate this later. 2322 */ 2323 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2324 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2325 &queue_to_head)) { 2326 /* Queue to software queue */ 2327 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2328 } else { 2329 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2330 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2331 } 2332 #else 2333 /* Direct-dispatch to the hardware */ 2334 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2335 /* 2336 * Update the current leak count if 2337 * we're leaking frames; and set the 2338 * MORE flag as appropriate. 2339 */ 2340 ath_tx_leak_count_update(sc, tid, bf); 2341 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2342 #endif 2343 return 0; 2344 } 2345 2346 /* 2347 * Send a raw frame. 2348 * 2349 * This can be called by net80211.
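 *
 * Note that it takes the ATH_PCU, ATH (power state) and ATH_TX
 * locks itself, so it must not be entered with any of them held.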
2350 */ 2351 int 2352 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2353 const struct ieee80211_bpf_params *params) 2354 { 2355 struct ieee80211com *ic = ni->ni_ic; 2356 struct ath_softc *sc = ic->ic_softc; 2357 struct ath_buf *bf; 2358 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2359 int error = 0; 2360 2361 ATH_PCU_LOCK(sc); 2362 if (sc->sc_inreset_cnt > 0) { 2363 DPRINTF(sc, ATH_DEBUG_XMIT, 2364 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2365 error = EIO; 2366 ATH_PCU_UNLOCK(sc); 2367 goto badbad; 2368 } 2369 sc->sc_txstart_cnt++; 2370 ATH_PCU_UNLOCK(sc); 2371 2372 /* Wake the hardware up already */ 2373 ATH_LOCK(sc); 2374 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2375 ATH_UNLOCK(sc); 2376 2377 ATH_TX_LOCK(sc); 2378 2379 if (!sc->sc_running || sc->sc_invalid) { 2380 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", 2381 __func__, sc->sc_running, sc->sc_invalid); 2382 m_freem(m); 2383 error = ENETDOWN; 2384 goto bad; 2385 } 2386 2387 /* 2388 * Enforce how deep the multicast queue can grow. 2389 * 2390 * XXX duplicated in ath_tx_start(). 2391 */ 2392 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2393 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2394 > sc->sc_txq_mcastq_maxdepth) { 2395 sc->sc_stats.ast_tx_mcastq_overflow++; 2396 error = ENOBUFS; 2397 } 2398 2399 if (error != 0) { 2400 m_freem(m); 2401 goto bad; 2402 } 2403 } 2404 2405 /* 2406 * Grab a TX buffer and associated resources. 2407 */ 2408 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2409 if (bf == NULL) { 2410 sc->sc_stats.ast_tx_nobuf++; 2411 m_freem(m); 2412 error = ENOBUFS; 2413 goto bad; 2414 } 2415 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2416 m, params, bf); 2417 2418 if (params == NULL) { 2419 /* 2420 * Legacy path; interpret frame contents to decide 2421 * precisely how to send the frame. 2422 */ 2423 if (ath_tx_start(sc, ni, bf, m)) { 2424 error = EIO; /* XXX */ 2425 goto bad2; 2426 } 2427 } else { 2428 /* 2429 * Caller supplied explicit parameters to use in 2430 * sending the frame. 2431 */ 2432 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2433 error = EIO; /* XXX */ 2434 goto bad2; 2435 } 2436 } 2437 sc->sc_wd_timer = 5; 2438 sc->sc_stats.ast_tx_raw++; 2439 2440 /* 2441 * Update the TIM - if there's anything queued to the 2442 * software queue and power save is enabled, we should 2443 * set the TIM. 2444 */ 2445 ath_tx_update_tim(sc, ni, 1); 2446 2447 ATH_TX_UNLOCK(sc); 2448 2449 ATH_PCU_LOCK(sc); 2450 sc->sc_txstart_cnt--; 2451 ATH_PCU_UNLOCK(sc); 2452 2453 2454 /* Put the hardware back to sleep if required */ 2455 ATH_LOCK(sc); 2456 ath_power_restore_power_state(sc); 2457 ATH_UNLOCK(sc); 2458 2459 return 0; 2460 2461 bad2: 2462 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2463 "bf=%p", 2464 m, 2465 params, 2466 bf); 2467 ATH_TXBUF_LOCK(sc); 2468 ath_returnbuf_head(sc, bf); 2469 ATH_TXBUF_UNLOCK(sc); 2470 2471 bad: 2472 ATH_TX_UNLOCK(sc); 2473 2474 ATH_PCU_LOCK(sc); 2475 sc->sc_txstart_cnt--; 2476 ATH_PCU_UNLOCK(sc); 2477 2478 /* Put the hardware back to sleep if required */ 2479 ATH_LOCK(sc); 2480 ath_power_restore_power_state(sc); 2481 ATH_UNLOCK(sc); 2482 2483 badbad: 2484 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2485 m, params); 2486 sc->sc_stats.ast_tx_raw_fail++; 2487 2488 return error; 2489 } 2490 2491 /* Some helper functions */ 2492 2493 /* 2494 * ADDBA (and potentially others) need to be placed in the same 2495 * hardware queue as the TID/node it's relating to. 
This is so 2496 * it goes out after any pending non-aggregate frames to the 2497 * same node/TID. 2498 * 2499 * If this isn't done, the ADDBA can go out before the frames 2500 * queued in hardware. Even though these frames have sequence 2501 * numbers -earlier- than the ADDBA, they can be transmitted (and 2502 * arrive) after it - no frames whose sequence numbers are after 2503 * the ADDBA should be queued yet! - and the receiving end will 2504 * simply drop them as being out of the BAW. 2505 * 2506 * The frames can't be appended to the TID software queue - they'd 2507 * never be sent out. So these frames have to be directly 2508 * dispatched to the hardware, rather than queued in software. 2509 * So if this function returns true, the TXQ has to be 2510 * overridden and the frame directly dispatched. 2511 * 2512 * It's a dirty hack, but someone's gotta do it. 2513 */ 2514 2515 /* 2516 * XXX doesn't belong here! 2517 */ 2518 static int 2519 ieee80211_is_action(struct ieee80211_frame *wh) 2520 { 2521 /* Type: Management frame? */ 2522 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2523 IEEE80211_FC0_TYPE_MGT) 2524 return 0; 2525 2526 /* Subtype: Action frame? */ 2527 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2528 IEEE80211_FC0_SUBTYPE_ACTION) 2529 return 0; 2530 2531 return 1; 2532 } 2533 2534 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2535 /* 2536 * Return an alternate TID for ADDBA request frames. 2537 * 2538 * Yes, this likely should be done in the net80211 layer. 2539 */ 2540 static int 2541 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2542 struct ieee80211_node *ni, 2543 struct mbuf *m0, int *tid) 2544 { 2545 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2546 struct ieee80211_action_ba_addbarequest *ia; 2547 uint8_t *frm; 2548 uint16_t baparamset; 2549 2550 /* Not action frame? Bail */ 2551 if (! ieee80211_is_action(wh)) 2552 return 0; 2553 2554 /* XXX Not needed for frames we send? */ 2555 #if 0 2556 /* Correct length? */ 2557 if (! ieee80211_parse_action(ni, m)) 2558 return 0; 2559 #endif 2560 2561 /* Extract out action frame */ 2562 frm = (u_int8_t *)&wh[1]; 2563 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2564 2565 /* Not ADDBA? Bail */ 2566 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2567 return 0; 2568 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2569 return 0; 2570 2571 /* Extract TID, return it */ 2572 baparamset = le16toh(ia->rq_baparamset); 2573 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2574 2575 return 1; 2576 } 2577 #undef MS 2578 2579 /* Per-node software queue operations */ 2580 2581 /* 2582 * Add the current packet to the given BAW. 2583 * It is assumed that the current packet 2584 * 2585 * + fits inside the BAW; 2586 * + already has had a sequence number allocated. 2587 * 2588 * Since the BAW status may be modified by both the ath task and 2589 * the net80211/ifnet contexts, the TID must be locked. 2590 */ 2591 void 2592 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2593 struct ath_tid *tid, struct ath_buf *bf) 2594 { 2595 int index, cindex; 2596 struct ieee80211_tx_ampdu *tap; 2597 2598 ATH_TX_LOCK_ASSERT(sc); 2599 2600 if (bf->bf_state.bfs_isretried) 2601 return; 2602 2603 tap = ath_tx_get_tx_tid(an, tid->tid); 2604 2605 if (! 
bf->bf_state.bfs_dobaw) { 2606 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2607 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2608 __func__, SEQNO(bf->bf_state.bfs_seqno), 2609 tap->txa_start, tap->txa_wnd); 2610 } 2611 2612 if (bf->bf_state.bfs_addedbaw) 2613 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2614 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2615 "baw head=%d tail=%d\n", 2616 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2617 tap->txa_start, tap->txa_wnd, tid->baw_head, 2618 tid->baw_tail); 2619 2620 /* 2621 * Verify that the given sequence number is not outside of the 2622 * BAW. Complain loudly if that's the case. 2623 */ 2624 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2625 SEQNO(bf->bf_state.bfs_seqno))) { 2626 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2627 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2628 "baw head=%d tail=%d\n", 2629 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2630 tap->txa_start, tap->txa_wnd, tid->baw_head, 2631 tid->baw_tail); 2632 } 2633 2634 /* 2635 * ni->ni_txseqs[] is the currently allocated seqno. 2636 * The txa state contains the current BAW start. 2637 */ 2638 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2639 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2640 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2641 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2642 "baw head=%d tail=%d\n", 2643 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2644 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2645 tid->baw_tail); 2646 2647 2648 #if 0 2649 assert(tid->tx_buf[cindex] == NULL); 2650 #endif 2651 if (tid->tx_buf[cindex] != NULL) { 2652 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2653 "%s: ba packet dup (index=%d, cindex=%d, " 2654 "head=%d, tail=%d)\n", 2655 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2656 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2657 "%s: BA bf: %p; seqno=%d; new bf: %p; seqno=%d\n", 2658 __func__, 2659 tid->tx_buf[cindex], 2660 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2661 bf, 2662 SEQNO(bf->bf_state.bfs_seqno) 2663 ); 2664 } 2665 tid->tx_buf[cindex] = bf; 2666 2667 if (index >= ((tid->baw_tail - tid->baw_head) & 2668 (ATH_TID_MAX_BUFS - 1))) { 2669 tid->baw_tail = cindex; 2670 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2671 } 2672 } 2673 2674 /* 2675 * Flip the BAW buffer entry over from the existing one to the new one. 2676 * 2677 * When software retransmitting a (sub-)frame, it is entirely possible that 2678 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2679 * In that instance the buffer is cloned and the new buffer is used for 2680 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2681 * tracking array to maintain consistency. 2682 */ 2683 static void 2684 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2685 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2686 { 2687 int index, cindex; 2688 struct ieee80211_tx_ampdu *tap; 2689 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2690 2691 ATH_TX_LOCK_ASSERT(sc); 2692 2693 tap = ath_tx_get_tx_tid(an, tid->tid); 2694 index = ATH_BA_INDEX(tap->txa_start, seqno); 2695 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2696 2697 /* 2698 * Just warn for now; if it happens then we should find out 2699 * about it. It's highly likely the aggregation session will 2700 * soon hang.
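 *
 * For reference, the slot math used here and above (assuming
 * ATH_BA_INDEX() is the usual sequence-space offset from the
 * BAW left edge): with txa_start=100 and baw_head=5, a frame
 * with seqno=103 yields index=3 and
 * cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1), ie the seqno
 * offset rotated by the ring head.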
2701 */ 2702 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2703 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2704 "%s: retransmitted buffer" 2705 " has mismatching seqnos; BA session may hang.\n", 2706 __func__); 2707 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2708 "%s: old seqno=%d, new_seqno=%d\n", __func__, 2709 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); 2710 } 2711 2712 if (tid->tx_buf[cindex] != old_bf) { 2713 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2714 "%s: ath_buf pointer incorrect; " 2715 "BA session may hang.\n", __func__); 2716 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2717 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); 2718 } 2719 2720 tid->tx_buf[cindex] = new_bf; 2721 } 2722 2723 /* 2724 * seq_start - left edge of BAW 2725 * seq_next - current/next sequence number to allocate 2726 * 2727 * Since the BAW status may be modified by both the ath task and 2728 * the net80211/ifnet contexts, the TID must be locked. 2729 */ 2730 static void 2731 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2732 struct ath_tid *tid, const struct ath_buf *bf) 2733 { 2734 int index, cindex; 2735 struct ieee80211_tx_ampdu *tap; 2736 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2737 2738 ATH_TX_LOCK_ASSERT(sc); 2739 2740 tap = ath_tx_get_tx_tid(an, tid->tid); 2741 index = ATH_BA_INDEX(tap->txa_start, seqno); 2742 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2743 2744 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2745 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2746 "baw head=%d, tail=%d\n", 2747 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2748 cindex, tid->baw_head, tid->baw_tail); 2749 2750 /* 2751 * If this occurs then we have a big problem - something else 2752 * has slid tap->txa_start along without updating the BAW 2753 * tracking start/end pointers. Thus the TX BAW state is now 2754 * completely busted. 2755 * 2756 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2757 * it's quite possible that a cloned buffer is making its way 2758 * here and causing it to fire off. Disable TDMA for now. 2759 */ 2760 if (tid->tx_buf[cindex] != bf) { 2761 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2762 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2763 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 2764 tid->tx_buf[cindex], 2765 (tid->tx_buf[cindex] != NULL) ? 2766 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); 2767 } 2768 2769 tid->tx_buf[cindex] = NULL; 2770 2771 while (tid->baw_head != tid->baw_tail && 2772 !tid->tx_buf[tid->baw_head]) { 2773 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2774 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2775 } 2776 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2777 "%s: tid=%d: baw is now %d:%d, baw head=%d\n", 2778 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); 2779 } 2780 2781 static void 2782 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, 2783 struct ath_buf *bf) 2784 { 2785 struct ieee80211_frame *wh; 2786 2787 ATH_TX_LOCK_ASSERT(sc); 2788 2789 if (tid->an->an_leak_count > 0) { 2790 wh = mtod(bf->bf_m, struct ieee80211_frame *); 2791 2792 /* 2793 * Update MORE based on the software/net80211 queue states.
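 * (The MORE_DATA bit is what tells a power-save station whether
 * to stay awake for another frame: it is set while either the
 * net80211 power-save queue or our software queue still holds
 * traffic for this node, and cleared on the last buffered frame
 * so the station can doze.)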
2794 */ 2795 if ((tid->an->an_stack_psq > 0) 2796 || (tid->an->an_swq_depth > 0)) 2797 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 2798 else 2799 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; 2800 2801 #if defined(__DragonFly__) 2802 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 2803 "%s: %s: leak count = %d, psq=%d, swq=%d, MORE=%d\n", 2804 __func__, 2805 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 2806 tid->an->an_leak_count, 2807 tid->an->an_stack_psq, 2808 tid->an->an_swq_depth, 2809 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); 2810 #else 2811 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 2812 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", 2813 __func__, 2814 tid->an->an_node.ni_macaddr, 2815 ":", 2816 tid->an->an_leak_count, 2817 tid->an->an_stack_psq, 2818 tid->an->an_swq_depth, 2819 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); 2820 #endif 2821 2822 /* 2823 * Re-sync the underlying buffer. 2824 */ 2825 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2826 BUS_DMASYNC_PREWRITE); 2827 2828 tid->an->an_leak_count--; 2829 } 2830 } 2831 2832 static int 2833 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) 2834 { 2835 2836 ATH_TX_LOCK_ASSERT(sc); 2837 2838 if (tid->an->an_leak_count > 0) { 2839 return (1); 2840 } 2841 if (tid->paused) 2842 return (0); 2843 return (1); 2844 } 2845 2846 /* 2847 * Mark the current node/TID as ready to TX. 2848 * 2849 * This is done to make it easy for the software scheduler to 2850 * find which nodes have data to send. 2851 * 2852 * The TXQ lock must be held. 2853 */ 2854 void 2855 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2856 { 2857 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2858 2859 ATH_TX_LOCK_ASSERT(sc); 2860 2861 /* 2862 * If we are leaking out a frame to this destination 2863 * for PS-POLL, ensure that we allow scheduling to 2864 * occur. 2865 */ 2866 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 2867 return; /* paused, can't schedule yet */ 2868 2869 if (tid->sched) 2870 return; /* already scheduled */ 2871 2872 tid->sched = 1; 2873 2874 #if 0 2875 /* 2876 * If this is a sleeping node we're leaking to, give 2877 * it a higher priority. This is so bad for QoS it hurts. 2878 */ 2879 if (tid->an->an_leak_count) { 2880 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); 2881 } else { 2882 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2883 } 2884 #endif 2885 2886 /* 2887 * We can't do the above - it'll confuse the TXQ software 2888 * scheduler which will keep checking the _head_ TID 2889 * in the list to see if it has traffic. If we queue 2890 * a TID to the head of the list and it doesn't transmit, 2891 * we'll check it again. 2892 * 2893 * So, get the rest of this frame-leaking support working 2894 * and reliable first and _then_ optimise it so they're 2895 * pushed out in front of any other pending software 2896 * queued nodes. 2897 */ 2898 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2899 } 2900 2901 /* 2902 * Mark the current node as no longer needing to be polled for 2903 * TX packets. 2904 * 2905 * The TXQ lock must be held. 2906 */ 2907 static void 2908 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2909 { 2910 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2911 2912 ATH_TX_LOCK_ASSERT(sc); 2913 2914 if (tid->sched == 0) 2915 return; 2916 2917 tid->sched = 0; 2918 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2919 } 2920 2921 /* 2922 * Assign a sequence number manually to the given frame. 2923 * 2924 * This should only be called for A-MPDU TX frames.
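 *
 * Returns the assigned sequence number, or -1 if the frame
 * doesn't require one (ie, it fails IEEE80211_QOS_HAS_SEQ()).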
2925 */ 2926 static ieee80211_seq 2927 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2928 struct ath_buf *bf, struct mbuf *m0) 2929 { 2930 struct ieee80211_frame *wh; 2931 int tid, pri; 2932 ieee80211_seq seqno; 2933 uint8_t subtype; 2934 2935 /* TID lookup */ 2936 wh = mtod(m0, struct ieee80211_frame *); 2937 pri = M_WME_GETAC(m0); /* honor classification */ 2938 tid = WME_AC_TO_TID(pri); 2939 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2940 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2941 2942 /* XXX Is it a control frame? Ignore */ 2943 2944 /* Does the packet require a sequence number? */ 2945 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2946 return -1; 2947 2948 ATH_TX_LOCK_ASSERT(sc); 2949 2950 /* 2951 * Is it a QOS NULL Data frame? Give it a sequence number from 2952 * the default TID (IEEE80211_NONQOS_TID.) 2953 * 2954 * The RX path of everything I've looked at doesn't include the NULL 2955 * data frame sequence number in the aggregation state updates, so 2956 * assigning it a sequence number there will cause a BAW hole on the 2957 * RX side. 2958 */ 2959 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2960 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2961 /* XXX no locking for this TID? This is a bit of a problem. */ 2962 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2963 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2964 } else { 2965 /* Manually assign sequence number */ 2966 seqno = ni->ni_txseqs[tid]; 2967 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2968 } 2969 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2970 M_SEQNO_SET(m0, seqno); 2971 2972 /* Return so caller can do something with it if needed */ 2973 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2974 return seqno; 2975 } 2976 2977 /* 2978 * Attempt to direct dispatch an aggregate frame to hardware. 2979 * If the frame is out of BAW, queue. 2980 * Otherwise, schedule it as a single frame. 2981 */ 2982 static void 2983 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2984 struct ath_txq *txq, struct ath_buf *bf) 2985 { 2986 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2987 struct ieee80211_tx_ampdu *tap; 2988 2989 ATH_TX_LOCK_ASSERT(sc); 2990 2991 tap = ath_tx_get_tx_tid(an, tid->tid); 2992 2993 /* paused? queue */ 2994 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 2995 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2996 /* XXX don't sched - we're paused! */ 2997 return; 2998 } 2999 3000 /* outside baw? queue */ 3001 if (bf->bf_state.bfs_dobaw && 3002 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 3003 SEQNO(bf->bf_state.bfs_seqno)))) { 3004 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3005 ath_tx_tid_sched(sc, tid); 3006 return; 3007 } 3008 3009 /* 3010 * This is a temporary check and should be removed once 3011 * all the relevant code paths have been fixed. 3012 * 3013 * During aggregate retries, it's possible that the head 3014 * frame will fail (which has the bfs_aggr and bfs_nframes 3015 * fields set for said aggregate) and will be retried as 3016 * a single frame. In this instance, the values should 3017 * be reset or the completion code will get upset with you. 
3018 */ 3019 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 3020 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3021 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 3022 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 3023 bf->bf_state.bfs_aggr = 0; 3024 bf->bf_state.bfs_nframes = 1; 3025 } 3026 3027 /* Update CLRDMASK just before this frame is queued */ 3028 ath_tx_update_clrdmask(sc, tid, bf); 3029 3030 /* Direct dispatch to hardware */ 3031 ath_tx_do_ratelookup(sc, bf); 3032 ath_tx_calc_duration(sc, bf); 3033 ath_tx_calc_protection(sc, bf); 3034 ath_tx_set_rtscts(sc, bf); 3035 ath_tx_rate_fill_rcflags(sc, bf); 3036 ath_tx_setds(sc, bf); 3037 3038 /* Statistics */ 3039 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3040 3041 /* Track per-TID hardware queue depth correctly */ 3042 tid->hwq_depth++; 3043 3044 /* Add to BAW */ 3045 if (bf->bf_state.bfs_dobaw) { 3046 ath_tx_addto_baw(sc, an, tid, bf); 3047 bf->bf_state.bfs_addedbaw = 1; 3048 } 3049 3050 /* Set completion handler, multi-frame aggregate or not */ 3051 bf->bf_comp = ath_tx_aggr_comp; 3052 3053 /* 3054 * Update the current leak count if 3055 * we're leaking frames; and set the 3056 * MORE flag as appropriate. 3057 */ 3058 ath_tx_leak_count_update(sc, tid, bf); 3059 3060 /* Hand off to hardware */ 3061 ath_tx_handoff(sc, txq, bf); 3062 } 3063 3064 /* 3065 * Attempt to send the packet. 3066 * If the queue isn't busy, direct-dispatch. 3067 * If the queue is busy enough, queue the given packet on the 3068 * relevant software queue. 3069 */ 3070 void 3071 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, 3072 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) 3073 { 3074 struct ath_node *an = ATH_NODE(ni); 3075 struct ieee80211_frame *wh; 3076 struct ath_tid *atid; 3077 int pri, tid; 3078 struct mbuf *m0 = bf->bf_m; 3079 3080 ATH_TX_LOCK_ASSERT(sc); 3081 3082 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 3083 wh = mtod(m0, struct ieee80211_frame *); 3084 pri = ath_tx_getac(sc, m0); 3085 tid = ath_tx_gettid(sc, m0); 3086 atid = &an->an_tid[tid]; 3087 3088 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 3089 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3090 3091 /* Set local packet state, used to queue packets to hardware */ 3092 /* XXX potentially duplicate info, re-check */ 3093 bf->bf_state.bfs_tid = tid; 3094 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 3095 bf->bf_state.bfs_pri = pri; 3096 3097 /* 3098 * If the hardware queue isn't busy, direct-dispatch it. 3099 * If the hardware queue is busy, software queue it. 3100 * If the TID is paused or the traffic is outside the BAW, 3101 * software queue it. 3102 * 3103 * If the node is in power-save and we're leaking a frame, 3104 * leak a single frame. 3105 */ 3106 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { 3107 /* TID is paused, queue */ 3108 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3109 /* 3110 * If the caller requested that it be sent at a high 3111 * priority, queue it at the head of the list. 3112 */ 3113 if (queue_to_head) 3114 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3115 else 3116 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3117 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 3118 /* AMPDU pending; queue */ 3119 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 3120 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3121 /* XXX sched? */ 3122 } else if (ath_tx_ampdu_running(sc, an, tid)) { 3123 /* AMPDU running, attempt direct dispatch if possible */ 3124 3125 /* 3126 * Always queue the frame to the tail of the list.
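 * (Queueing to the tail and then dispatching from the head
 * keeps the TID in sequence number order; the frame we just
 * queued is only sent immediately if it is also the head of
 * the list.)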
3127 */ 3128 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3129 3130 /* 3131 * If the hardware queue isn't busy, direct dispatch 3132 * the head frame in the list. Don't schedule the 3133 * TID - let it build some more frames first? 3134 * 3135 * When running A-MPDU, always just check the hardware 3136 * queue depth against the aggregate frame limit. 3137 * We don't want to burst a large number of single frames 3138 * out to the hardware; we want to aggressively hold back. 3139 * 3140 * Otherwise, schedule the TID. 3141 */ 3142 /* XXX TXQ locking */ 3143 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) { 3144 bf = ATH_TID_FIRST(atid); 3145 ATH_TID_REMOVE(atid, bf, bf_list); 3146 3147 /* 3148 * Ensure it's definitely treated as a non-AMPDU 3149 * frame - this information may have been left 3150 * over from a previous attempt. 3151 */ 3152 bf->bf_state.bfs_aggr = 0; 3153 bf->bf_state.bfs_nframes = 1; 3154 3155 /* Queue to the hardware */ 3156 ath_tx_xmit_aggr(sc, an, txq, bf); 3157 DPRINTF(sc, ATH_DEBUG_SW_TX, 3158 "%s: xmit_aggr\n", 3159 __func__); 3160 } else { 3161 DPRINTF(sc, ATH_DEBUG_SW_TX, 3162 "%s: ampdu; swq'ing\n", 3163 __func__); 3164 3165 ath_tx_tid_sched(sc, atid); 3166 } 3167 /* 3168 * If we're not doing A-MPDU, be prepared to direct dispatch 3169 * up to both limits if possible. This particular corner 3170 * case may end up with packet starvation between aggregate 3171 * traffic and non-aggregate traffic: we want to ensure 3172 * that non-aggregate stations get a few frames queued to the 3173 * hardware before the aggregate station(s) get their chance. 3174 * 3175 * So if you only ever see a couple of frames direct dispatched 3176 * to the hardware from a non-AMPDU client, check both here 3177 * and in the software queue dispatcher to ensure that those 3178 * non-AMPDU stations get a fair chance to transmit. 3179 */ 3180 /* XXX TXQ locking */ 3181 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && 3182 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { 3183 /* AMPDU not running, attempt direct dispatch */ 3184 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 3185 /* See if clrdmask needs to be set */ 3186 ath_tx_update_clrdmask(sc, atid, bf); 3187 3188 /* 3189 * Update the current leak count if 3190 * we're leaking frames; and set the 3191 * MORE flag as appropriate. 3192 */ 3193 ath_tx_leak_count_update(sc, atid, bf); 3194 3195 /* 3196 * Dispatch the frame. 3197 */ 3198 ath_tx_xmit_normal(sc, txq, bf); 3199 } else { 3200 /* Busy; queue */ 3201 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 3202 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3203 ath_tx_tid_sched(sc, atid); 3204 } 3205 } 3206 3207 /* 3208 * Only set the clrdmask bit if none of the nodes are currently 3209 * filtered. 3210 * 3211 * XXX TODO: go through all the callers and check to see 3212 * which are being called in the context of looping over all 3213 * TIDs (eg, if all tids are being paused, resumed, etc.) 3214 * That'll avoid O(n^2) complexity here. 3215 */ 3216 static void 3217 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3218 { 3219 int i; 3220 3221 ATH_TX_LOCK_ASSERT(sc); 3222 3223 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3224 if (an->an_tid[i].isfiltered == 1) 3225 return; 3226 } 3227 an->clrdmask = 1; 3228 } 3229 3230 /* 3231 * Configure the per-TID node state. 3232 * 3233 * This likely belongs in if_ath_node.c but I can't think of anywhere 3234 * else to put it just yet. 
3235 * 3236 * This sets up the TAILQs and the per-TID state as appropriate. 3237 */ 3238 void 3239 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3240 { 3241 int i, j; 3242 struct ath_tid *atid; 3243 3244 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3245 atid = &an->an_tid[i]; 3246 3247 /* XXX now with this bzero(), is the field 0'ing needed? */ 3248 bzero(atid, sizeof(*atid)); 3249 3250 TAILQ_INIT(&atid->tid_q); 3251 TAILQ_INIT(&atid->filtq.tid_q); 3252 atid->tid = i; 3253 atid->an = an; 3254 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3255 atid->tx_buf[j] = NULL; 3256 atid->baw_head = atid->baw_tail = 0; 3257 atid->paused = 0; 3258 atid->sched = 0; 3259 atid->hwq_depth = 0; 3260 atid->cleanup_inprogress = 0; 3261 if (i == IEEE80211_NONQOS_TID) 3262 atid->ac = ATH_NONQOS_TID_AC; 3263 else 3264 atid->ac = TID_TO_WME_AC(i); 3265 } 3266 an->clrdmask = 1; /* Always start by setting this bit */ 3267 } 3268 3269 /* 3270 * Pause the current TID. This stops packets from being transmitted 3271 * on it. 3272 * 3273 * Since this is called from upper layers as well as the driver, 3274 * it will get the TID lock. 3275 */ 3276 static void 3277 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3278 { 3279 3280 ATH_TX_LOCK_ASSERT(sc); 3281 tid->paused++; 3282 #if defined(__DragonFly__) 3283 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%s]: tid=%d, paused = %d\n", 3284 __func__, 3285 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3286 tid->tid, 3287 tid->paused); 3288 #else 3289 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", 3290 __func__, 3291 tid->an->an_node.ni_macaddr, ":", 3292 tid->tid, 3293 tid->paused); 3294 #endif 3295 } 3296 3297 /* 3298 * Unpause the current TID, and schedule it if needed. 3299 */ 3300 static void 3301 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3302 { 3303 ATH_TX_LOCK_ASSERT(sc); 3304 3305 /* 3306 * There are some odd places where ath_tx_tid_resume() is called 3307 * when it shouldn't be; this works around that particular issue 3308 * until it's actually resolved. 3309 */ 3310 if (tid->paused == 0) { 3311 #if defined(__DragonFly__) 3312 device_printf(sc->sc_dev, 3313 "%s: [%s]: tid=%d, paused=0?\n", 3314 __func__, 3315 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3316 tid->tid); 3317 #else 3318 device_printf(sc->sc_dev, 3319 "%s: [%6D]: tid=%d, paused=0?\n", 3320 __func__, 3321 tid->an->an_node.ni_macaddr, ":", 3322 tid->tid); 3323 #endif 3324 } else { 3325 tid->paused--; 3326 } 3327 3328 #if defined(__DragonFly__) 3329 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3330 "%s: [%s]: tid=%d, unpaused = %d\n", 3331 __func__, 3332 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3333 tid->tid, 3334 tid->paused); 3335 #else 3336 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3337 "%s: [%6D]: tid=%d, unpaused = %d\n", 3338 __func__, 3339 tid->an->an_node.ni_macaddr, ":", 3340 tid->tid, 3341 tid->paused); 3342 #endif 3343 3344 if (tid->paused) 3345 return; 3346 3347 /* 3348 * Override the clrdmask configuration for the next frame 3349 * from this TID, just to get the ball rolling. 3350 */ 3351 ath_tx_set_clrdmask(sc, tid->an); 3352 3353 if (tid->axq_depth == 0) 3354 return; 3355 3356 /* XXX isfiltered shouldn't ever be 1 at this point */ 3357 if (tid->isfiltered == 1) { 3358 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3359 __func__); 3360 return; 3361 } 3362 3363 ath_tx_tid_sched(sc, tid); 3364 3365 /* 3366 * Queue the software TX scheduler.
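 * (ath_tx_swq_kick() just pokes the TX task; the actual
 * dispatch then happens later, from the TX task context, via
 * the software queue scheduler.)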
3367 */ 3368 ath_tx_swq_kick(sc); 3369 } 3370 3371 /* 3372 * Add the given ath_buf to the TID filtered frame list. 3373 * This requires the TID be filtered. 3374 */ 3375 static void 3376 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3377 struct ath_buf *bf) 3378 { 3379 3380 ATH_TX_LOCK_ASSERT(sc); 3381 3382 if (!tid->isfiltered) 3383 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3384 __func__); 3385 3386 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3387 3388 /* Set the retry bit and bump the retry counter */ 3389 ath_tx_set_retry(sc, bf); 3390 sc->sc_stats.ast_tx_swfiltered++; 3391 3392 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3393 } 3394 3395 /* 3396 * Handle a completed filtered frame from the given TID. 3397 * This just enables/pauses the filtered frame state if required 3398 * and appends the filtered frame to the filtered queue. 3399 */ 3400 static void 3401 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3402 struct ath_buf *bf) 3403 { 3404 3405 ATH_TX_LOCK_ASSERT(sc); 3406 3407 if (! tid->isfiltered) { 3408 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", 3409 __func__, tid->tid); 3410 tid->isfiltered = 1; 3411 ath_tx_tid_pause(sc, tid); 3412 } 3413 3414 /* Add the frame to the filter queue */ 3415 ath_tx_tid_filt_addbuf(sc, tid, bf); 3416 } 3417 3418 /* 3419 * Complete the filtered frame TX completion. 3420 * 3421 * If there are no more frames in the hardware queue, unpause/unfilter 3422 * the TID if applicable. Otherwise we will wait for a node PS transition 3423 * to unfilter. 3424 */ 3425 static void 3426 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3427 { 3428 struct ath_buf *bf; 3429 int do_resume = 0; 3430 3431 ATH_TX_LOCK_ASSERT(sc); 3432 3433 if (tid->hwq_depth != 0) 3434 return; 3435 3436 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", 3437 __func__, tid->tid); 3438 if (tid->isfiltered == 1) { 3439 tid->isfiltered = 0; 3440 do_resume = 1; 3441 } 3442 3443 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3444 ath_tx_set_clrdmask(sc, tid->an); 3445 3446 /* XXX this is really quite inefficient */ 3447 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3448 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3449 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3450 } 3451 3452 /* And only resume if we had paused before */ 3453 if (do_resume) 3454 ath_tx_tid_resume(sc, tid); 3455 } 3456 3457 /* 3458 * Called when a single (aggregate or otherwise) frame is completed. 3459 * 3460 * Returns 0 if the buffer could be added to the filtered list 3461 * (cloned or otherwise), 1 if the buffer couldn't be added to the 3462 * filtered list (failed clone; expired retry) and the caller should 3463 * free it and handle it like a failure (eg by sending a BAR.) 3464 * 3465 * since the buffer may be cloned, bf must be not touched after this 3466 * if the return value is 0. 3467 */ 3468 static int 3469 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3470 struct ath_buf *bf) 3471 { 3472 struct ath_buf *nbf; 3473 int retval; 3474 3475 ATH_TX_LOCK_ASSERT(sc); 3476 3477 /* 3478 * Don't allow a filtered frame to live forever. 
3479 */ 3480 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3481 sc->sc_stats.ast_tx_swretrymax++; 3482 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3483 "%s: bf=%p, seqno=%d, exceeded retries\n", 3484 __func__, 3485 bf, 3486 SEQNO(bf->bf_state.bfs_seqno)); 3487 retval = 1; /* error */ 3488 goto finish; 3489 } 3490 3491 /* 3492 * A busy buffer can't be added to the retry list. 3493 * It needs to be cloned. 3494 */ 3495 if (bf->bf_flags & ATH_BUF_BUSY) { 3496 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3497 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3498 "%s: busy buffer clone: %p -> %p\n", 3499 __func__, bf, nbf); 3500 } else { 3501 nbf = bf; 3502 } 3503 3504 if (nbf == NULL) { 3505 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3506 "%s: busy buffer couldn't be cloned (%p)!\n", 3507 __func__, bf); 3508 retval = 1; /* error */ 3509 } else { 3510 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3511 retval = 0; /* ok */ 3512 } 3513 finish: 3514 ath_tx_tid_filt_comp_complete(sc, tid); 3515 3516 return (retval); 3517 } 3518 3519 static void 3520 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3521 struct ath_buf *bf_first, ath_bufhead *bf_q) 3522 { 3523 struct ath_buf *bf, *bf_next, *nbf; 3524 3525 ATH_TX_LOCK_ASSERT(sc); 3526 3527 bf = bf_first; 3528 while (bf) { 3529 bf_next = bf->bf_next; 3530 bf->bf_next = NULL; /* Remove it from the aggr list */ 3531 3532 /* 3533 * Don't allow a filtered frame to live forever. 3534 */ 3535 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3536 sc->sc_stats.ast_tx_swretrymax++; 3537 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3538 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", 3539 __func__, 3540 tid->tid, 3541 bf, 3542 SEQNO(bf->bf_state.bfs_seqno)); 3543 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3544 goto next; 3545 } 3546 3547 if (bf->bf_flags & ATH_BUF_BUSY) { 3548 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3549 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3550 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", 3551 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); 3552 } else { 3553 nbf = bf; 3554 } 3555 3556 /* 3557 * If the buffer couldn't be cloned, add it to bf_q; 3558 * the caller will free the buffer(s) as required. 3559 */ 3560 if (nbf == NULL) { 3561 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3562 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", 3563 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); 3564 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3565 } else { 3566 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3567 } 3568 next: 3569 bf = bf_next; 3570 } 3571 3572 ath_tx_tid_filt_comp_complete(sc, tid); 3573 } 3574 3575 /* 3576 * Suspend the queue because we need to TX a BAR. 3577 */ 3578 static void 3579 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3580 { 3581 3582 ATH_TX_LOCK_ASSERT(sc); 3583 3584 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3585 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3586 __func__, 3587 tid->tid, 3588 tid->bar_wait, 3589 tid->bar_tx); 3590 3591 /* We shouldn't be called when bar_tx is 1 */ 3592 if (tid->bar_tx) { 3593 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3594 "%s: bar_tx is 1?!\n", __func__); 3595 } 3596 3597 /* If we've already been called, just be patient. */ 3598 if (tid->bar_wait) 3599 return; 3600 3601 /* Wait! */ 3602 tid->bar_wait = 1; 3603 3604 /* Only one pause, no matter how many frames fail */ 3605 ath_tx_tid_pause(sc, tid); 3606 } 3607 3608 /* 3609 * We've finished with BAR handling - either we succeeded or 3610 * failed. Either way, unsuspend TX. 
3611 */ 3612 static void 3613 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3614 { 3615 3616 ATH_TX_LOCK_ASSERT(sc); 3617 3618 #if defined(__DragonFly__) 3619 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3620 "%s: %s: TID=%d, called\n", 3621 __func__, 3622 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3623 tid->tid); 3624 #else 3625 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3626 "%s: %6D: TID=%d, called\n", 3627 __func__, 3628 tid->an->an_node.ni_macaddr, 3629 ":", 3630 tid->tid); 3631 #endif 3632 3633 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3634 #if defined(__DragonFly__) 3635 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3636 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3637 __func__, 3638 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3639 tid->tid, tid->bar_tx, tid->bar_wait); 3640 #else 3641 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3642 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3643 __func__, tid->an->an_node.ni_macaddr, ":", 3644 tid->tid, tid->bar_tx, tid->bar_wait); 3645 #endif 3646 } 3647 3648 tid->bar_tx = tid->bar_wait = 0; 3649 ath_tx_tid_resume(sc, tid); 3650 } 3651 3652 /* 3653 * Return whether we're ready to TX a BAR frame. 3654 * 3655 * Requires the TID lock be held. 3656 */ 3657 static int 3658 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3659 { 3660 3661 ATH_TX_LOCK_ASSERT(sc); 3662 3663 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3664 return (0); 3665 3666 #if defined(__DragonFly__) 3667 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3668 "%s: %s: TID=%d, bar ready\n", 3669 __func__, 3670 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3671 tid->tid); 3672 #else 3673 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3674 "%s: %6D: TID=%d, bar ready\n", 3675 __func__, 3676 tid->an->an_node.ni_macaddr, 3677 ":", 3678 tid->tid); 3679 #endif 3680 3681 return (1); 3682 } 3683 3684 /* 3685 * Check whether the current TID is ready to have a BAR 3686 * TXed and if so, do the TX. 3687 * 3688 * Since the TID/TXQ lock can't be held during a call to 3689 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3690 * sending the BAR and locking it again. 3691 * 3692 * Eventually, the code to send the BAR should be broken out 3693 * from this routine so the lock doesn't have to be reacquired 3694 * just to be immediately dropped by the caller. 3695 */ 3696 static void 3697 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3698 { 3699 struct ieee80211_tx_ampdu *tap; 3700 3701 ATH_TX_LOCK_ASSERT(sc); 3702 3703 #if defined(__DragonFly__) 3704 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3705 "%s: %s: TID=%d, called\n", 3706 __func__, 3707 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3708 tid->tid); 3709 #else 3710 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3711 "%s: %6D: TID=%d, called\n", 3712 __func__, 3713 tid->an->an_node.ni_macaddr, 3714 ":", 3715 tid->tid); 3716 #endif 3717 3718 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3719 3720 /* 3721 * This is an error condition! 
3722 */ 3723 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3724 #if defined(__DragonFly__) 3725 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3726 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3727 __func__, 3728 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3729 tid->tid, tid->bar_tx, tid->bar_wait); 3730 #else 3731 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3732 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3733 __func__, tid->an->an_node.ni_macaddr, ":", 3734 tid->tid, tid->bar_tx, tid->bar_wait); 3735 #endif 3736 return; 3737 } 3738 3739 /* Don't do anything if we still have pending frames */ 3740 if (tid->hwq_depth > 0) { 3741 #if defined(__DragonFly__) 3742 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3743 "%s: %s: TID=%d, hwq_depth=%d, waiting\n", 3744 __func__, 3745 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3746 tid->tid, 3747 tid->hwq_depth); 3748 #else 3749 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3750 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3751 __func__, 3752 tid->an->an_node.ni_macaddr, 3753 ":", 3754 tid->tid, 3755 tid->hwq_depth); 3756 #endif 3757 return; 3758 } 3759 3760 /* We're now about to TX */ 3761 tid->bar_tx = 1; 3762 3763 /* 3764 * Override the clrdmask configuration for the next frame, 3765 * just to get the ball rolling. 3766 */ 3767 ath_tx_set_clrdmask(sc, tid->an); 3768 3769 /* 3770 * Calculate new BAW left edge, now that all frames have either 3771 * succeeded or failed. 3772 * 3773 * XXX verify this is _actually_ the valid value to begin at! 3774 */ 3775 #if defined(__DragonFly__) 3776 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3777 "%s: %s: TID=%d, new BAW left edge=%d\n", 3778 __func__, 3779 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3780 tid->tid, 3781 tap->txa_start); 3782 #else 3783 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3784 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3785 __func__, 3786 tid->an->an_node.ni_macaddr, 3787 ":", 3788 tid->tid, 3789 tap->txa_start); 3790 #endif 3791 3792 /* Try sending the BAR frame */ 3793 /* We can't hold the lock here! */ 3794 3795 ATH_TX_UNLOCK(sc); 3796 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3797 /* Success? Now we wait for notification that it's done */ 3798 ATH_TX_LOCK(sc); 3799 return; 3800 } 3801 3802 /* Failure? For now, warn loudly and continue */ 3803 ATH_TX_LOCK(sc); 3804 #if defined(__DragonFly__) 3805 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3806 "%s: %s: TID=%d, failed to TX BAR, continue!\n", 3807 __func__, 3808 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3809 tid->tid); 3810 #else 3811 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3812 "%s: %6D: TID=%d, failed to TX BAR, continue!\n", 3813 __func__, tid->an->an_node.ni_macaddr, ":", 3814 tid->tid); 3815 #endif 3816 ath_tx_tid_bar_unsuspend(sc, tid); 3817 } 3818 3819 static void 3820 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3821 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3822 { 3823 3824 ATH_TX_LOCK_ASSERT(sc); 3825 3826 /* 3827 * If the current TID is running AMPDU, update 3828 * the BAW. 3829 */ 3830 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3831 bf->bf_state.bfs_dobaw) { 3832 /* 3833 * Only remove the frame from the BAW if it's 3834 * been transmitted at least once; this means 3835 * the frame was in the BAW to begin with. 3836 */ 3837 if (bf->bf_state.bfs_retries > 0) { 3838 ath_tx_update_baw(sc, an, tid, bf); 3839 bf->bf_state.bfs_dobaw = 0; 3840 } 3841 #if 0 3842 /* 3843 * This has become a non-fatal error now 3844 */ 3845 if (! 
bf->bf_state.bfs_addedbaw) 3846 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3847 "%s: wasn't added: seqno %d\n", 3848 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3849 #endif 3850 } 3851 3852 /* Strip it out of an aggregate list if it was in one */ 3853 bf->bf_next = NULL; 3854 3855 /* Insert on the free queue to be freed by the caller */ 3856 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3857 } 3858 3859 static void 3860 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3861 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3862 { 3863 struct ieee80211_node *ni = &an->an_node; 3864 struct ath_txq *txq; 3865 struct ieee80211_tx_ampdu *tap; 3866 3867 txq = sc->sc_ac2q[tid->ac]; 3868 tap = ath_tx_get_tx_tid(an, tid->tid); 3869 3870 #if defined(__DragonFly__) 3871 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3872 "%s: %s: %s: bf=%p: addbaw=%d, dobaw=%d, " 3873 "seqno=%d, retry=%d\n", 3874 __func__, 3875 pfx, 3876 ath_hal_ether_sprintf(ni->ni_macaddr), 3877 bf, 3878 bf->bf_state.bfs_addedbaw, 3879 bf->bf_state.bfs_dobaw, 3880 SEQNO(bf->bf_state.bfs_seqno), 3881 bf->bf_state.bfs_retries); 3882 #else 3883 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3884 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " 3885 "seqno=%d, retry=%d\n", 3886 __func__, 3887 pfx, 3888 ni->ni_macaddr, 3889 ":", 3890 bf, 3891 bf->bf_state.bfs_addedbaw, 3892 bf->bf_state.bfs_dobaw, 3893 SEQNO(bf->bf_state.bfs_seqno), 3894 bf->bf_state.bfs_retries); 3895 #endif 3896 #if defined(__DragonFly__) 3897 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3898 "%s: %s: %s: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3899 __func__, 3900 pfx, 3901 ath_hal_ether_sprintf(ni->ni_macaddr), 3902 bf, 3903 txq->axq_qnum, 3904 txq->axq_depth, 3905 txq->axq_aggr_depth); 3906 #else 3907 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3908 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3909 __func__, 3910 pfx, 3911 ni->ni_macaddr, 3912 ":", 3913 bf, 3914 txq->axq_qnum, 3915 txq->axq_depth, 3916 txq->axq_aggr_depth); 3917 #endif 3918 #if defined(__DragonFly__) 3919 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3920 "%s: %s: %s: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " 3921 "isfiltered=%d\n", 3922 __func__, 3923 pfx, 3924 ath_hal_ether_sprintf(ni->ni_macaddr), 3925 bf, 3926 tid->axq_depth, 3927 tid->hwq_depth, 3928 tid->bar_wait, 3929 tid->isfiltered); 3930 #else 3931 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3932 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " 3933 "isfiltered=%d\n", 3934 __func__, 3935 pfx, 3936 ni->ni_macaddr, 3937 ":", 3938 bf, 3939 tid->axq_depth, 3940 tid->hwq_depth, 3941 tid->bar_wait, 3942 tid->isfiltered); 3943 #endif 3944 #if defined(__DragonFly__) 3945 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3946 "%s: %s: %s: tid %d: " 3947 "sched=%d, paused=%d, " 3948 "incomp=%d, baw_head=%d, " 3949 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3950 __func__, 3951 pfx, 3952 ath_hal_ether_sprintf(ni->ni_macaddr), 3953 tid->tid, 3954 tid->sched, tid->paused, 3955 tid->incomp, tid->baw_head, 3956 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3957 ni->ni_txseqs[tid->tid]); 3958 #else 3959 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3960 "%s: %s: %6D: tid %d: " 3961 "sched=%d, paused=%d, " 3962 "incomp=%d, baw_head=%d, " 3963 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3964 __func__, 3965 pfx, 3966 ni->ni_macaddr, 3967 ":", 3968 tid->tid, 3969 tid->sched, tid->paused, 3970 tid->incomp, tid->baw_head, 3971 tid->baw_tail, tap == NULL ?
-1 : tap->txa_start, 3972 ni->ni_txseqs[tid->tid]); 3973 #endif 3974 /* XXX Dump the frame, see what it is? */ 3975 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3976 ieee80211_dump_pkt(ni->ni_ic, 3977 mtod(bf->bf_m, const uint8_t *), 3978 bf->bf_m->m_len, 0, -1); 3979 } 3980 3981 /* 3982 * Free any packets currently pending in the software TX queue. 3983 * 3984 * This will be called when a node is being deleted. 3985 * 3986 * It can also be called on an active node during an interface 3987 * reset or state transition. 3988 * 3989 * (From Linux/reference): 3990 * 3991 * TODO: For frame(s) that are in the retry state, we will reuse the 3992 * sequence number(s) without setting the retry bit. The 3993 * alternative is to give up on these and BAR the receiver's window 3994 * forward. 3995 */ 3996 static void 3997 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3998 struct ath_tid *tid, ath_bufhead *bf_cq) 3999 { 4000 struct ath_buf *bf; 4001 struct ieee80211_tx_ampdu *tap; 4002 struct ieee80211_node *ni = &an->an_node; 4003 int t; 4004 4005 tap = ath_tx_get_tx_tid(an, tid->tid); 4006 4007 ATH_TX_LOCK_ASSERT(sc); 4008 4009 /* Walk the queue, free frames */ 4010 t = 0; 4011 for (;;) { 4012 bf = ATH_TID_FIRST(tid); 4013 if (bf == NULL) { 4014 break; 4015 } 4016 4017 if (t == 0) { 4018 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 4019 // t = 1; 4020 } 4021 4022 ATH_TID_REMOVE(tid, bf, bf_list); 4023 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 4024 } 4025 4026 /* And now, drain the filtered frame queue */ 4027 t = 0; 4028 for (;;) { 4029 bf = ATH_TID_FILT_FIRST(tid); 4030 if (bf == NULL) 4031 break; 4032 4033 if (t == 0) { 4034 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 4035 // t = 1; 4036 } 4037 4038 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 4039 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 4040 } 4041 4042 /* 4043 * Override the clrdmask configuration for the next frame 4044 * in case there is some future transmission, just to get 4045 * the ball rolling. 4046 * 4047 * This won't hurt things if the TID is about to be freed. 4048 */ 4049 ath_tx_set_clrdmask(sc, tid->an); 4050 4051 /* 4052 * Now that it's completed, grab the TID lock and update 4053 * the sequence number and BAW window. 4054 * Because sequence numbers have been assigned to frames 4055 * that haven't been sent yet, it's entirely possible 4056 * we'll be called with some pending frames that have not 4057 * been transmitted. 4058 * 4059 * The cleaner solution is to do the sequence number allocation 4060 * when the packet is first transmitted - and thus the "retries" 4061 * check above would be enough to update the BAW/seqno. 4062 */ 4063 4064 /* But don't do it for non-QoS TIDs */ 4065 if (tap) { 4066 #if 1 4067 #if defined(__DragonFly__) 4068 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4069 "%s: %s: node %p: TID %d: sliding BAW left edge to %d\n", 4070 __func__, 4071 ath_hal_ether_sprintf(ni->ni_macaddr), 4072 an, 4073 tid->tid, 4074 tap->txa_start); 4075 #else 4076 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4077 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n", 4078 __func__, 4079 ni->ni_macaddr, 4080 ":", 4081 an, 4082 tid->tid, 4083 tap->txa_start); 4084 #endif 4085 #endif 4086 ni->ni_txseqs[tid->tid] = tap->txa_start; 4087 tid->baw_tail = tid->baw_head; 4088 } 4089 } 4090 4091 /* 4092 * Reset the TID state. This must be only called once the node has 4093 * had its frames flushed from this TID, to ensure that no other 4094 * pause / unpause logic can kick in. 
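* Note the paused counter is decremented directly in the bar_wait / isfiltered cases below, rather than via ath_tx_tid_resume(), precisely so the TID isn't rescheduled as a side effect.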
4095 */ 4096 static void 4097 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid) 4098 { 4099 4100 #if 0 4101 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; 4102 tid->paused = tid->sched = tid->addba_tx_pending = 0; 4103 tid->incomp = tid->cleanup_inprogress = 0; 4104 #endif 4105 4106 /* 4107 * If we have a bar_wait set, we need to unpause the TID 4108 * here. Otherwise once cleanup has finished, the TID won't 4109 * have the right paused counter. 4110 * 4111 * XXX I'm not going through resume here - I don't want the 4112 * node to be rescheduled just yet. This however should be 4113 * methodized! 4114 */ 4115 if (tid->bar_wait) { 4116 if (tid->paused > 0) { 4117 tid->paused --; 4118 } 4119 } 4120 4121 /* 4122 * XXX same with a currently filtered TID. 4123 * 4124 * Since this is being called during a flush, we assume that 4125 * the filtered frame list is actually empty. 4126 * 4127 * XXX TODO: add in a check to ensure that the filtered queue 4128 * depth is actually 0! 4129 */ 4130 if (tid->isfiltered) { 4131 if (tid->paused > 0) { 4132 tid->paused --; 4133 } 4134 } 4135 4136 /* 4137 * Clear BAR, filtered frames, scheduled and ADDBA pending. 4138 * The TID may be going through cleanup from the last association 4139 * where things in the BAW are still in the hardware queue. 4140 */ 4141 tid->bar_wait = 0; 4142 tid->bar_tx = 0; 4143 tid->isfiltered = 0; 4144 tid->sched = 0; 4145 tid->addba_tx_pending = 0; 4146 4147 /* 4148 * XXX TODO: it may just be enough to walk the HWQs and mark 4149 * frames for that node as non-aggregate; or mark the ath_node 4150 * with something that indicates that aggregation is no longer 4151 * occurring. Then we can just toss the BAW complaints and 4152 * do a complete hard reset of state here - no pause, no 4153 * complete counter, etc. 4154 */ 4155 4156 } 4157 4158 /* 4159 * Flush all software queued packets for the given node. 4160 * 4161 * This occurs when a completion handler frees the last buffer 4162 * for a node, and the node is thus freed. This causes the node 4163 * to be cleaned up, which ends up calling ath_tx_node_flush.
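* The flow below: drain each TID's software and filtered queues into a local bf_cq, unschedule and reset the TID, then complete the collected buffers once the TX lock has been dropped - while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); }.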
4164 */ 4165 void 4166 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 4167 { 4168 int tid; 4169 ath_bufhead bf_cq; 4170 struct ath_buf *bf; 4171 4172 TAILQ_INIT(&bf_cq); 4173 4174 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 4175 &an->an_node); 4176 4177 ATH_TX_LOCK(sc); 4178 #if defined(__DragonFly__) 4179 DPRINTF(sc, ATH_DEBUG_NODE, 4180 "%s: %s: flush; is_powersave=%d, stack_psq=%d, tim=%d, " 4181 "swq_depth=%d, clrdmask=%d, leak_count=%d\n", 4182 __func__, 4183 ath_hal_ether_sprintf(an->an_node.ni_macaddr), 4184 an->an_is_powersave, 4185 an->an_stack_psq, 4186 an->an_tim_set, 4187 an->an_swq_depth, 4188 an->clrdmask, 4189 an->an_leak_count); 4190 #else 4191 DPRINTF(sc, ATH_DEBUG_NODE, 4192 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " 4193 "swq_depth=%d, clrdmask=%d, leak_count=%d\n", 4194 __func__, 4195 an->an_node.ni_macaddr, 4196 ":", 4197 an->an_is_powersave, 4198 an->an_stack_psq, 4199 an->an_tim_set, 4200 an->an_swq_depth, 4201 an->clrdmask, 4202 an->an_leak_count); 4203 #endif 4204 4205 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 4206 struct ath_tid *atid = &an->an_tid[tid]; 4207 4208 /* Free packets */ 4209 ath_tx_tid_drain(sc, an, atid, &bf_cq); 4210 4211 /* Remove this tid from the list of active tids */ 4212 ath_tx_tid_unsched(sc, atid); 4213 4214 /* Reset the per-TID pause, BAR, etc state */ 4215 ath_tx_tid_reset(sc, atid); 4216 } 4217 4218 /* 4219 * Clear global leak count 4220 */ 4221 an->an_leak_count = 0; 4222 ATH_TX_UNLOCK(sc); 4223 4224 /* Handle completed frames */ 4225 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4226 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4227 ath_tx_default_comp(sc, bf, 0); 4228 } 4229 } 4230 4231 /* 4232 * Drain all the software TXQs currently with traffic queued. 4233 */ 4234 void 4235 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 4236 { 4237 struct ath_tid *tid; 4238 ath_bufhead bf_cq; 4239 struct ath_buf *bf; 4240 4241 TAILQ_INIT(&bf_cq); 4242 ATH_TX_LOCK(sc); 4243 4244 /* 4245 * Iterate over all active tids for the given txq, 4246 * flushing and unsched'ing them 4247 */ 4248 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 4249 tid = TAILQ_FIRST(&txq->axq_tidq); 4250 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 4251 ath_tx_tid_unsched(sc, tid); 4252 } 4253 4254 ATH_TX_UNLOCK(sc); 4255 4256 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4257 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4258 ath_tx_default_comp(sc, bf, 0); 4259 } 4260 } 4261 4262 /* 4263 * Handle completion of non-aggregate session frames. 4264 * 4265 * This (currently) doesn't implement software retransmission of 4266 * non-aggregate frames! 4267 * 4268 * Software retransmission of non-aggregate frames needs to obey 4269 * the strict sequence number ordering, and drop any frames that 4270 * will fail this. 4271 * 4272 * For now, filtered frames and frame transmission will cause 4273 * all kinds of issues. So we don't support them. 4274 * 4275 * So anyone queuing frames via ath_tx_normal_xmit() or 4276 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 
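* (ath_tx_tid_hw_queue_norm() currently does this by OR'ing HAL_TXDESC_CLRDMASK into bfs_txflags before the frame is handed off; see below.)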
4277 */ 4278 void 4279 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4280 { 4281 struct ieee80211_node *ni = bf->bf_node; 4282 struct ath_node *an = ATH_NODE(ni); 4283 int tid = bf->bf_state.bfs_tid; 4284 struct ath_tid *atid = &an->an_tid[tid]; 4285 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4286 4287 /* The TID state is protected behind the TXQ lock */ 4288 ATH_TX_LOCK(sc); 4289 4290 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4291 __func__, bf, fail, atid->hwq_depth - 1); 4292 4293 atid->hwq_depth--; 4294 4295 #if 0 4296 /* 4297 * If the frame was filtered, stick it on the filter frame 4298 * queue and complain about it. It shouldn't happen! 4299 */ 4300 if ((ts->ts_status & HAL_TXERR_FILT) || 4301 (ts->ts_status != 0 && atid->isfiltered)) { 4302 DPRINTF(sc, ATH_DEBUG_SW_TX, 4303 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4304 __func__, 4305 atid->isfiltered, 4306 ts->ts_status); 4307 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4308 } 4309 #endif 4310 if (atid->isfiltered) 4311 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4312 if (atid->hwq_depth < 0) 4313 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4314 __func__, atid->hwq_depth); 4315 4316 /* If the TID is being cleaned up, track things */ 4317 /* XXX refactor! */ 4318 if (atid->cleanup_inprogress) { 4319 atid->incomp--; 4320 if (atid->incomp == 0) { 4321 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4322 "%s: TID %d: cleaned up! resume!\n", 4323 __func__, tid); 4324 atid->cleanup_inprogress = 0; 4325 ath_tx_tid_resume(sc, atid); 4326 } 4327 } 4328 4329 /* 4330 * If the queue is filtered, potentially mark it as complete 4331 * and reschedule it as needed. 4332 * 4333 * This is required as there may be a subsequent TX descriptor 4334 * for this end-node that has CLRDMASK set, so it's quite possible 4335 * that a filtered frame will be followed by a non-filtered 4336 * (complete or otherwise) frame. 4337 * 4338 * XXX should we do this before we complete the frame? 4339 */ 4340 if (atid->isfiltered) 4341 ath_tx_tid_filt_comp_complete(sc, atid); 4342 ATH_TX_UNLOCK(sc); 4343 4344 /* 4345 * punt to rate control if we're not being cleaned up 4346 * during a hw queue drain and the frame wanted an ACK. 4347 */ 4348 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4349 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4350 ts, bf->bf_state.bfs_pktlen, 4351 1, (ts->ts_status == 0) ? 0 : 1); 4352 4353 ath_tx_default_comp(sc, bf, fail); 4354 } 4355 4356 /* 4357 * Handle cleanup of aggregate session packets that aren't 4358 * an A-MPDU. 4359 * 4360 * There's no need to update the BAW here - the session is being 4361 * torn down. 4362 */ 4363 static void 4364 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4365 { 4366 struct ieee80211_node *ni = bf->bf_node; 4367 struct ath_node *an = ATH_NODE(ni); 4368 int tid = bf->bf_state.bfs_tid; 4369 struct ath_tid *atid = &an->an_tid[tid]; 4370 4371 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4372 __func__, tid, atid->incomp); 4373 4374 ATH_TX_LOCK(sc); 4375 atid->incomp--; 4376 4377 /* XXX refactor! */ 4378 if (bf->bf_state.bfs_dobaw) { 4379 ath_tx_update_baw(sc, an, atid, bf); 4380 if (!bf->bf_state.bfs_addedbaw) 4381 DPRINTF(sc, ATH_DEBUG_SW_TX, 4382 "%s: wasn't added: seqno %d\n", 4383 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4384 } 4385 4386 if (atid->incomp == 0) { 4387 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4388 "%s: TID %d: cleaned up! 
resume!\n", 4389 __func__, tid); 4390 atid->cleanup_inprogress = 0; 4391 ath_tx_tid_resume(sc, atid); 4392 } 4393 ATH_TX_UNLOCK(sc); 4394 4395 ath_tx_default_comp(sc, bf, 0); 4396 } 4397 4398 4399 /* 4400 * This as it currently stands is a bit dumb. Ideally we'd just 4401 * fail the frame the normal way and have it permanently fail 4402 * via the normal aggregate completion path. 4403 */ 4404 static void 4405 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4406 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4407 { 4408 struct ath_tid *atid = &an->an_tid[tid]; 4409 struct ath_buf *bf, *bf_next; 4410 4411 ATH_TX_LOCK_ASSERT(sc); 4412 4413 /* 4414 * Remove this frame from the queue. 4415 */ 4416 ATH_TID_REMOVE(atid, bf_head, bf_list); 4417 4418 /* 4419 * Loop over all the frames in the aggregate. 4420 */ 4421 bf = bf_head; 4422 while (bf != NULL) { 4423 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4424 4425 /* 4426 * If it's been added to the BAW we need to kick 4427 * it out of the BAW before we continue. 4428 * 4429 * XXX if it's an aggregate, assert that it's in the 4430 * BAW - we shouldn't have it be in an aggregate 4431 * otherwise! 4432 */ 4433 if (bf->bf_state.bfs_addedbaw) { 4434 ath_tx_update_baw(sc, an, atid, bf); 4435 bf->bf_state.bfs_dobaw = 0; 4436 } 4437 4438 /* 4439 * Give it the default completion handler. 4440 */ 4441 bf->bf_comp = ath_tx_normal_comp; 4442 bf->bf_next = NULL; 4443 4444 /* 4445 * Add it to the list to free. 4446 */ 4447 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4448 4449 /* 4450 * Now advance to the next frame in the aggregate. 4451 */ 4452 bf = bf_next; 4453 } 4454 } 4455 4456 /* 4457 * Performs transmit side cleanup when TID changes from aggregated to 4458 * unaggregated and during reassociation. 4459 * 4460 * For now, this just tosses everything from the TID software queue 4461 * whether or not it has been retried and marks the TID as 4462 * pending completion if there's anything for this TID queued to 4463 * the hardware. 4464 * 4465 * The caller is responsible for pausing the TID and unpausing the 4466 * TID if no cleanup was required. Otherwise the cleanup path will 4467 * unpause the TID once the last hardware queued frame is completed. 4468 */ 4469 static void 4470 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4471 ath_bufhead *bf_cq) 4472 { 4473 struct ath_tid *atid = &an->an_tid[tid]; 4474 struct ath_buf *bf, *bf_next; 4475 4476 ATH_TX_LOCK_ASSERT(sc); 4477 4478 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4479 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4480 atid->cleanup_inprogress); 4481 4482 /* 4483 * Move the filtered frames to the TX queue, before 4484 * we run off and discard/process things. 4485 */ 4486 4487 /* XXX this is really quite inefficient */ 4488 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4489 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4490 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4491 } 4492 4493 /* 4494 * Update the frames in the software TX queue: 4495 * 4496 * + Discard retry frames in the queue 4497 * + Fix the completion function to be non-aggregate 4498 */ 4499 bf = ATH_TID_FIRST(atid); 4500 while (bf) { 4501 /* 4502 * Grab the next frame in the list, we may 4503 * be fiddling with the list. 4504 */ 4505 bf_next = TAILQ_NEXT(bf, bf_list); 4506 4507 /* 4508 * Free the frame and all subframes. 4509 */ 4510 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); 4511 4512 /* 4513 * Next frame! 
4514 */ 4515 bf = bf_next; 4516 } 4517 4518 /* 4519 * If there's anything in the hardware queue we wait 4520 * for the TID HWQ to empty. 4521 */ 4522 if (atid->hwq_depth > 0) { 4523 /* 4524 * XXX how about we kill atid->incomp, and instead 4525 * replace it with a macro that checks that atid->hwq_depth 4526 * is 0? 4527 */ 4528 atid->incomp = atid->hwq_depth; 4529 atid->cleanup_inprogress = 1; 4530 } 4531 4532 if (atid->cleanup_inprogress) 4533 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4534 "%s: TID %d: cleanup needed: %d packets\n", 4535 __func__, tid, atid->incomp); 4536 4537 /* Owner now must free completed frames */ 4538 } 4539 4540 static struct ath_buf * 4541 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4542 struct ath_tid *tid, struct ath_buf *bf) 4543 { 4544 struct ath_buf *nbf; 4545 int error; 4546 4547 /* 4548 * Clone the buffer. This will handle the dma unmap and 4549 * copy the node reference to the new buffer. If this 4550 * works out, 'bf' will have no DMA mapping, no mbuf 4551 * pointer and no node reference. 4552 */ 4553 nbf = ath_buf_clone(sc, bf); 4554 4555 #if 0 4556 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4557 __func__); 4558 #endif 4559 4560 if (nbf == NULL) { 4561 /* Failed to clone */ 4562 DPRINTF(sc, ATH_DEBUG_XMIT, 4563 "%s: failed to clone a busy buffer\n", 4564 __func__); 4565 return NULL; 4566 } 4567 4568 /* Setup the dma for the new buffer */ 4569 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4570 if (error != 0) { 4571 DPRINTF(sc, ATH_DEBUG_XMIT, 4572 "%s: failed to setup dma for clone\n", 4573 __func__); 4574 /* 4575 * Put this at the head of the list, not tail; 4576 * that way it doesn't interfere with the 4577 * busy buffer logic (which uses the tail of 4578 * the list.) 4579 */ 4580 ATH_TXBUF_LOCK(sc); 4581 ath_returnbuf_head(sc, nbf); 4582 ATH_TXBUF_UNLOCK(sc); 4583 return NULL; 4584 } 4585 4586 /* Update BAW if required, before we free the original buf */ 4587 if (bf->bf_state.bfs_dobaw) 4588 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4589 4590 /* Free original buffer; return new buffer */ 4591 ath_freebuf(sc, bf); 4592 4593 return nbf; 4594 } 4595 4596 /* 4597 * Handle retrying an unaggregate frame in an aggregate 4598 * session. 4599 * 4600 * If too many retries occur, pause the TID, wait for 4601 * any further retransmits (as there's no reason why 4602 * non-aggregate frames in an aggregate session are 4603 * transmitted in-order; they just have to be in-BAW) 4604 * and then queue a BAR. 4605 */ 4606 static void 4607 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4608 { 4609 struct ieee80211_node *ni = bf->bf_node; 4610 struct ath_node *an = ATH_NODE(ni); 4611 int tid = bf->bf_state.bfs_tid; 4612 struct ath_tid *atid = &an->an_tid[tid]; 4613 struct ieee80211_tx_ampdu *tap; 4614 4615 ATH_TX_LOCK(sc); 4616 4617 tap = ath_tx_get_tx_tid(an, tid); 4618 4619 /* 4620 * If the buffer is marked as busy, we can't directly 4621 * reuse it. Instead, try to clone the buffer. 4622 * If the clone is successful, recycle the old buffer. 4623 * If the clone is unsuccessful, set bfs_retries to max 4624 * to force the next bit of code to free the buffer 4625 * for us. 
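* (Concretely: bfs_retries is bumped past SWMAX_RETRIES, so the exceeded-retries path below completes the frame, updates the BAW if needed and suspends the TID for a BAR.)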
4626 */ 4627 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4628 (bf->bf_flags & ATH_BUF_BUSY)) { 4629 struct ath_buf *nbf; 4630 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4631 if (nbf) 4632 /* bf has been freed at this point */ 4633 bf = nbf; 4634 else 4635 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4636 } 4637 4638 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4639 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4640 "%s: exceeded retries; seqno %d\n", 4641 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4642 sc->sc_stats.ast_tx_swretrymax++; 4643 4644 /* Update BAW anyway */ 4645 if (bf->bf_state.bfs_dobaw) { 4646 ath_tx_update_baw(sc, an, atid, bf); 4647 if (! bf->bf_state.bfs_addedbaw) 4648 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4649 "%s: wasn't added: seqno %d\n", 4650 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4651 } 4652 bf->bf_state.bfs_dobaw = 0; 4653 4654 /* Suspend the TX queue and get ready to send the BAR */ 4655 ath_tx_tid_bar_suspend(sc, atid); 4656 4657 /* Send the BAR if there are no other frames waiting */ 4658 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4659 ath_tx_tid_bar_tx(sc, atid); 4660 4661 ATH_TX_UNLOCK(sc); 4662 4663 /* Free buffer, bf is free after this call */ 4664 ath_tx_default_comp(sc, bf, 0); 4665 return; 4666 } 4667 4668 /* 4669 * This increments the retry counter as well as 4670 * sets the retry flag in the ath_buf and packet 4671 * body. 4672 */ 4673 ath_tx_set_retry(sc, bf); 4674 sc->sc_stats.ast_tx_swretries++; 4675 4676 /* 4677 * Insert this at the head of the queue, so it's 4678 * retried before any current/subsequent frames. 4679 */ 4680 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4681 ath_tx_tid_sched(sc, atid); 4682 /* Send the BAR if there are no other frames waiting */ 4683 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4684 ath_tx_tid_bar_tx(sc, atid); 4685 4686 ATH_TX_UNLOCK(sc); 4687 } 4688 4689 /* 4690 * Common code for aggregate excessive retry/subframe retry. 4691 * If retrying, queues buffers to bf_q. If not, frees the 4692 * buffers. 4693 * 4694 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4695 */ 4696 static int 4697 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4698 ath_bufhead *bf_q) 4699 { 4700 struct ieee80211_node *ni = bf->bf_node; 4701 struct ath_node *an = ATH_NODE(ni); 4702 int tid = bf->bf_state.bfs_tid; 4703 struct ath_tid *atid = &an->an_tid[tid]; 4704 4705 ATH_TX_LOCK_ASSERT(sc); 4706 4707 /* XXX clr11naggr should be done for all subframes */ 4708 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4709 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4710 4711 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4712 4713 /* 4714 * If the buffer is marked as busy, we can't directly 4715 * reuse it. Instead, try to clone the buffer. 4716 * If the clone is successful, recycle the old buffer. 4717 * If the clone is unsuccessful, set bfs_retries to max 4718 * to force the next bit of code to free the buffer 4719 * for us. 
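* (Here "free" means: return 1, so the caller counts the subframe as dropped and queues it on its own completion list instead of retrying it.)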
4720 */ 4721 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4722 (bf->bf_flags & ATH_BUF_BUSY)) { 4723 struct ath_buf *nbf; 4724 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4725 if (nbf) 4726 /* bf has been freed at this point */ 4727 bf = nbf; 4728 else 4729 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4730 } 4731 4732 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4733 sc->sc_stats.ast_tx_swretrymax++; 4734 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4735 "%s: max retries: seqno %d\n", 4736 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4737 ath_tx_update_baw(sc, an, atid, bf); 4738 if (!bf->bf_state.bfs_addedbaw) 4739 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4740 "%s: wasn't added: seqno %d\n", 4741 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4742 bf->bf_state.bfs_dobaw = 0; 4743 return 1; 4744 } 4745 4746 ath_tx_set_retry(sc, bf); 4747 sc->sc_stats.ast_tx_swretries++; 4748 bf->bf_next = NULL; /* Just to make sure */ 4749 4750 /* Clear the aggregate state */ 4751 bf->bf_state.bfs_aggr = 0; 4752 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4753 bf->bf_state.bfs_nframes = 1; 4754 4755 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4756 return 0; 4757 } 4758 4759 /* 4760 * error pkt completion for an aggregate destination 4761 */ 4762 static void 4763 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4764 struct ath_tid *tid) 4765 { 4766 struct ieee80211_node *ni = bf_first->bf_node; 4767 struct ath_node *an = ATH_NODE(ni); 4768 struct ath_buf *bf_next, *bf; 4769 ath_bufhead bf_q; 4770 int drops = 0; 4771 struct ieee80211_tx_ampdu *tap; 4772 ath_bufhead bf_cq; 4773 4774 TAILQ_INIT(&bf_q); 4775 TAILQ_INIT(&bf_cq); 4776 4777 /* 4778 * Update rate control - all frames have failed. 4779 * 4780 * XXX use the length in the first frame in the series; 4781 * XXX just so things are consistent for now. 4782 */ 4783 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4784 &bf_first->bf_status.ds_txstat, 4785 bf_first->bf_state.bfs_pktlen, 4786 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4787 4788 ATH_TX_LOCK(sc); 4789 tap = ath_tx_get_tx_tid(an, tid->tid); 4790 sc->sc_stats.ast_tx_aggr_failall++; 4791 4792 /* Retry all subframes */ 4793 bf = bf_first; 4794 while (bf) { 4795 bf_next = bf->bf_next; 4796 bf->bf_next = NULL; /* Remove it from the aggr list */ 4797 sc->sc_stats.ast_tx_aggr_fail++; 4798 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4799 drops++; 4800 bf->bf_next = NULL; 4801 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4802 } 4803 bf = bf_next; 4804 } 4805 4806 /* Prepend all frames to the beginning of the queue */ 4807 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4808 TAILQ_REMOVE(&bf_q, bf, bf_list); 4809 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4810 } 4811 4812 /* 4813 * Schedule the TID to be re-tried. 4814 */ 4815 ath_tx_tid_sched(sc, tid); 4816 4817 /* 4818 * send bar if we dropped any frames 4819 * 4820 * Keep the txq lock held for now, as we need to ensure 4821 * that ni_txseqs[] is consistent (as it's being updated 4822 * in the ifnet TX context or raw TX context.) 
4823 */ 4824 if (drops) { 4825 /* Suspend the TX queue and get ready to send the BAR */ 4826 ath_tx_tid_bar_suspend(sc, tid); 4827 } 4828 4829 /* 4830 * Send BAR if required 4831 */ 4832 if (ath_tx_tid_bar_tx_ready(sc, tid)) 4833 ath_tx_tid_bar_tx(sc, tid); 4834 4835 ATH_TX_UNLOCK(sc); 4836 4837 /* Complete frames which errored out */ 4838 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4839 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4840 ath_tx_default_comp(sc, bf, 0); 4841 } 4842 } 4843 4844 /* 4845 * Handle clean-up of packets from an aggregate list. 4846 * 4847 * There's no need to update the BAW here - the session is being 4848 * torn down. 4849 */ 4850 static void 4851 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) 4852 { 4853 struct ath_buf *bf, *bf_next; 4854 struct ieee80211_node *ni = bf_first->bf_node; 4855 struct ath_node *an = ATH_NODE(ni); 4856 int tid = bf_first->bf_state.bfs_tid; 4857 struct ath_tid *atid = &an->an_tid[tid]; 4858 4859 ATH_TX_LOCK(sc); 4860 4861 /* update incomp */ 4862 atid->incomp--; 4863 4864 /* Update the BAW */ 4865 bf = bf_first; 4866 while (bf) { 4867 /* XXX refactor! */ 4868 if (bf->bf_state.bfs_dobaw) { 4869 ath_tx_update_baw(sc, an, atid, bf); 4870 if (!bf->bf_state.bfs_addedbaw) 4871 DPRINTF(sc, ATH_DEBUG_SW_TX, 4872 "%s: wasn't added: seqno %d\n", 4873 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4874 } 4875 bf = bf->bf_next; 4876 } 4877 4878 if (atid->incomp == 0) { 4879 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4880 "%s: TID %d: cleaned up! resume!\n", 4881 __func__, tid); 4882 atid->cleanup_inprogress = 0; 4883 ath_tx_tid_resume(sc, atid); 4884 } 4885 4886 /* Send BAR if required */ 4887 /* XXX why would we send a BAR when transitioning to non-aggregation? */ 4888 /* 4889 * XXX TODO: we should likely just tear down the BAR state here, 4890 * rather than sending a BAR. 4891 */ 4892 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4893 ath_tx_tid_bar_tx(sc, atid); 4894 4895 ATH_TX_UNLOCK(sc); 4896 4897 /* Handle frame completion as individual frames */ 4898 bf = bf_first; 4899 while (bf) { 4900 bf_next = bf->bf_next; 4901 bf->bf_next = NULL; 4902 ath_tx_default_comp(sc, bf, 1); 4903 bf = bf_next; 4904 } 4905 } 4906 4907 /* 4908 * Handle completion of a set of aggregate frames. 4909 * 4910 * Note: the completion handler is the last descriptor in the aggregate, 4911 * not the last descriptor in the first frame. 4912 */ 4913 static void 4914 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 4915 int fail) 4916 { 4917 //struct ath_desc *ds = bf->bf_lastds; 4918 struct ieee80211_node *ni = bf_first->bf_node; 4919 struct ath_node *an = ATH_NODE(ni); 4920 int tid = bf_first->bf_state.bfs_tid; 4921 struct ath_tid *atid = &an->an_tid[tid]; 4922 struct ath_tx_status ts; 4923 struct ieee80211_tx_ampdu *tap; 4924 ath_bufhead bf_q; 4925 ath_bufhead bf_cq; 4926 int seq_st, tx_ok; 4927 int hasba, isaggr; 4928 uint32_t ba[2]; 4929 struct ath_buf *bf, *bf_next; 4930 int ba_index; 4931 int drops = 0; 4932 int nframes = 0, nbad = 0, nf; 4933 int pktlen; 4934 /* XXX there's too much on the stack? */ 4935 struct ath_rc_series rc[ATH_RC_NUM]; 4936 int txseq; 4937 4938 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 4939 __func__, atid->hwq_depth); 4940 4941 /* 4942 * Take a copy; this may be needed -after- bf_first 4943 * has been completed and freed.
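* (ts is a by-value copy; everything below that consults the TX status must use it, never bf_first->bf_status.)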
4944 */ 4945 ts = bf_first->bf_status.ds_txstat; 4946 4947 TAILQ_INIT(&bf_q); 4948 TAILQ_INIT(&bf_cq); 4949 4950 /* The TID state is kept behind the TXQ lock */ 4951 ATH_TX_LOCK(sc); 4952 4953 atid->hwq_depth--; 4954 if (atid->hwq_depth < 0) 4955 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", 4956 __func__, atid->hwq_depth); 4957 4958 /* 4959 * If the TID is filtered, handle completing the filter 4960 * transition before potentially kicking it to the cleanup 4961 * function. 4962 * 4963 * XXX this is duplicate work, ew. 4964 */ 4965 if (atid->isfiltered) 4966 ath_tx_tid_filt_comp_complete(sc, atid); 4967 4968 /* 4969 * Punt cleanup to the relevant function, not our problem now 4970 */ 4971 if (atid->cleanup_inprogress) { 4972 if (atid->isfiltered) 4973 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4974 "%s: isfiltered=1, normal_comp?\n", 4975 __func__); 4976 ATH_TX_UNLOCK(sc); 4977 ath_tx_comp_cleanup_aggr(sc, bf_first); 4978 return; 4979 } 4980 4981 /* 4982 * If the frame is filtered, transition to filtered frame 4983 * mode and add this to the filtered frame list. 4984 * 4985 * XXX TODO: figure out how this interoperates with 4986 * BAR, pause and cleanup states. 4987 */ 4988 if ((ts.ts_status & HAL_TXERR_FILT) || 4989 (ts.ts_status != 0 && atid->isfiltered)) { 4990 if (fail != 0) 4991 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4992 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4993 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4994 4995 /* Remove from BAW */ 4996 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4997 if (bf->bf_state.bfs_addedbaw) 4998 drops++; 4999 if (bf->bf_state.bfs_dobaw) { 5000 ath_tx_update_baw(sc, an, atid, bf); 5001 if (!bf->bf_state.bfs_addedbaw) 5002 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5003 "%s: wasn't added: seqno %d\n", 5004 __func__, 5005 SEQNO(bf->bf_state.bfs_seqno)); 5006 } 5007 bf->bf_state.bfs_dobaw = 0; 5008 } 5009 /* 5010 * If any intermediate frames in the BAW were dropped when 5011 * handling filtering things, send a BAR. 5012 */ 5013 if (drops) 5014 ath_tx_tid_bar_suspend(sc, atid); 5015 5016 /* 5017 * Finish up by sending a BAR if required and freeing 5018 * the frames outside of the TX lock. 5019 */ 5020 goto finish_send_bar; 5021 } 5022 5023 /* 5024 * XXX for now, use the first frame in the aggregate for 5025 * XXX rate control completion; it's at least consistent. 5026 */ 5027 pktlen = bf_first->bf_state.bfs_pktlen; 5028 5029 /* 5030 * Handle errors first! 5031 * 5032 * Here, handle _any_ error as an "exceeded retries" error. 5033 * Later on (when filtered frames are to be specially handled) 5034 * it'll have to be expanded. 5035 */ 5036 #if 0 5037 if (ts.ts_status & HAL_TXERR_XRETRY) { 5038 #endif 5039 if (ts.ts_status != 0) { 5040 ATH_TX_UNLOCK(sc); 5041 ath_tx_comp_aggr_error(sc, bf_first, atid); 5042 return; 5043 } 5044 5045 tap = ath_tx_get_tx_tid(an, tid); 5046 5047 /* 5048 * extract starting sequence and block-ack bitmap 5049 */ 5050 /* XXX endian-ness of seq_st, ba? */ 5051 seq_st = ts.ts_seqnum; 5052 hasba = !! (ts.ts_flags & HAL_TX_BA); 5053 tx_ok = (ts.ts_status == 0); 5054 isaggr = bf_first->bf_state.bfs_aggr; 5055 ba[0] = ts.ts_ba_low; 5056 ba[1] = ts.ts_ba_high; 5057 5058 /* 5059 * Copy the TX completion status and the rate control 5060 * series from the first descriptor, as it may be freed 5061 * before the rate control code can get its grubby fingers 5062 * into things.
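* As a rough sketch (assuming the usual ((seq - st) & (IEEE80211_SEQ_RANGE - 1)) definition of ATH_BA_INDEX): with seq_st=100, a subframe with seqno=103 maps to bit 3 of the ba[] bitmap checked in the walk below.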
5063 */ 5064 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 5065 5066 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5067 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 5068 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 5069 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 5070 isaggr, seq_st, hasba, ba[0], ba[1]); 5071 5072 /* 5073 * The reference driver doesn't do this; it simply ignores 5074 * this check in its entirety. 5075 * 5076 * I've seen this occur when using iperf to send traffic 5077 * out tid 1 - the aggregate frames are all marked as TID 1, 5078 * but the TXSTATUS has TID=0. So, let's just ignore this 5079 * check. 5080 */ 5081 #if 0 5082 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 5083 if (tid != ts.ts_tid) { 5084 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 5085 __func__, tid, ts.ts_tid); 5086 tx_ok = 0; 5087 } 5088 #endif 5089 5090 /* AR5416 BA bug; this requires an interface reset */ 5091 if (isaggr && tx_ok && (! hasba)) { 5092 device_printf(sc->sc_dev, 5093 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 5094 "seq_st=%d\n", 5095 __func__, hasba, tx_ok, isaggr, seq_st); 5096 /* XXX TODO: schedule an interface reset */ 5097 #ifdef ATH_DEBUG 5098 ath_printtxbuf(sc, bf_first, 5099 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 5100 #endif 5101 } 5102 5103 /* 5104 * Walk the list of frames, figure out which ones were correctly 5105 * sent and which weren't. 5106 */ 5107 bf = bf_first; 5108 nf = bf_first->bf_state.bfs_nframes; 5109 5110 /* bf_first is going to be invalid once this list is walked */ 5111 bf_first = NULL; 5112 5113 /* 5114 * Walk the list of completed frames and determine 5115 * which need to be completed and which need to be 5116 * retransmitted. 5117 * 5118 * For completed frames, the completion functions need 5119 * to be called at the end of this function as the last 5120 * node reference may free the node. 5121 * 5122 * Finally, since the TXQ lock can't be held during the 5123 * completion callback (to avoid lock recursion), 5124 * the completion calls have to be done outside of the 5125 * lock. 5126 */ 5127 while (bf) { 5128 nframes++; 5129 ba_index = ATH_BA_INDEX(seq_st, 5130 SEQNO(bf->bf_state.bfs_seqno)); 5131 bf_next = bf->bf_next; 5132 bf->bf_next = NULL; /* Remove it from the aggr list */ 5133 5134 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5135 "%s: checking bf=%p seqno=%d; ack=%d\n", 5136 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 5137 ATH_BA_ISSET(ba, ba_index)); 5138 5139 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 5140 sc->sc_stats.ast_tx_aggr_ok++; 5141 ath_tx_update_baw(sc, an, atid, bf); 5142 bf->bf_state.bfs_dobaw = 0; 5143 if (!bf->bf_state.bfs_addedbaw) 5144 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5145 "%s: wasn't added: seqno %d\n", 5146 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5147 bf->bf_next = NULL; 5148 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5149 } else { 5150 sc->sc_stats.ast_tx_aggr_fail++; 5151 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 5152 drops++; 5153 bf->bf_next = NULL; 5154 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5155 } 5156 nbad++; 5157 } 5158 bf = bf_next; 5159 } 5160 5161 /* 5162 * Now that the BAW updates have been done, unlock 5163 * 5164 * txseq is grabbed before the lock is released so we 5165 * have a consistent view of what -was- in the BAW. 5166 * Anything after this point will not yet have been 5167 * TXed. 
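* (In this path txseq isn't consumed any further; the later DPRINTF re-reads tap->txa_start instead.)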
5168 */ 5169 txseq = tap->txa_start; 5170 ATH_TX_UNLOCK(sc); 5171 5172 if (nframes != nf) 5173 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5174 "%s: num frames seen=%d; bf nframes=%d\n", 5175 __func__, nframes, nf); 5176 5177 /* 5178 * Now we know how many frames were bad, call the rate 5179 * control code. 5180 */ 5181 if (fail == 0) 5182 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 5183 nbad); 5184 5185 /* 5186 * send bar if we dropped any frames 5187 */ 5188 if (drops) { 5189 /* Suspend the TX queue and get ready to send the BAR */ 5190 ATH_TX_LOCK(sc); 5191 ath_tx_tid_bar_suspend(sc, atid); 5192 ATH_TX_UNLOCK(sc); 5193 } 5194 5195 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5196 "%s: txa_start now %d\n", __func__, tap->txa_start); 5197 5198 ATH_TX_LOCK(sc); 5199 5200 /* Prepend all frames to the beginning of the queue */ 5201 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5202 TAILQ_REMOVE(&bf_q, bf, bf_list); 5203 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5204 } 5205 5206 /* 5207 * Reschedule to grab some further frames. 5208 */ 5209 ath_tx_tid_sched(sc, atid); 5210 5211 /* 5212 * If the queue is filtered, re-schedule as required. 5213 * 5214 * This is required as there may be a subsequent TX descriptor 5215 * for this end-node that has CLRDMASK set, so it's quite possible 5216 * that a filtered frame will be followed by a non-filtered 5217 * (complete or otherwise) frame. 5218 * 5219 * XXX should we do this before we complete the frame? 5220 */ 5221 if (atid->isfiltered) 5222 ath_tx_tid_filt_comp_complete(sc, atid); 5223 5224 finish_send_bar: 5225 5226 /* 5227 * Send BAR if required 5228 */ 5229 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5230 ath_tx_tid_bar_tx(sc, atid); 5231 5232 ATH_TX_UNLOCK(sc); 5233 5234 /* Do deferred completion */ 5235 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5236 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5237 ath_tx_default_comp(sc, bf, 0); 5238 } 5239 } 5240 5241 /* 5242 * Handle completion of unaggregated frames in an ADDBA 5243 * session. 5244 * 5245 * Fail is set to 1 if the entry is being freed via a call to 5246 * ath_tx_draintxq(). 5247 */ 5248 static void 5249 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5250 { 5251 struct ieee80211_node *ni = bf->bf_node; 5252 struct ath_node *an = ATH_NODE(ni); 5253 int tid = bf->bf_state.bfs_tid; 5254 struct ath_tid *atid = &an->an_tid[tid]; 5255 struct ath_tx_status ts; 5256 int drops = 0; 5257 5258 /* 5259 * Take a copy of this; filtering/cloning the frame may free the 5260 * bf pointer. 5261 */ 5262 ts = bf->bf_status.ds_txstat; 5263 5264 /* 5265 * Update rate control status here, before we possibly 5266 * punt to retry or cleanup. 5267 * 5268 * Do it outside of the TXQ lock. 5269 */ 5270 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5271 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5272 &bf->bf_status.ds_txstat, 5273 bf->bf_state.bfs_pktlen, 5274 1, (ts.ts_status == 0) ? 0 : 1); 5275 5276 /* 5277 * This is called early so atid->hwq_depth can be tracked. 5278 * This unfortunately means that it's released and regrabbed 5279 * during retry and cleanup. That's rather inefficient. 
5280 */ 5281 ATH_TX_LOCK(sc); 5282 5283 if (tid == IEEE80211_NONQOS_TID) 5284 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 5285 5286 DPRINTF(sc, ATH_DEBUG_SW_TX, 5287 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 5288 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 5289 SEQNO(bf->bf_state.bfs_seqno)); 5290 5291 atid->hwq_depth--; 5292 if (atid->hwq_depth < 0) 5293 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 5294 __func__, atid->hwq_depth); 5295 5296 /* 5297 * If the TID is filtered, handle completing the filter 5298 * transition before potentially kicking it to the cleanup 5299 * function. 5300 */ 5301 if (atid->isfiltered) 5302 ath_tx_tid_filt_comp_complete(sc, atid); 5303 5304 /* 5305 * If a cleanup is in progress, punt to comp_cleanup; 5306 * rather than handling it here. It's thus their 5307 * responsibility to clean up, call the completion 5308 * function in net80211, etc. 5309 */ 5310 if (atid->cleanup_inprogress) { 5311 if (atid->isfiltered) 5312 DPRINTF(sc, ATH_DEBUG_SW_TX, 5313 "%s: isfiltered=1, normal_comp?\n", 5314 __func__); 5315 ATH_TX_UNLOCK(sc); 5316 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5317 __func__); 5318 ath_tx_comp_cleanup_unaggr(sc, bf); 5319 return; 5320 } 5321 5322 /* 5323 * XXX TODO: how does cleanup, BAR and filtered frame handling 5324 * overlap? 5325 * 5326 * If the frame is filtered OR if it's any failure but 5327 * the TID is filtered, the frame must be added to the 5328 * filtered frame list. 5329 * 5330 * However - a busy buffer can't be added to the filtered 5331 * list as it will end up being recycled without having 5332 * been made available for the hardware. 5333 */ 5334 if ((ts.ts_status & HAL_TXERR_FILT) || 5335 (ts.ts_status != 0 && atid->isfiltered)) { 5336 int freeframe; 5337 5338 if (fail != 0) 5339 DPRINTF(sc, ATH_DEBUG_SW_TX, 5340 "%s: isfiltered=1, fail=%d\n", 5341 __func__, fail); 5342 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5343 /* 5344 * If freeframe=0 then bf is no longer ours; don't 5345 * touch it. 5346 */ 5347 if (freeframe) { 5348 /* Remove from BAW */ 5349 if (bf->bf_state.bfs_addedbaw) 5350 drops++; 5351 if (bf->bf_state.bfs_dobaw) { 5352 ath_tx_update_baw(sc, an, atid, bf); 5353 if (!bf->bf_state.bfs_addedbaw) 5354 DPRINTF(sc, ATH_DEBUG_SW_TX, 5355 "%s: wasn't added: seqno %d\n", 5356 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5357 } 5358 bf->bf_state.bfs_dobaw = 0; 5359 } 5360 5361 /* 5362 * If the frame couldn't be filtered, treat it as a drop and 5363 * prepare to send a BAR. 5364 */ 5365 if (freeframe && drops) 5366 ath_tx_tid_bar_suspend(sc, atid); 5367 5368 /* 5369 * Send BAR if required 5370 */ 5371 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5372 ath_tx_tid_bar_tx(sc, atid); 5373 5374 ATH_TX_UNLOCK(sc); 5375 /* 5376 * If freeframe is set, then the frame couldn't be 5377 * cloned and bf is still valid. Just complete/free it. 5378 */ 5379 if (freeframe) 5380 ath_tx_default_comp(sc, bf, fail); 5381 5382 return; 5383 } 5384 /* 5385 * Don't bother with the retry check if all frames 5386 * are being failed (eg during queue deletion.) 5387 */ 5388 #if 0 5389 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5390 #endif 5391 if (fail == 0 && ts.ts_status != 0) { 5392 ATH_TX_UNLOCK(sc); 5393 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5394 __func__); 5395 ath_tx_aggr_retry_unaggr(sc, bf); 5396 return; 5397 } 5398 5399 /* Success? 
Complete */ 5400 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 5401 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 5402 if (bf->bf_state.bfs_dobaw) { 5403 ath_tx_update_baw(sc, an, atid, bf); 5404 bf->bf_state.bfs_dobaw = 0; 5405 if (!bf->bf_state.bfs_addedbaw) 5406 DPRINTF(sc, ATH_DEBUG_SW_TX, 5407 "%s: wasn't added: seqno %d\n", 5408 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5409 } 5410 5411 /* 5412 * If the queue is filtered, re-schedule as required. 5413 * 5414 * This is required as there may be a subsequent TX descriptor 5415 * for this end-node that has CLRDMASK set, so it's quite possible 5416 * that a filtered frame will be followed by a non-filtered 5417 * (complete or otherwise) frame. 5418 * 5419 * XXX should we do this before we complete the frame? 5420 */ 5421 if (atid->isfiltered) 5422 ath_tx_tid_filt_comp_complete(sc, atid); 5423 5424 /* 5425 * Send BAR if required 5426 */ 5427 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5428 ath_tx_tid_bar_tx(sc, atid); 5429 5430 ATH_TX_UNLOCK(sc); 5431 5432 ath_tx_default_comp(sc, bf, fail); 5433 /* bf is freed at this point */ 5434 } 5435 5436 void 5437 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 5438 { 5439 if (bf->bf_state.bfs_aggr) 5440 ath_tx_aggr_comp_aggr(sc, bf, fail); 5441 else 5442 ath_tx_aggr_comp_unaggr(sc, bf, fail); 5443 } 5444 5445 /* 5446 * Schedule some packets from the given node/TID to the hardware. 5447 * 5448 * This is the aggregate version. 5449 */ 5450 void 5451 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 5452 struct ath_tid *tid) 5453 { 5454 struct ath_buf *bf; 5455 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5456 struct ieee80211_tx_ampdu *tap; 5457 ATH_AGGR_STATUS status; 5458 ath_bufhead bf_q; 5459 5460 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 5461 ATH_TX_LOCK_ASSERT(sc); 5462 5463 /* 5464 * XXX TODO: If we're called for a queue that we're leaking frames to, 5465 * ensure we only leak one. 5466 */ 5467 5468 tap = ath_tx_get_tx_tid(an, tid->tid); 5469 5470 if (tid->tid == IEEE80211_NONQOS_TID) 5471 DPRINTF(sc, ATH_DEBUG_SW_TX, 5472 "%s: called for TID=NONQOS_TID?\n", __func__); 5473 5474 for (;;) { 5475 status = ATH_AGGR_DONE; 5476 5477 /* 5478 * If the upper layer has paused the TID, don't 5479 * queue any further packets. 5480 * 5481 * This can also occur from the completion task because 5482 * of packet loss; but as it's serialised with this code, 5483 * it won't "appear" halfway through queuing packets. 5484 */ 5485 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5486 break; 5487 5488 bf = ATH_TID_FIRST(tid); 5489 if (bf == NULL) { 5490 break; 5491 } 5492 5493 /* 5494 * If the packet doesn't fall within the BAW (eg a NULL 5495 * data frame), schedule it directly; continue. 5496 */ 5497 if (! bf->bf_state.bfs_dobaw) { 5498 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5499 "%s: non-baw packet\n", 5500 __func__); 5501 ATH_TID_REMOVE(tid, bf, bf_list); 5502 5503 if (bf->bf_state.bfs_nframes > 1) 5504 DPRINTF(sc, ATH_DEBUG_SW_TX, 5505 "%s: aggr=%d, nframes=%d\n", 5506 __func__, 5507 bf->bf_state.bfs_aggr, 5508 bf->bf_state.bfs_nframes); 5509 5510 /* 5511 * This shouldn't happen - such frames shouldn't 5512 * ever have been queued as an aggregate in the 5513 * first place. However, make sure the fields 5514 * are correctly setup just to be totally sure.
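* (Forcing bfs_aggr=0 / bfs_nframes=1 below keeps ath_tx_setds() and ath_hal_clr11n_aggr() treating it as a plain, non-aggregate frame.)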
5515 */ 5516 bf->bf_state.bfs_aggr = 0; 5517 bf->bf_state.bfs_nframes = 1; 5518 5519 /* Update CLRDMASK just before this frame is queued */ 5520 ath_tx_update_clrdmask(sc, tid, bf); 5521 5522 ath_tx_do_ratelookup(sc, bf); 5523 ath_tx_calc_duration(sc, bf); 5524 ath_tx_calc_protection(sc, bf); 5525 ath_tx_set_rtscts(sc, bf); 5526 ath_tx_rate_fill_rcflags(sc, bf); 5527 ath_tx_setds(sc, bf); 5528 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5529 5530 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 5531 5532 /* Queue the packet; continue */ 5533 goto queuepkt; 5534 } 5535 5536 TAILQ_INIT(&bf_q); 5537 5538 /* 5539 * Do a rate control lookup on the first frame in the 5540 * list. The rate control code needs that to occur 5541 * before it can determine whether to TX. 5542 * It's inaccurate because the rate control code doesn't 5543 * really "do" aggregate lookups, so it only considers 5544 * the size of the first frame. 5545 */ 5546 ath_tx_do_ratelookup(sc, bf); 5547 bf->bf_state.bfs_rc[3].rix = 0; 5548 bf->bf_state.bfs_rc[3].tries = 0; 5549 5550 ath_tx_calc_duration(sc, bf); 5551 ath_tx_calc_protection(sc, bf); 5552 5553 ath_tx_set_rtscts(sc, bf); 5554 ath_tx_rate_fill_rcflags(sc, bf); 5555 5556 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 5557 5558 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5559 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 5560 5561 /* 5562 * No frames to be picked up - out of BAW 5563 */ 5564 if (TAILQ_EMPTY(&bf_q)) 5565 break; 5566 5567 /* 5568 * This assumes that the descriptor list in the ath_bufhead 5569 * is already linked together via bf_next pointers. 5570 */ 5571 bf = TAILQ_FIRST(&bf_q); 5572 5573 if (status == ATH_AGGR_8K_LIMITED) 5574 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 5575 5576 /* 5577 * If it's the only frame, send it as a non-aggregate; 5578 * assume that ath_tx_form_aggr() has checked 5579 * whether it's in the BAW and added it appropriately. 5580 */ 5581 if (bf->bf_state.bfs_nframes == 1) { 5582 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5583 "%s: single-frame aggregate\n", __func__); 5584 5585 /* Update CLRDMASK just before this frame is queued */ 5586 ath_tx_update_clrdmask(sc, tid, bf); 5587 5588 bf->bf_state.bfs_aggr = 0; 5589 bf->bf_state.bfs_ndelim = 0; 5590 ath_tx_setds(sc, bf); 5591 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5592 if (status == ATH_AGGR_BAW_CLOSED) 5593 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 5594 else 5595 sc->sc_aggr_stats.aggr_single_pkt++; 5596 } else { 5597 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5598 "%s: multi-frame aggregate: %d frames, " 5599 "length %d\n", 5600 __func__, bf->bf_state.bfs_nframes, 5601 bf->bf_state.bfs_al); 5602 bf->bf_state.bfs_aggr = 1; 5603 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 5604 sc->sc_aggr_stats.aggr_aggr_pkt++; 5605 5606 /* Update CLRDMASK just before this frame is queued */ 5607 ath_tx_update_clrdmask(sc, tid, bf); 5608 5609 /* 5610 * Calculate the duration/protection as required. 5611 */ 5612 ath_tx_calc_duration(sc, bf); 5613 ath_tx_calc_protection(sc, bf); 5614 5615 /* 5616 * Update the rate and rtscts information based on the 5617 * rate decision made by the rate control code; 5618 * the first frame in the aggregate needs it. 5619 */ 5620 ath_tx_set_rtscts(sc, bf); 5621 5622 /* 5623 * Setup the relevant descriptor fields 5624 * for aggregation. The first descriptor 5625 * already points to the rest in the chain.
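* (ath_tx_setds_11n() below consumes that bf_next chain, filling in the aggregate-specific descriptor fields for each subframe.)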
5626 */ 5627 ath_tx_setds_11n(sc, bf); 5628 5629 } 5630 queuepkt: 5631 /* Set completion handler, multi-frame aggregate or not */ 5632 bf->bf_comp = ath_tx_aggr_comp; 5633 5634 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 5635 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); 5636 5637 /* 5638 * Update leak count and frame config if we're leaking frames. 5639 * 5640 * XXX TODO: it should update all frames in an aggregate 5641 * correctly! 5642 */ 5643 ath_tx_leak_count_update(sc, tid, bf); 5644 5645 /* Punt to txq */ 5646 ath_tx_handoff(sc, txq, bf); 5647 5648 /* Track outstanding buffer count to hardware */ 5649 /* aggregates are "one" buffer */ 5650 tid->hwq_depth++; 5651 5652 /* 5653 * Break out if ath_tx_form_aggr() indicated 5654 * there can't be any further progress (eg BAW is full.) 5655 * Checking for an empty txq is done above. 5656 * 5657 * XXX locking on txq here? 5658 */ 5659 /* XXX TXQ locking */ 5660 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || 5661 (status == ATH_AGGR_BAW_CLOSED || 5662 status == ATH_AGGR_LEAK_CLOSED)) 5663 break; 5664 } 5665 } 5666 5667 /* 5668 * Schedule some packets from the given node/TID to the hardware. 5669 * 5670 * XXX TODO: this routine doesn't enforce the maximum TXQ depth. 5671 * It just dumps frames into the TXQ. We should limit how deep 5672 * the transmit queue can grow for frames dispatched to the given 5673 * TXQ. 5674 * 5675 * To avoid locking issues, either we need to own the TXQ lock 5676 * at this point, or we need to pass in the maximum frame count 5677 * from the caller. 5678 */ 5679 void 5680 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 5681 struct ath_tid *tid) 5682 { 5683 struct ath_buf *bf; 5684 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5685 5686 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 5687 __func__, an, tid->tid); 5688 5689 ATH_TX_LOCK_ASSERT(sc); 5690 5691 /* Check - is AMPDU pending or running? then print out something */ 5692 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 5693 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", 5694 __func__, tid->tid); 5695 if (ath_tx_ampdu_running(sc, an, tid->tid)) 5696 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", 5697 __func__, tid->tid); 5698 5699 for (;;) { 5700 5701 /* 5702 * If the upper layers have paused the TID, don't 5703 * queue any further packets. 5704 * 5705 * XXX if we are leaking frames, make sure we decrement 5706 * that counter _and_ we continue here. 5707 */ 5708 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5709 break; 5710 5711 bf = ATH_TID_FIRST(tid); 5712 if (bf == NULL) { 5713 break; 5714 } 5715 5716 ATH_TID_REMOVE(tid, bf, bf_list); 5717 5718 /* Sanity check! */ 5719 if (tid->tid != bf->bf_state.bfs_tid) { 5720 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" 5721 " tid %d\n", __func__, bf->bf_state.bfs_tid, 5722 tid->tid); 5723 } 5724 /* Normal completion handler */ 5725 bf->bf_comp = ath_tx_normal_comp; 5726 5727 /* 5728 * Override this for now, until the non-aggregate 5729 * completion handler correctly handles software retransmits.
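* (Hence HAL_TXDESC_CLRDMASK is unconditionally OR'ed in below, so a filtered episode can't leave the destination mask stuck with no software retransmit logic to recover it.)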
5730 */ 5731 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5732 5733 /* Update CLRDMASK just before this frame is queued */ 5734 ath_tx_update_clrdmask(sc, tid, bf); 5735 5736 /* Program descriptors + rate control */ 5737 ath_tx_do_ratelookup(sc, bf); 5738 ath_tx_calc_duration(sc, bf); 5739 ath_tx_calc_protection(sc, bf); 5740 ath_tx_set_rtscts(sc, bf); 5741 ath_tx_rate_fill_rcflags(sc, bf); 5742 ath_tx_setds(sc, bf); 5743 5744 /* 5745 * Update the current leak count if 5746 * we're leaking frames; and set the 5747 * MORE flag as appropriate. 5748 */ 5749 ath_tx_leak_count_update(sc, tid, bf); 5750 5751 /* Track outstanding buffer count to hardware */ 5752 /* aggregates are "one" buffer */ 5753 tid->hwq_depth++; 5754 5755 /* Punt to hardware or software txq */ 5756 ath_tx_handoff(sc, txq, bf); 5757 } 5758 } 5759 5760 /* 5761 * Schedule some packets to the given hardware queue. 5762 * 5763 * This function walks the list of TIDs (ie, ath_node TIDs 5764 * with queued traffic) and attempts to schedule traffic 5765 * from them. 5766 * 5767 * TID scheduling is implemented as a FIFO, with TIDs being 5768 * added to the end of the queue after some frames have been 5769 * scheduled. 5770 */ 5771 void 5772 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5773 { 5774 struct ath_tid *tid, *next, *last; 5775 5776 ATH_TX_LOCK_ASSERT(sc); 5777 5778 /* 5779 * Don't schedule if the hardware queue is busy. 5780 * This (hopefully) gives some more time to aggregate 5781 * some packets in the aggregation queue. 5782 * 5783 * XXX It doesn't stop a parallel sender from sneaking 5784 * in transmitting a frame! 5785 */ 5786 /* XXX TXQ locking */ 5787 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5788 sc->sc_aggr_stats.aggr_sched_nopkt++; 5789 return; 5790 } 5791 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5792 sc->sc_aggr_stats.aggr_sched_nopkt++; 5793 return; 5794 } 5795 5796 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5797 5798 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5799 /* 5800 * Suspend paused queues here; they'll be resumed 5801 * once the addba completes or times out. 5802 */ 5803 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5804 __func__, tid->tid, tid->paused); 5805 ath_tx_tid_unsched(sc, tid); 5806 /* 5807 * This node may be in power-save and we're leaking 5808 * a frame; be careful. 5809 */ 5810 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 5811 goto loop_done; 5812 } 5813 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5814 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5815 else 5816 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5817 5818 /* Not empty? Re-schedule */ 5819 if (tid->axq_depth != 0) 5820 ath_tx_tid_sched(sc, tid); 5821 5822 /* 5823 * Give the software queue time to aggregate more 5824 * packets. If we aren't running aggregation then 5825 * we should still limit the hardware queue depth. 5826 */ 5827 /* XXX TXQ locking */ 5828 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5829 break; 5830 } 5831 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5832 break; 5833 } 5834 loop_done: 5835 /* 5836 * If this was the last entry on the original list, stop. 5837 * Otherwise nodes that have been rescheduled onto the end 5838 * of the TID FIFO list will just keep being rescheduled. 5839 * 5840 * XXX What should we do about nodes that were paused 5841 * but are pending a leaking frame in response to a ps-poll? 
/*
 * TX addba handling
 */

/*
 * Return the net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return NULL;

	tap = &ni->ni_tx_ampdu[tid];
	return tap;
}

/*
 * Is AMPDU-TX running for the given TID?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not running */

	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending for the given TID?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not pending */

	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}
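/*
 * Illustrative sketch, not driver code: the two predicates above are
 * thin wrappers over net80211's txa_flags.  A hypothetical helper
 * showing how a caller would classify a TID's aggregation state:
 */
#if 0
static const char *
ath_tx_ampdu_state_str(struct ath_softc *sc, struct ath_node *an, int tid)
{

	if (ath_tx_ampdu_running(sc, an, tid))
		return ("running");	/* IEEE80211_AGGR_RUNNING set */
	if (ath_tx_ampdu_pending(sc, an, tid))
		return ("pending");	/* IEEE80211_AGGR_XCHGPEND set */
	return ("idle");		/* plain non-aggregate traffic */
}
#endif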
5956 */ 5957 if (atid->addba_tx_pending == 0) { 5958 ath_tx_tid_pause(sc, atid); 5959 atid->addba_tx_pending = 1; 5960 } 5961 ATH_TX_UNLOCK(sc); 5962 5963 #if defined(__DragonFly__) 5964 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5965 "%s: %s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5966 __func__, 5967 ath_hal_ether_sprintf(ni->ni_macaddr), 5968 dialogtoken, baparamset, batimeout); 5969 #else 5970 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5971 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5972 __func__, 5973 ni->ni_macaddr, 5974 ":", 5975 dialogtoken, baparamset, batimeout); 5976 #endif 5977 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5978 "%s: txa_start=%d, ni_txseqs=%d\n", 5979 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5980 5981 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5982 batimeout); 5983 } 5984 5985 /* 5986 * Handle an ADDBA response. 5987 * 5988 * We unpause the queue so TX'ing can resume. 5989 * 5990 * Any packets TX'ed from this point should be "aggregate" (whether 5991 * aggregate or not) so the BAW is updated. 5992 * 5993 * Note! net80211 keeps self-assigning sequence numbers until 5994 * ampdu is negotiated. This means the initially-negotiated BAW left 5995 * edge won't match the ni->ni_txseq. 5996 * 5997 * So, being very dirty, the BAW left edge is "slid" here to match 5998 * ni->ni_txseq. 5999 * 6000 * What likely SHOULD happen is that all packets subsequent to the 6001 * addba request should be tagged as aggregate and queued as non-aggregate 6002 * frames; thus updating the BAW. For now though, I'll just slide the 6003 * window. 6004 */ 6005 int 6006 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6007 int status, int code, int batimeout) 6008 { 6009 struct ath_softc *sc = ni->ni_ic->ic_softc; 6010 int tid = tap->txa_tid; 6011 struct ath_node *an = ATH_NODE(ni); 6012 struct ath_tid *atid = &an->an_tid[tid]; 6013 int r; 6014 6015 #if defined(__DragonFly__) 6016 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6017 "%s: %s: called; status=%d, code=%d, batimeout=%d\n", __func__, 6018 ath_hal_ether_sprintf(ni->ni_macaddr), 6019 status, code, batimeout); 6020 #else 6021 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6022 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, 6023 ni->ni_macaddr, 6024 ":", 6025 status, code, batimeout); 6026 #endif 6027 6028 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6029 "%s: txa_start=%d, ni_txseqs=%d\n", 6030 __func__, tap->txa_start, ni->ni_txseqs[tid]); 6031 6032 /* 6033 * Call this first, so the interface flags get updated 6034 * before the TID is unpaused. Otherwise a race condition 6035 * exists where the unpaused TID still doesn't yet have 6036 * IEEE80211_AGGR_RUNNING set. 6037 */ 6038 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 6039 6040 ATH_TX_LOCK(sc); 6041 atid->addba_tx_pending = 0; 6042 /* 6043 * XXX dirty! 6044 * Slide the BAW left edge to wherever net80211 left it for us. 6045 * Read above for more information. 6046 */ 6047 tap->txa_start = ni->ni_txseqs[tid]; 6048 ath_tx_tid_resume(sc, atid); 6049 ATH_TX_UNLOCK(sc); 6050 return r; 6051 } 6052 6053 6054 /* 6055 * Stop ADDBA on a queue. 6056 * 6057 * This can be called whilst BAR TX is currently active on the queue, 6058 * so make sure this is unblocked before continuing. 
6059 */ 6060 void 6061 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 6062 { 6063 struct ath_softc *sc = ni->ni_ic->ic_softc; 6064 int tid = tap->txa_tid; 6065 struct ath_node *an = ATH_NODE(ni); 6066 struct ath_tid *atid = &an->an_tid[tid]; 6067 ath_bufhead bf_cq; 6068 struct ath_buf *bf; 6069 6070 #if defined(__DragonFly__) 6071 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %s: called\n", 6072 __func__, 6073 ath_hal_ether_sprintf(ni->ni_macaddr)); 6074 #else 6075 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n", 6076 __func__, 6077 ni->ni_macaddr, 6078 ":"); 6079 #endif 6080 6081 /* 6082 * Pause TID traffic early, so there aren't any races 6083 * Unblock the pending BAR held traffic, if it's currently paused. 6084 */ 6085 ATH_TX_LOCK(sc); 6086 ath_tx_tid_pause(sc, atid); 6087 if (atid->bar_wait) { 6088 /* 6089 * bar_unsuspend() expects bar_tx == 1, as it should be 6090 * called from the TX completion path. This quietens 6091 * the warning. It's cleared for us anyway. 6092 */ 6093 atid->bar_tx = 1; 6094 ath_tx_tid_bar_unsuspend(sc, atid); 6095 } 6096 ATH_TX_UNLOCK(sc); 6097 6098 /* There's no need to hold the TXQ lock here */ 6099 sc->sc_addba_stop(ni, tap); 6100 6101 /* 6102 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 6103 * it'll set the cleanup flag, and it'll be unpaused once 6104 * things have been cleaned up. 6105 */ 6106 TAILQ_INIT(&bf_cq); 6107 ATH_TX_LOCK(sc); 6108 6109 /* 6110 * In case there's a followup call to this, only call it 6111 * if we don't have a cleanup in progress. 6112 * 6113 * Since we've paused the queue above, we need to make 6114 * sure we unpause if there's already a cleanup in 6115 * progress - it means something else is also doing 6116 * this stuff, so we don't need to also keep it paused. 6117 */ 6118 if (atid->cleanup_inprogress) { 6119 ath_tx_tid_resume(sc, atid); 6120 } else { 6121 ath_tx_tid_cleanup(sc, an, tid, &bf_cq); 6122 /* 6123 * Unpause the TID if no cleanup is required. 6124 */ 6125 if (! atid->cleanup_inprogress) 6126 ath_tx_tid_resume(sc, atid); 6127 } 6128 ATH_TX_UNLOCK(sc); 6129 6130 /* Handle completing frames and fail them */ 6131 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 6132 TAILQ_REMOVE(&bf_cq, bf, bf_list); 6133 ath_tx_default_comp(sc, bf, 1); 6134 } 6135 6136 } 6137 6138 /* 6139 * Handle a node reassociation. 6140 * 6141 * We may have a bunch of frames queued to the hardware; those need 6142 * to be marked as cleanup. 6143 */ 6144 void 6145 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) 6146 { 6147 struct ath_tid *tid; 6148 int i; 6149 ath_bufhead bf_cq; 6150 struct ath_buf *bf; 6151 6152 TAILQ_INIT(&bf_cq); 6153 6154 ATH_TX_UNLOCK_ASSERT(sc); 6155 6156 ATH_TX_LOCK(sc); 6157 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 6158 tid = &an->an_tid[i]; 6159 if (tid->hwq_depth == 0) 6160 continue; 6161 #if defined(__DragonFly__) 6162 DPRINTF(sc, ATH_DEBUG_NODE, 6163 "%s: %s: TID %d: cleaning up TID\n", 6164 __func__, 6165 ath_hal_ether_sprintf(an->an_node.ni_macaddr), 6166 i); 6167 #else 6168 DPRINTF(sc, ATH_DEBUG_NODE, 6169 "%s: %6D: TID %d: cleaning up TID\n", 6170 __func__, 6171 an->an_node.ni_macaddr, 6172 ":", 6173 i); 6174 #endif 6175 /* 6176 * In case there's a followup call to this, only call it 6177 * if we don't have a cleanup in progress. 6178 */ 6179 if (! tid->cleanup_inprogress) { 6180 ath_tx_tid_pause(sc, tid); 6181 ath_tx_tid_cleanup(sc, an, i, &bf_cq); 6182 /* 6183 * Unpause the TID if no cleanup is required. 6184 */ 6185 if (! 
/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %s: TID %d: cleaning up TID\n",
		    __func__,
		    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
		    i);
#else
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %6D: TID %d: cleaning up TID\n",
		    __func__,
		    an->an_node.ni_macaddr,
		    ":",
		    i);
#endif
		/*
		 * In case there's a followup call to this, only call it
		 * if we don't have a cleanup in progress.
		 */
		if (! tid->cleanup_inprogress) {
			ath_tx_tid_pause(sc, tid);
			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
			/*
			 * Unpause the TID if no cleanup is required.
			 */
			if (! tid->cleanup_inprogress)
				ath_tx_tid_resume(sc, tid);
		}
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;
	int old_txa_start;

#if defined(__DragonFly__)
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %s: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);
#else
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);
#endif

	/* Note: This may update the BAW details */
	/*
	 * XXX What if this does slide the BAW along?  We need to somehow
	 * XXX either fix things up when it does happen, or prevent the
	 * XXX seqpending value from being anything other than exactly
	 * XXX what we want!
	 *
	 * XXX So for now, do this inside the TX lock and just correct
	 * XXX it afterwards.  The condition below should never happen;
	 * XXX if it does, all kinds of things need fixing.
	 */
	ATH_TX_LOCK(sc);
	old_txa_start = tap->txa_start;
	sc->sc_bar_response(ni, tap, status);
	if (tap->txa_start != old_txa_start) {
		device_printf(sc->sc_dev,
		    "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
		    __func__,
		    tid,
		    tap->txa_start,
		    old_txa_start);
	}
	tap->txa_start = old_txa_start;
	ATH_TX_UNLOCK(sc);

	/*
	 * Unpause the TID.
	 *
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch? (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}
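/*
 * Illustrative note, not driver code: the magic "attempts == 50"
 * above appears to mirror net80211's default BAR retry limit (the
 * ieee80211_bar_maxtries tunable, believed to default to 50).  If
 * that limit were ever exported, the hard-coding could go away; the
 * accessor below is purely hypothetical.
 */
#if 0
	/* hypothetical accessor; net80211 does not export this today */
	int maxtries = ieee80211_bar_maxtries_get(ni->ni_ic);

	if (status == 0 || attempts >= maxtries)
		ath_tx_tid_bar_unsuspend(sc, atid);
#endif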
6293 */ 6294 void 6295 ath_addba_response_timeout(struct ieee80211_node *ni, 6296 struct ieee80211_tx_ampdu *tap) 6297 { 6298 struct ath_softc *sc = ni->ni_ic->ic_softc; 6299 int tid = tap->txa_tid; 6300 struct ath_node *an = ATH_NODE(ni); 6301 struct ath_tid *atid = &an->an_tid[tid]; 6302 6303 #if defined(__DragonFly__) 6304 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6305 "%s: %s: TID=%d, called; resuming\n", 6306 __func__, 6307 ath_hal_ether_sprintf(ni->ni_macaddr), 6308 tid); 6309 #else 6310 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6311 "%s: %6D: TID=%d, called; resuming\n", 6312 __func__, 6313 ni->ni_macaddr, 6314 ":", 6315 tid); 6316 #endif 6317 6318 ATH_TX_LOCK(sc); 6319 atid->addba_tx_pending = 0; 6320 ATH_TX_UNLOCK(sc); 6321 6322 /* Note: This updates the aggregate state to (again) pending */ 6323 sc->sc_addba_response_timeout(ni, tap); 6324 6325 /* Unpause the TID; which reschedules it */ 6326 ATH_TX_LOCK(sc); 6327 ath_tx_tid_resume(sc, atid); 6328 ATH_TX_UNLOCK(sc); 6329 } 6330 6331 /* 6332 * Check if a node is asleep or not. 6333 */ 6334 int 6335 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) 6336 { 6337 6338 ATH_TX_LOCK_ASSERT(sc); 6339 6340 return (an->an_is_powersave); 6341 } 6342 6343 /* 6344 * Mark a node as currently "in powersaving." 6345 * This suspends all traffic on the node. 6346 * 6347 * This must be called with the node/tx locks free. 6348 * 6349 * XXX TODO: the locking silliness below is due to how the node 6350 * locking currently works. Right now, the node lock is grabbed 6351 * to do rate control lookups and these are done with the TX 6352 * queue lock held. This means the node lock can't be grabbed 6353 * first here or a LOR will occur. 6354 * 6355 * Eventually (hopefully!) the TX path code will only grab 6356 * the TXQ lock when transmitting and the ath_node lock when 6357 * doing node/TID operations. There are other complications - 6358 * the sched/unsched operations involve walking the per-txq 6359 * 'active tid' list and this requires both locks to be held. 6360 */ 6361 void 6362 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) 6363 { 6364 struct ath_tid *atid; 6365 struct ath_txq *txq; 6366 int tid; 6367 6368 ATH_TX_UNLOCK_ASSERT(sc); 6369 6370 /* Suspend all traffic on the node */ 6371 ATH_TX_LOCK(sc); 6372 6373 if (an->an_is_powersave) { 6374 #if defined(__DragonFly__) 6375 DPRINTF(sc, ATH_DEBUG_XMIT, 6376 "%s: %s: node was already asleep!\n", 6377 __func__, ath_hal_ether_sprintf(an->an_node.ni_macaddr)); 6378 #else 6379 DPRINTF(sc, ATH_DEBUG_XMIT, 6380 "%s: %6D: node was already asleep!\n", 6381 __func__, an->an_node.ni_macaddr, ":"); 6382 #endif 6383 ATH_TX_UNLOCK(sc); 6384 return; 6385 } 6386 6387 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 6388 atid = &an->an_tid[tid]; 6389 txq = sc->sc_ac2q[atid->ac]; 6390 6391 ath_tx_tid_pause(sc, atid); 6392 } 6393 6394 /* Mark node as in powersaving */ 6395 an->an_is_powersave = 1; 6396 6397 ATH_TX_UNLOCK(sc); 6398 } 6399 6400 /* 6401 * Mark a node as currently "awake." 6402 * This resumes all traffic to the node. 6403 */ 6404 void 6405 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) 6406 { 6407 struct ath_tid *atid; 6408 struct ath_txq *txq; 6409 int tid; 6410 6411 ATH_TX_UNLOCK_ASSERT(sc); 6412 6413 ATH_TX_LOCK(sc); 6414 6415 /* !? 
/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* Nothing to do if the node was already awake */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
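/*
 * Illustrative sketch, not driver code: ath_xmit_setup_legacy() above
 * populates the sc_tx method table; an alternate DMA backend (e.g.
 * the EDMA support for later chips) would install its own functions
 * the same way at attach time.  Every name below is hypothetical,
 * for exposition only.
 */
#if 0
void
ath_xmit_setup_edma_sketch(struct ath_softc *sc)
{

	/* EDMA hardware reports TX status in a separate ring */
	sc->sc_tx_desclen = sizeof(struct ath_edma_txdesc);	/* hypothetical */
	sc->sc_tx_statuslen = sizeof(struct ath_edma_txstatus);	/* hypothetical */
	sc->sc_tx_nmaps = 4;	/* hypothetical: multiple maps per descriptor */

	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup_sketch;
	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown_sketch;
	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff_sketch;
}
#endif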