/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */
#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif
#include <netproto/802_11/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

extern const char* ath_hal_ether_sprintf(const uint8_t *mac);

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
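		/*
		 * Each TX descriptor holds up to sc_tx_nmaps buffer
		 * pointers, so a buffer with bf_nseg segments spans
		 * ceil(bf_nseg / sc_tx_nmaps) descriptors; post each
		 * descriptor to the ALQ log.
		 */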
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID).
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames onto the ATH_NONQOS_TID_AC (VO) queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
    ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf;
	struct ath_buf *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	next = TAILQ_FIRST(frags);
	while ((bf = next) != NULL) {
		next = TAILQ_NEXT(bf, bf_list);
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame. Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
    struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
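	/*
	 * NB: each fragment past the first gets its own ath_buf and
	 * its own node reference here; ath_txfrag_cleanup() releases
	 * both if we run out of buffers part-way through the chain.
	 */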
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources. For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	int error;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_defrag(sc->sc_dmat, bf->bf_dmamap, &m0,
	    bf->bf_segs, ATH_TXDESC, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}

	/*
	 * Discard null packets.
	 */
	if (bf->bf_nseg == 0) {
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;
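		/*
		 * Link this descriptor to the next descriptor for
		 * this buffer, or terminate the chain if this is
		 * the final segment.
		 */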
		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id. These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , bf->bf_state.bfs_tx_queue
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames. Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
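	/*
	 * 11n chips take the full four-series rate scenario via
	 * ath_buf_set_rate(); pre-11n chips only get series 1-3
	 * programmed via the extended TX descriptor setup below.
	 */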
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
		    , rc[1].ratecode, rc[1].tries
		    , rc[2].ratecode, rc[2].tries
		    , rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11naggrmiddle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
		    , bf->bf_state.bfs_pktlen	/* packet length */
		    , bf->bf_state.bfs_hdrlen	/* header length */
		    , bf->bf_state.bfs_atype	/* Atheros packet type */
		    , bf->bf_state.bfs_txpower	/* txpower */
		    , bf->bf_state.bfs_txrate0
		    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
		    , bf->bf_state.bfs_keyix	/* key cache index */
		    , bf->bf_state.bfs_txantenna	/* antenna mode */
		    , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
		    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		);

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true. For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info. But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor? Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion. That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmitted from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX racy, should hold the PCU lock when checking this,
	 * and also should ensure that the TX counter is >0!
	 */
	KASSERT((sc->sc_inreset_cnt == 0),
	    ("%s: TX during reset?\n", __func__));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		/* XXX axq_link needs to be set and updated! */
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
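/*
 * NB: this is the legacy (non-EDMA) handoff implementation; it is
 * presumably reached through the driver's TX method table rather
 * than called directly, hence the _legacy_ naming.
 */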
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued. Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index. When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS. But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly. The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}
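/*
 * NB: the rate code computed above ends up in bfs_ctsrate (via
 * ath_tx_set_rtscts() below) and is then written into the
 * descriptor's RTS/CTS rate field by ath_hal_setuptxdesc().
 */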
/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		kprintf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame. We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}
/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct. Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
	    , bf->bf_state.bfs_pktlen	/* packet length */
	    , bf->bf_state.bfs_hdrlen	/* header length */
	    , bf->bf_state.bfs_atype	/* Atheros packet type */
	    , bf->bf_state.bfs_txpower	/* txpower */
	    , bf->bf_state.bfs_txrate0
	    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
	    , bf->bf_state.bfs_keyix	/* key cache index */
	    , bf->bf_state.bfs_txantenna	/* antenna mode */
	    , bf->bf_state.bfs_txflags	/* flags */
	    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
	    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
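	/*
	 * Hold the node lock across the lookup so the primary rate
	 * and any MRR series come from a consistent snapshot of the
	 * per-node rate control state.
	 */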
	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;		/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so it goes out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %s: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ath_hal_ether_sprintf(ni->ni_macaddr),
		    type, subtype);
		return (0);
	} else {
		return (1);
	}
}


/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
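	/*
	 * NB: ordering matters here - the rate lookup populates the
	 * rate series that the duration, protection and RTS/CTS
	 * calculations below consume, so it must run first.
	 */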
	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * is "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;	/* NB: held reference */
	m0 = bf->bf_m;		/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;	/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;			/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);		/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX? too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16. TID 16 gets the ATH_NONQOS_TID_AC
	 *   queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future. There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue. That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor. We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt. We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
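	/*
	 * e.g. with sc_txintrperiod set to 5, every fifth frame queued
	 * to this TXQ is marked INTREQ, bounding how long completed
	 * descriptors can linger before a TX interrupt reaps them.
	 */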
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}

/*
 * Queue a frame to the hardware or software queue.
 *
 * This can be called by the net80211 code.
 *
 * XXX what about locking? Or, push the seqno assign into the
 * XXX aggregate scheduler so it's serialised?
 *
 * XXX When sending management frames via ath_raw_xmit(),
 * should CLRDMASK be set unconditionally?
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	int r = 0;
	u_int pri;
	int tid;
	struct ath_txq *txq;
	int ismcast;
	const struct ieee80211_frame *wh;
	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
	ieee80211_seq seqno;
	uint8_t type, subtype;
	int queue_to_head;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * Determine the target hardware queue.
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be. If it's a non-QoS frame, the
	 * AC and TID are overridden. The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
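	/*
	 * e.g. a QoS data frame keeps its WME AC and maps to the
	 * corresponding TID, whilst a non-QoS frame lands in TID 16
	 * (IEEE80211_NONQOS_TID) and goes out via the
	 * ATH_NONQOS_TID_AC (VO) hardware queue.
	 */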
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be.  If it's a non-QoS frame, the
	 * AC and TID are overridden.  The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);

	txq = sc->sc_ac2q[pri];
	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/*
	 * Enforce how deep the multicast queue can grow.
	 *
	 * XXX duplicated in ath_raw_xmit().
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
		    > sc->sc_txq_mcastq_maxdepth) {
			sc->sc_stats.ast_tx_mcastq_overflow++;
			m_freem(m0);
			return (ENOBUFS);
		}
	}

	/*
	 * Enforce how deep the unicast queue can grow.
	 *
	 * If the node is in power save then we don't want
	 * the software queue to grow too deep, or a node may
	 * end up consuming all of the ath_buf entries.
	 *
	 * For now, only do this for DATA frames.
	 *
	 * We will want to cap how many management/control
	 * frames get punted to the software queue so it doesn't
	 * fill up.  But the correct solution isn't yet obvious.
	 * In any case, this check should at least let frames pass
	 * that we are direct-dispatching.
	 *
	 * XXX TODO: duplicate this to the raw xmit path!
	 */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    ATH_NODE(ni)->an_is_powersave &&
	    ATH_NODE(ni)->an_swq_depth >
	    sc->sc_txq_node_psq_maxdepth) {
		sc->sc_stats.ast_tx_node_psq_overflow++;
		m_freem(m0);
		return (ENOBUFS);
	}

	/* A-MPDU TX */
	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
	is_ampdu = is_ampdu_tx | is_ampdu_pending;

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
	    __func__, tid, pri, is_ampdu);

	/* Set local packet state, used to queue packets to hardware */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
	bf->bf_state.bfs_pri = pri;

#if 1
	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast frames
	 * must be buffered until after the beacon.
	 *
	 * TODO: we should lock the mcastq before we check the length.
	 */
	if (sc->sc_cabq_enable && ismcast &&
	    (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
		txq = &avp->av_mcastq;
		/*
		 * Mark the frame as eventually belonging on the CAB
		 * queue, so the descriptor setup functions will
		 * correctly initialise the descriptor 'qcuId' field.
		 */
		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
	}
#endif

	/* Do the generic frame setup */
	/* XXX should just bzero the bf_state? */
	bf->bf_state.bfs_dobaw = 0;

	/* A-MPDU TX?
Manually set sequence number */ 1967 /* 1968 * Don't do it whilst pending; the net80211 layer still 1969 * assigns them. 1970 */ 1971 if (is_ampdu_tx) { 1972 /* 1973 * Always call; this function will 1974 * handle making sure that null data frames 1975 * don't get a sequence number from the current 1976 * TID and thus mess with the BAW. 1977 */ 1978 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 1979 1980 /* 1981 * Don't add QoS NULL frames to the BAW. 1982 */ 1983 if (IEEE80211_QOS_HAS_SEQ(wh) && 1984 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 1985 bf->bf_state.bfs_dobaw = 1; 1986 } 1987 } 1988 1989 /* 1990 * If needed, the sequence number has been assigned. 1991 * Squirrel it away somewhere easy to get to. 1992 */ 1993 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 1994 1995 /* Is ampdu pending? fetch the seqno and print it out */ 1996 if (is_ampdu_pending) 1997 DPRINTF(sc, ATH_DEBUG_SW_TX, 1998 "%s: tid %d: ampdu pending, seqno %d\n", 1999 __func__, tid, M_SEQNO_GET(m0)); 2000 2001 /* This also sets up the DMA map */ 2002 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2003 2004 if (r != 0) 2005 goto done; 2006 2007 /* At this point m0 could have changed! */ 2008 m0 = bf->bf_m; 2009 2010 #if 1 2011 /* 2012 * If it's a multicast frame, do a direct-dispatch to the 2013 * destination hardware queue. Don't bother software 2014 * queuing it. 2015 */ 2016 /* 2017 * If it's a BAR frame, do a direct dispatch to the 2018 * destination hardware queue. Don't bother software 2019 * queuing it, as the TID will now be paused. 2020 * Sending a BAR frame can occur from the net80211 txa timer 2021 * (ie, retries) or from the ath txtask (completion call.) 2022 * It queues directly to hardware because the TID is paused 2023 * at this point (and won't be unpaused until the BAR has 2024 * either been TXed successfully or max retries has been 2025 * reached.) 2026 */ 2027 /* 2028 * Until things are better debugged - if this node is asleep 2029 * and we're sending it a non-BAR frame, direct dispatch it. 2030 * Why? Because we need to figure out what's actually being 2031 * sent - eg, during reassociation/reauthentication after 2032 * the node (last) disappeared whilst asleep, the driver should 2033 * have unpaused/unsleep'ed the node. So until that is 2034 * sorted out, use this workaround. 2035 */ 2036 if (txq == &avp->av_mcastq) { 2037 DPRINTF(sc, ATH_DEBUG_SW_TX, 2038 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2039 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2040 ath_tx_xmit_normal(sc, txq, bf); 2041 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2042 &queue_to_head)) { 2043 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2044 } else { 2045 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2046 ath_tx_xmit_normal(sc, txq, bf); 2047 } 2048 #else 2049 /* 2050 * For now, since there's no software queue, 2051 * direct-dispatch to the hardware. 2052 */ 2053 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2054 /* 2055 * Update the current leak count if 2056 * we're leaking frames; and set the 2057 * MORE flag as appropriate. 
2058 */ 2059 ath_tx_leak_count_update(sc, tid, bf); 2060 ath_tx_xmit_normal(sc, txq, bf); 2061 #endif 2062 done: 2063 return 0; 2064 } 2065 2066 static int 2067 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2068 struct ath_buf *bf, struct mbuf *m0, 2069 const struct ieee80211_bpf_params *params) 2070 { 2071 struct ifnet *ifp = sc->sc_ifp; 2072 struct ieee80211com *ic = ifp->if_l2com; 2073 struct ath_hal *ah = sc->sc_ah; 2074 struct ieee80211vap *vap = ni->ni_vap; 2075 int error, ismcast, ismrr; 2076 int keyix, hdrlen, pktlen, try0, txantenna; 2077 u_int8_t rix, txrate; 2078 struct ieee80211_frame *wh; 2079 u_int flags; 2080 HAL_PKT_TYPE atype; 2081 const HAL_RATE_TABLE *rt; 2082 struct ath_desc *ds; 2083 u_int pri; 2084 int o_tid = -1; 2085 int do_override; 2086 uint8_t type, subtype; 2087 int queue_to_head; 2088 2089 ATH_TX_LOCK_ASSERT(sc); 2090 2091 wh = mtod(m0, struct ieee80211_frame *); 2092 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2093 hdrlen = ieee80211_anyhdrsize(wh); 2094 /* 2095 * Packet length must not include any 2096 * pad bytes; deduct them here. 2097 */ 2098 /* XXX honor IEEE80211_BPF_DATAPAD */ 2099 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2100 2101 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2102 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2103 2104 ATH_KTR(sc, ATH_KTR_TX, 2, 2105 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2106 2107 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2108 __func__, ismcast); 2109 2110 pri = params->ibp_pri & 3; 2111 /* Override pri if the frame isn't a QoS one */ 2112 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2113 pri = ath_tx_getac(sc, m0); 2114 2115 /* XXX If it's an ADDBA, override the correct queue */ 2116 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2117 2118 /* Map ADDBA to the correct priority */ 2119 if (do_override) { 2120 #if 0 2121 DPRINTF(sc, ATH_DEBUG_XMIT, 2122 "%s: overriding tid %d pri %d -> %d\n", 2123 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2124 #endif 2125 pri = TID_TO_WME_AC(o_tid); 2126 } 2127 2128 /* Handle encryption twiddling if needed */ 2129 if (! ath_tx_tag_crypto(sc, ni, 2130 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2131 &hdrlen, &pktlen, &keyix)) { 2132 ath_freetx(m0); 2133 return EIO; 2134 } 2135 /* packet header may have moved, reset our local pointer */ 2136 wh = mtod(m0, struct ieee80211_frame *); 2137 2138 /* Do the generic frame setup */ 2139 /* XXX should just bzero the bf_state? */ 2140 bf->bf_state.bfs_dobaw = 0; 2141 2142 error = ath_tx_dmasetup(sc, bf, m0); 2143 if (error != 0) 2144 return error; 2145 m0 = bf->bf_m; /* NB: may have changed */ 2146 wh = mtod(m0, struct ieee80211_frame *); 2147 bf->bf_node = ni; /* NB: held reference */ 2148 2149 /* Always enable CLRDMASK for raw frames for now.. */ 2150 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2151 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2152 if (params->ibp_flags & IEEE80211_BPF_RTS) 2153 flags |= HAL_TXDESC_RTSENA; 2154 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2155 /* XXX assume 11g/11n protection? */ 2156 bf->bf_state.bfs_doprot = 1; 2157 flags |= HAL_TXDESC_CTSENA; 2158 } 2159 /* XXX leave ismcast to injector? 
*/ 2160 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2161 flags |= HAL_TXDESC_NOACK; 2162 2163 rt = sc->sc_currates; 2164 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2165 rix = ath_tx_findrix(sc, params->ibp_rate0); 2166 txrate = rt->info[rix].rateCode; 2167 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2168 txrate |= rt->info[rix].shortPreamble; 2169 sc->sc_txrix = rix; 2170 try0 = params->ibp_try0; 2171 ismrr = (params->ibp_try1 != 0); 2172 txantenna = params->ibp_pri >> 2; 2173 if (txantenna == 0) /* XXX? */ 2174 txantenna = sc->sc_txantenna; 2175 2176 /* 2177 * Since ctsrate is fixed, store it away for later 2178 * use when the descriptor fields are being set. 2179 */ 2180 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2181 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2182 2183 /* 2184 * NB: we mark all packets as type PSPOLL so the h/w won't 2185 * set the sequence number, duration, etc. 2186 */ 2187 atype = HAL_PKT_TYPE_PSPOLL; 2188 2189 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2190 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2191 sc->sc_hwmap[rix].ieeerate, -1); 2192 2193 if (ieee80211_radiotap_active_vap(vap)) { 2194 u_int64_t tsf = ath_hal_gettsf64(ah); 2195 2196 sc->sc_tx_th.wt_tsf = htole64(tsf); 2197 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2198 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2199 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2200 if (m0->m_flags & M_FRAG) 2201 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2202 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2203 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2204 ieee80211_get_node_txpower(ni)); 2205 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2206 2207 ieee80211_radiotap_tx(vap, m0); 2208 } 2209 2210 /* 2211 * Formulate first tx descriptor with tx controls. 2212 */ 2213 ds = bf->bf_desc; 2214 /* XXX check return value? */ 2215 2216 /* Store the decided rate index values away */ 2217 bf->bf_state.bfs_pktlen = pktlen; 2218 bf->bf_state.bfs_hdrlen = hdrlen; 2219 bf->bf_state.bfs_atype = atype; 2220 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2221 ieee80211_get_node_txpower(ni)); 2222 bf->bf_state.bfs_txrate0 = txrate; 2223 bf->bf_state.bfs_try0 = try0; 2224 bf->bf_state.bfs_keyix = keyix; 2225 bf->bf_state.bfs_txantenna = txantenna; 2226 bf->bf_state.bfs_txflags = flags; 2227 bf->bf_state.bfs_shpream = 2228 !! 
	    (params->ibp_flags & IEEE80211_BPF_SHORTPRE);

	/* Set local packet state, used to queue packets to hardware */
	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
	bf->bf_state.bfs_pri = pri;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate = 0;
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	bf->bf_state.bfs_rc[0].rix =
	    ath_tx_findrix(sc, params->ibp_rate0);
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	if (ismrr) {
		int rix;

		rix = ath_tx_findrix(sc, params->ibp_rate1);
		bf->bf_state.bfs_rc[1].rix = rix;
		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;

		rix = ath_tx_findrix(sc, params->ibp_rate2);
		bf->bf_state.bfs_rc[2].rix = rix;
		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;

		rix = ath_tx_findrix(sc, params->ibp_rate3);
		bf->bf_state.bfs_rc[3].rix = rix;
		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
	}
	/*
	 * All the required rate control decisions have been made;
	 * fill in the rc flags.
	 */
	ath_tx_rate_fill_rcflags(sc, bf);

	/* NB: no buffered multicast in power save support */

	/*
	 * If we're overriding the ADDBA destination, dump directly
	 * into the hardware queue, right after any pending
	 * frames to that node.
	 */
	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
	    __func__, do_override);

#if 1
	/*
	 * Put ADDBA frames in the right place in the right TID/HWQ.
	 */
	if (do_override) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		/*
		 * XXX if it's addba frames, should we be leaking
		 * them out via the frame leak method?
		 * XXX for now let's not risk it; but we may wish
		 * to investigate this later.
		 */
		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
	    &queue_to_head)) {
		/* Queue to software queue */
		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
	} else {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
	}
#else
	/* Direct-dispatch to the hardware */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
	/*
	 * Update the current leak count if
	 * we're leaking frames; and set the
	 * MORE flag as appropriate.
	 */
	ath_tx_leak_count_update(sc, tid, bf);
	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
#endif
	return 0;
}

/*
 * Send a raw frame.
 *
 * This can be called by net80211.
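 *
 * A minimal sketch of a caller, following the parameter conventions
 * that ath_tx_raw_start() interprets above (all values hypothetical):
 *
 *	struct ieee80211_bpf_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.ibp_pri = WME_AC_VO;		(low 2 bits: WME AC,
 *					 bits 2+: TX antenna)
 *	p.ibp_rate0 = 12;		(500 kb/s units, ie 6 Mb/s)
 *	p.ibp_try0 = 4;			(ibp_try1 != 0 would enable MRR)
 *	error = ath_raw_xmit(ni, m, &p);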
2318 */ 2319 int 2320 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2321 const struct ieee80211_bpf_params *params) 2322 { 2323 struct ieee80211com *ic = ni->ni_ic; 2324 struct ifnet *ifp = ic->ic_ifp; 2325 struct ath_softc *sc = ifp->if_softc; 2326 struct ath_buf *bf; 2327 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2328 int error = 0; 2329 2330 ATH_PCU_LOCK(sc); 2331 if (sc->sc_inreset_cnt > 0) { 2332 DPRINTF(sc, ATH_DEBUG_XMIT, 2333 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2334 error = EIO; 2335 ATH_PCU_UNLOCK(sc); 2336 goto bad0; 2337 } 2338 sc->sc_txstart_cnt++; 2339 ATH_PCU_UNLOCK(sc); 2340 2341 ATH_TX_LOCK(sc); 2342 2343 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) { 2344 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2345 (ifp->if_flags & IFF_RUNNING) == 0 ? 2346 "!running" : "invalid"); 2347 m_freem(m); 2348 error = ENETDOWN; 2349 goto bad; 2350 } 2351 2352 /* 2353 * Enforce how deep the multicast queue can grow. 2354 * 2355 * XXX duplicated in ath_tx_start(). 2356 */ 2357 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2358 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2359 > sc->sc_txq_mcastq_maxdepth) { 2360 sc->sc_stats.ast_tx_mcastq_overflow++; 2361 error = ENOBUFS; 2362 } 2363 2364 if (error != 0) { 2365 m_freem(m); 2366 goto bad; 2367 } 2368 } 2369 2370 /* 2371 * Grab a TX buffer and associated resources. 2372 */ 2373 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2374 if (bf == NULL) { 2375 sc->sc_stats.ast_tx_nobuf++; 2376 m_freem(m); 2377 error = ENOBUFS; 2378 goto bad; 2379 } 2380 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2381 m, params, bf); 2382 2383 if (params == NULL) { 2384 /* 2385 * Legacy path; interpret frame contents to decide 2386 * precisely how to send the frame. 2387 */ 2388 if (ath_tx_start(sc, ni, bf, m)) { 2389 error = EIO; /* XXX */ 2390 goto bad2; 2391 } 2392 } else { 2393 /* 2394 * Caller supplied explicit parameters to use in 2395 * sending the frame. 2396 */ 2397 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2398 error = EIO; /* XXX */ 2399 goto bad2; 2400 } 2401 } 2402 sc->sc_wd_timer = 5; 2403 ifp->if_opackets++; 2404 sc->sc_stats.ast_tx_raw++; 2405 2406 /* 2407 * Update the TIM - if there's anything queued to the 2408 * software queue and power save is enabled, we should 2409 * set the TIM. 2410 */ 2411 ath_tx_update_tim(sc, ni, 1); 2412 2413 ATH_TX_UNLOCK(sc); 2414 2415 ATH_PCU_LOCK(sc); 2416 sc->sc_txstart_cnt--; 2417 ATH_PCU_UNLOCK(sc); 2418 2419 return 0; 2420 bad2: 2421 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2422 "bf=%p", 2423 m, 2424 params, 2425 bf); 2426 ATH_TXBUF_LOCK(sc); 2427 ath_returnbuf_head(sc, bf); 2428 ATH_TXBUF_UNLOCK(sc); 2429 bad: 2430 2431 ATH_TX_UNLOCK(sc); 2432 2433 ATH_PCU_LOCK(sc); 2434 sc->sc_txstart_cnt--; 2435 ATH_PCU_UNLOCK(sc); 2436 bad0: 2437 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2438 m, params); 2439 ifp->if_oerrors++; 2440 sc->sc_stats.ast_tx_raw_fail++; 2441 ieee80211_free_node(ni); 2442 2443 return error; 2444 } 2445 2446 /* Some helper functions */ 2447 2448 /* 2449 * ADDBA (and potentially others) need to be placed in the same 2450 * hardware queue as the TID/node it's relating to. This is so 2451 * it goes out after any pending non-aggregate frames to the 2452 * same node/TID. 2453 * 2454 * If this isn't done, the ADDBA can go out before the frames 2455 * queued in hardware. 
Even though these frames have sequence
 * numbers earlier than the ADDBA (and no frames whose sequence
 * numbers are after the ADDBA should be transmitted before it!),
 * they'll arrive after the ADDBA - and the receiving end will
 * simply drop them as being out of the BAW.
 *
 * The frames can't be appended to the TID software queue - they'll
 * never be sent out.  So these frames have to be directly
 * dispatched to the hardware, rather than queued in software.
 * So if this function returns true, the TXQ has to be
 * overridden and it has to be directly dispatched.
 *
 * It's a dirty hack, but someone's gotta do it.
 */

/*
 * XXX doesn't belong here!
 */
static int
ieee80211_is_action(struct ieee80211_frame *wh)
{
	/* Type: Management frame? */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	    IEEE80211_FC0_TYPE_MGT)
		return 0;

	/* Subtype: Action frame? */
	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
	    IEEE80211_FC0_SUBTYPE_ACTION)
		return 0;

	return 1;
}

#define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
/*
 * Return an alternate TID for ADDBA request frames.
 *
 * Yes, this likely should be done in the net80211 layer.
 */
static int
ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni,
    struct mbuf *m0, int *tid)
{
	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
	struct ieee80211_action_ba_addbarequest *ia;
	uint8_t *frm;
	uint16_t baparamset;

	/* Not action frame? Bail */
	if (! ieee80211_is_action(wh))
		return 0;

	/* XXX Not needed for frames we send? */
#if 0
	/* Correct length? */
	if (! ieee80211_parse_action(ni, m))
		return 0;
#endif

	/* Extract out action frame */
	frm = (u_int8_t *)&wh[1];
	ia = (struct ieee80211_action_ba_addbarequest *) frm;

	/* Not ADDBA? Bail */
	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
		return 0;
	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
		return 0;

	/* Extract TID, return it */
	baparamset = le16toh(ia->rq_baparamset);
	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);

	return 1;
}
#undef MS

/* Per-node software queue operations */

/*
 * Add the current packet to the given BAW.
 * It is assumed that the current packet
 *
 * + fits inside the BAW;
 * + already has had a sequence number allocated.
 *
 * Since the BAW status may be modified by both the ath task and
 * the net80211/ifnet contexts, the TID must be locked.
 */
void
ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;

	ATH_TX_LOCK_ASSERT(sc);

	if (bf->bf_state.bfs_isretried)
		return;

	tap = ath_tx_get_tx_tid(an, tid->tid);

	if (! bf->bf_state.bfs_dobaw) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno),
		    tap->txa_start, tap->txa_wnd);
	}

	if (bf->bf_state.bfs_addedbaw)
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: re-added? 
tid=%d, seqno %d; window %d:%d; " 2570 "baw head=%d tail=%d\n", 2571 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2572 tap->txa_start, tap->txa_wnd, tid->baw_head, 2573 tid->baw_tail); 2574 2575 /* 2576 * Verify that the given sequence number is not outside of the 2577 * BAW. Complain loudly if that's the case. 2578 */ 2579 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2580 SEQNO(bf->bf_state.bfs_seqno))) { 2581 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2582 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2583 "baw head=%d tail=%d\n", 2584 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2585 tap->txa_start, tap->txa_wnd, tid->baw_head, 2586 tid->baw_tail); 2587 } 2588 2589 /* 2590 * ni->ni_txseqs[] is the currently allocated seqno. 2591 * the txa state contains the current baw start. 2592 */ 2593 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2594 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2595 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2596 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2597 "baw head=%d tail=%d\n", 2598 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2599 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2600 tid->baw_tail); 2601 2602 2603 #if 0 2604 assert(tid->tx_buf[cindex] == NULL); 2605 #endif 2606 if (tid->tx_buf[cindex] != NULL) { 2607 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2608 "%s: ba packet dup (index=%d, cindex=%d, " 2609 "head=%d, tail=%d)\n", 2610 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2611 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2612 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2613 __func__, 2614 tid->tx_buf[cindex], 2615 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2616 bf, 2617 SEQNO(bf->bf_state.bfs_seqno) 2618 ); 2619 } 2620 tid->tx_buf[cindex] = bf; 2621 2622 if (index >= ((tid->baw_tail - tid->baw_head) & 2623 (ATH_TID_MAX_BUFS - 1))) { 2624 tid->baw_tail = cindex; 2625 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2626 } 2627 } 2628 2629 /* 2630 * Flip the BAW buffer entry over from the existing one to the new one. 2631 * 2632 * When software retransmitting a (sub-)frame, it is entirely possible that 2633 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2634 * In that instance the buffer is cloned and the new buffer is used for 2635 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2636 * tracking array to maintain consistency. 2637 */ 2638 static void 2639 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2640 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2641 { 2642 int index, cindex; 2643 struct ieee80211_tx_ampdu *tap; 2644 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2645 2646 ATH_TX_LOCK_ASSERT(sc); 2647 2648 tap = ath_tx_get_tx_tid(an, tid->tid); 2649 index = ATH_BA_INDEX(tap->txa_start, seqno); 2650 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2651 2652 /* 2653 * Just warn for now; if it happens then we should find out 2654 * about it. It's highly likely the aggregation session will 2655 * soon hang. 
	 */
	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: retransmitted buffer"
		    " has mismatching seqnos, BA session may hang.\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
	}

	if (tid->tx_buf[cindex] != old_bf) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: ath_buf pointer incorrect; "
		    "BA session may hang.\n", __func__);
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
	}

	tid->tx_buf[cindex] = new_bf;
}

/*
 * seq_start - left edge of BAW
 * seq_next - current/next sequence number to allocate
 *
 * Since the BAW status may be modified by both the ath task and
 * the net80211/ifnet contexts, the TID must be locked.
 */
static void
ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, const struct ath_buf *bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;
	int seqno = SEQNO(bf->bf_state.bfs_seqno);

	ATH_TX_LOCK_ASSERT(sc);

	tap = ath_tx_get_tx_tid(an, tid->tid);
	index = ATH_BA_INDEX(tap->txa_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
	    "baw head=%d, tail=%d\n",
	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
	    cindex, tid->baw_head, tid->baw_tail);

	/*
	 * If this occurs then we have a big problem - something else
	 * has slid tap->txa_start along without updating the BAW
	 * tracking start/end pointers.  Thus the TX BAW state is now
	 * completely busted.
	 *
	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
	 * it's quite possible that a cloned buffer is making its way
	 * here and causing it to fire off.  Disable TDMA for now.
	 */
	if (tid->tx_buf[cindex] != bf) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
		    tid->tx_buf[cindex],
		    (tid->tx_buf[cindex] != NULL) ?
		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
	}

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail &&
	    !tid->tx_buf[tid->baw_head]) {
		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: baw is now %d:%d, baw head=%d\n",
	    __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
}

static void
ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->an->an_leak_count > 0) {
		wh = mtod(bf->bf_m, struct ieee80211_frame *);

		/*
		 * Update MORE based on the software/net80211 queue states.
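		 * The intent, in summary: while either the net80211
		 * power save queue or our software queue still holds
		 * frames for this station, leave MORE_DATA set so the
		 * station keeps polling; only the last leaked frame
		 * goes out with MORE_DATA clear.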
		 */
		if ((tid->an->an_stack_psq > 0)
		    || (tid->an->an_swq_depth > 0))
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		else
			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;

		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
		    __func__,
		    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
		    tid->an->an_leak_count,
		    tid->an->an_stack_psq,
		    tid->an->an_swq_depth,
		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));

		/*
		 * Re-sync the underlying buffer.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		tid->an->an_leak_count--;
	}
}

static int
ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
{

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->an->an_leak_count > 0) {
		return (1);
	}
	if (tid->paused)
		return (0);
	return (1);
}

/*
 * Mark the current node/TID as ready to TX.
 *
 * This is done to make it easy for the software scheduler to
 * find which nodes have data to send.
 *
 * The TXQ lock must be held.
 */
void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * If we are leaking out a frame to this destination
	 * for PS-POLL, ensure that we allow scheduling to
	 * occur.
	 */
	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
		return;		/* paused, can't schedule yet */

	if (tid->sched)
		return;		/* already scheduled */

	tid->sched = 1;

#if 0
	/*
	 * If this is a sleeping node we're leaking to, give
	 * it a higher priority.  This is so bad for QoS it hurts.
	 */
	if (tid->an->an_leak_count) {
		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
	} else {
		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
	}
#endif

	/*
	 * We can't do the above - it'll confuse the TXQ software
	 * scheduler which will keep checking the _head_ TID
	 * in the list to see if it has traffic.  If we queue
	 * a TID to the head of the list and it doesn't transmit,
	 * we'll check it again.
	 *
	 * So, get the rest of this leaking frames support working
	 * and reliable first and _then_ optimise it so they're
	 * pushed out in front of any other pending software
	 * queued nodes.
	 */
	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}

/*
 * Mark the current node as no longer needing to be polled for
 * TX packets.
 *
 * The TXQ lock must be held.
 */
static void
ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
{
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->sched == 0)
		return;

	tid->sched = 0;
	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
}

/*
 * Assign a sequence number manually to the given frame.
 *
 * This should only be called for A-MPDU TX frames.
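 *
 * A worked example of the assignment below: if ni_txseqs[tid] is
 * currently 4095, this frame is tagged with seqno 4095 and the
 * counter wraps to 0, since INCR() is modulo IEEE80211_SEQ_RANGE
 * (4096).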
2868 */ 2869 static ieee80211_seq 2870 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2871 struct ath_buf *bf, struct mbuf *m0) 2872 { 2873 struct ieee80211_frame *wh; 2874 int tid, pri; 2875 ieee80211_seq seqno; 2876 uint8_t subtype; 2877 2878 /* TID lookup */ 2879 wh = mtod(m0, struct ieee80211_frame *); 2880 pri = M_WME_GETAC(m0); /* honor classification */ 2881 tid = WME_AC_TO_TID(pri); 2882 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2883 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2884 2885 /* XXX Is it a control frame? Ignore */ 2886 2887 /* Does the packet require a sequence number? */ 2888 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2889 return -1; 2890 2891 ATH_TX_LOCK_ASSERT(sc); 2892 2893 /* 2894 * Is it a QOS NULL Data frame? Give it a sequence number from 2895 * the default TID (IEEE80211_NONQOS_TID.) 2896 * 2897 * The RX path of everything I've looked at doesn't include the NULL 2898 * data frame sequence number in the aggregation state updates, so 2899 * assigning it a sequence number there will cause a BAW hole on the 2900 * RX side. 2901 */ 2902 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2903 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2904 /* XXX no locking for this TID? This is a bit of a problem. */ 2905 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2906 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2907 } else { 2908 /* Manually assign sequence number */ 2909 seqno = ni->ni_txseqs[tid]; 2910 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2911 } 2912 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2913 M_SEQNO_SET(m0, seqno); 2914 2915 /* Return so caller can do something with it if needed */ 2916 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2917 return seqno; 2918 } 2919 2920 /* 2921 * Attempt to direct dispatch an aggregate frame to hardware. 2922 * If the frame is out of BAW, queue. 2923 * Otherwise, schedule it as a single frame. 2924 */ 2925 static void 2926 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2927 struct ath_txq *txq, struct ath_buf *bf) 2928 { 2929 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2930 struct ieee80211_tx_ampdu *tap; 2931 2932 ATH_TX_LOCK_ASSERT(sc); 2933 2934 tap = ath_tx_get_tx_tid(an, tid->tid); 2935 2936 /* paused? queue */ 2937 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 2938 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2939 /* XXX don't sched - we're paused! */ 2940 return; 2941 } 2942 2943 /* outside baw? queue */ 2944 if (bf->bf_state.bfs_dobaw && 2945 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2946 SEQNO(bf->bf_state.bfs_seqno)))) { 2947 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2948 ath_tx_tid_sched(sc, tid); 2949 return; 2950 } 2951 2952 /* 2953 * This is a temporary check and should be removed once 2954 * all the relevant code paths have been fixed. 2955 * 2956 * During aggregate retries, it's possible that the head 2957 * frame will fail (which has the bfs_aggr and bfs_nframes 2958 * fields set for said aggregate) and will be retried as 2959 * a single frame. In this instance, the values should 2960 * be reset or the completion code will get upset with you. 
	 */
	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
		bf->bf_state.bfs_aggr = 0;
		bf->bf_state.bfs_nframes = 1;
	}

	/* Update CLRDMASK just before this frame is queued */
	ath_tx_update_clrdmask(sc, tid, bf);

	/* Direct dispatch to hardware */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Statistics */
	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Add to BAW */
	if (bf->bf_state.bfs_dobaw) {
		ath_tx_addto_baw(sc, an, tid, bf);
		bf->bf_state.bfs_addedbaw = 1;
	}

	/* Set completion handler, multi-frame aggregate or not */
	bf->bf_comp = ath_tx_aggr_comp;

	/*
	 * Update the current leak count if
	 * we're leaking frames; and set the
	 * MORE flag as appropriate.
	 */
	ath_tx_leak_count_update(sc, tid, bf);

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Attempt to send the packet.
 * If the queue isn't busy, direct-dispatch.
 * If the queue is busy enough, queue the given packet on the
 * relevant software queue.
 */
void
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211_frame *wh;
	struct ath_tid *atid;
	int pri, tid;
	struct mbuf *m0 = bf->bf_m;

	ATH_TX_LOCK_ASSERT(sc);

	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
	wh = mtod(m0, struct ieee80211_frame *);
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);
	atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));

	/* Set local packet state, used to queue packets to hardware */
	/* XXX potentially duplicate info, re-check */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
	bf->bf_state.bfs_pri = pri;

	/*
	 * If the hardware queue isn't busy, direct-dispatch it.
	 * If the hardware queue is busy, software queue it.
	 * If the TID is paused or the traffic is outside the BAW,
	 * software queue it.
	 *
	 * If the node is in power-save and we're leaking a frame,
	 * leak a single frame.
	 */
	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
		/* TID is paused, queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
		/*
		 * If the caller requested that it be sent at a high
		 * priority, queue it at the head of the list.
		 */
		if (queue_to_head)
			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
		else
			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
		/* AMPDU pending; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
		/* XXX sched? */
	} else if (ath_tx_ampdu_running(sc, an, tid)) {
		/* AMPDU running, attempt direct dispatch if possible */

		/*
		 * Always queue the frame to the tail of the list.
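		 *
		 * (Even when this frame could be sent immediately,
		 * queueing to the tail and pulling from the head below
		 * keeps frames strictly in-order within the TID.)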
		 */
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);

		/*
		 * If the hardware queue isn't busy, direct dispatch
		 * the head frame in the list.  Don't schedule the
		 * TID - let it build some more frames first?
		 *
		 * When running A-MPDU, always just check the hardware
		 * queue depth against the aggregate frame limit.
		 * We don't want to burst a large number of single frames
		 * out to the hardware; we want to aggressively hold back.
		 *
		 * Otherwise, schedule the TID.
		 */
		/* XXX TXQ locking */
		if (txq->axq_depth + txq->fifo.axq_depth <
		    sc->sc_hwq_limit_aggr) {
			bf = ATH_TID_FIRST(atid);
			ATH_TID_REMOVE(atid, bf, bf_list);

			/*
			 * Ensure it's definitely treated as a non-AMPDU
			 * frame - this information may have been left
			 * over from a previous attempt.
			 */
			bf->bf_state.bfs_aggr = 0;
			bf->bf_state.bfs_nframes = 1;

			/* Queue to the hardware */
			ath_tx_xmit_aggr(sc, an, txq, bf);
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: xmit_aggr\n",
			    __func__);
		} else {
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: ampdu; swq'ing\n",
			    __func__);

			ath_tx_tid_sched(sc, atid);
		}
	/*
	 * If we're not doing A-MPDU, be prepared to direct dispatch
	 * up to both limits if possible.  This particular corner
	 * case may end up with packet starvation between aggregate
	 * traffic and non-aggregate traffic: we want to ensure
	 * that non-aggregate stations get a few frames queued to the
	 * hardware before the aggregate station(s) get their chance.
	 *
	 * So if you only ever see a couple of frames direct dispatched
	 * to the hardware from a non-AMPDU client, check both here
	 * and in the software queue dispatcher to ensure that those
	 * non-AMPDU stations get a fair chance to transmit.
	 */
	/* XXX TXQ locking */
	} else if ((txq->axq_depth + txq->fifo.axq_depth <
	    sc->sc_hwq_limit_nonaggr) &&
	    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
		/* AMPDU not running, attempt direct dispatch */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
		/* See if clrdmask needs to be set */
		ath_tx_update_clrdmask(sc, atid, bf);

		/*
		 * Update the current leak count if
		 * we're leaking frames; and set the
		 * MORE flag as appropriate.
		 */
		ath_tx_leak_count_update(sc, atid, bf);

		/*
		 * Dispatch the frame.
		 */
		ath_tx_xmit_normal(sc, txq, bf);
	} else {
		/* Busy; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
		ath_tx_tid_sched(sc, atid);
	}
}

/*
 * Only set the clrdmask bit if none of the nodes are currently
 * filtered.
 *
 * XXX TODO: go through all the callers and check to see
 * which are being called in the context of looping over all
 * TIDs (eg, if all tids are being paused, resumed, etc.)
 * That'll avoid O(n^2) complexity here.
 */
static void
ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
{
	int i;

	ATH_TX_LOCK_ASSERT(sc);

	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		if (an->an_tid[i].isfiltered == 1)
			return;
	}
	an->clrdmask = 1;
}

/*
 * Configure the per-TID node state.
 *
 * This likely belongs in if_ath_node.c but I can't think of anywhere
 * else to put it just yet.
 *
 * This sets up the SLISTs and the mutex as appropriate.
 */
void
ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
{
	int i, j;
	struct ath_tid *atid;

	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		atid = &an->an_tid[i];

		/* XXX now with this bzero(), is the field 0'ing needed? */
		bzero(atid, sizeof(*atid));

		TAILQ_INIT(&atid->tid_q);
		TAILQ_INIT(&atid->filtq.tid_q);
		atid->tid = i;
		atid->an = an;
		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
			atid->tx_buf[j] = NULL;
		atid->baw_head = atid->baw_tail = 0;
		atid->paused = 0;
		atid->sched = 0;
		atid->hwq_depth = 0;
		atid->cleanup_inprogress = 0;
		if (i == IEEE80211_NONQOS_TID)
			atid->ac = ATH_NONQOS_TID_AC;
		else
			atid->ac = TID_TO_WME_AC(i);
	}
	an->clrdmask = 1;	/* Always start by setting this bit */
}

/*
 * Pause the current TID.  This stops packets from being transmitted
 * on it.
 *
 * Since this is called from upper layers as well as the driver,
 * it will get the TID lock.
 */
static void
ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
{

	ATH_TX_LOCK_ASSERT(sc);
	tid->paused++;
	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
	    __func__, tid->paused);
}

/*
 * Unpause the current TID, and schedule it if needed.
 */
static void
ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
{
	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * There are some odd places where ath_tx_tid_resume() is called
	 * when it shouldn't be; this works around that particular issue
	 * until it's actually resolved.
	 */
	if (tid->paused == 0) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: %s: paused=0?\n", __func__,
		    ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr));
	} else {
		tid->paused--;
	}

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n",
	    __func__, tid->paused);

	if (tid->paused)
		return;

	/*
	 * Override the clrdmask configuration for the next frame
	 * from this TID, just to get the ball rolling.
	 */
	ath_tx_set_clrdmask(sc, tid->an);

	if (tid->axq_depth == 0)
		return;

	/* XXX isfiltered shouldn't ever be 1 at this point */
	if (tid->isfiltered == 1) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
		    __func__);
		return;
	}

	ath_tx_tid_sched(sc, tid);

	/*
	 * Queue the software TX scheduler.
	 */
	ath_tx_swq_kick(sc);
}

/*
 * Add the given ath_buf to the TID filtered frame list.
 * This requires the TID be filtered.
 */
static void
ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{

	ATH_TX_LOCK_ASSERT(sc);

	if (!tid->isfiltered)
		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
		    __func__);

	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);

	/* Set the retry bit and bump the retry counter */
	ath_tx_set_retry(sc, bf);
	sc->sc_stats.ast_tx_swfiltered++;

	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
}

/*
 * Handle a completed filtered frame from the given TID.
 * This just enables/pauses the filtered frame state if required
 * and appends the filtered frame to the filtered queue.
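 *
 * (Lifecycle summary: the first filtered completion flips
 * tid->isfiltered on and pauses the TID; later filtered completions
 * just append here.  Once hwq_depth drains to zero,
 * ath_tx_tid_filt_comp_complete() requeues everything and unpauses.)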
3308 */ 3309 static void 3310 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3311 struct ath_buf *bf) 3312 { 3313 3314 ATH_TX_LOCK_ASSERT(sc); 3315 3316 if (! tid->isfiltered) { 3317 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 3318 __func__); 3319 tid->isfiltered = 1; 3320 ath_tx_tid_pause(sc, tid); 3321 } 3322 3323 /* Add the frame to the filter queue */ 3324 ath_tx_tid_filt_addbuf(sc, tid, bf); 3325 } 3326 3327 /* 3328 * Complete the filtered frame TX completion. 3329 * 3330 * If there are no more frames in the hardware queue, unpause/unfilter 3331 * the TID if applicable. Otherwise we will wait for a node PS transition 3332 * to unfilter. 3333 */ 3334 static void 3335 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3336 { 3337 struct ath_buf *bf; 3338 3339 ATH_TX_LOCK_ASSERT(sc); 3340 3341 if (tid->hwq_depth != 0) 3342 return; 3343 3344 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3345 __func__); 3346 tid->isfiltered = 0; 3347 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3348 ath_tx_set_clrdmask(sc, tid->an); 3349 3350 /* XXX this is really quite inefficient */ 3351 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3352 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3353 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3354 } 3355 3356 ath_tx_tid_resume(sc, tid); 3357 } 3358 3359 /* 3360 * Called when a single (aggregate or otherwise) frame is completed. 3361 * 3362 * Returns 1 if the buffer could be added to the filtered list 3363 * (cloned or otherwise), 0 if the buffer couldn't be added to the 3364 * filtered list (failed clone; expired retry) and the caller should 3365 * free it and handle it like a failure (eg by sending a BAR.) 3366 */ 3367 static int 3368 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3369 struct ath_buf *bf) 3370 { 3371 struct ath_buf *nbf; 3372 int retval; 3373 3374 ATH_TX_LOCK_ASSERT(sc); 3375 3376 /* 3377 * Don't allow a filtered frame to live forever. 3378 */ 3379 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3380 sc->sc_stats.ast_tx_swretrymax++; 3381 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3382 "%s: bf=%p, seqno=%d, exceeded retries\n", 3383 __func__, 3384 bf, 3385 bf->bf_state.bfs_seqno); 3386 return (0); 3387 } 3388 3389 /* 3390 * A busy buffer can't be added to the retry list. 3391 * It needs to be cloned. 3392 */ 3393 if (bf->bf_flags & ATH_BUF_BUSY) { 3394 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3395 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3396 "%s: busy buffer clone: %p -> %p\n", 3397 __func__, bf, nbf); 3398 } else { 3399 nbf = bf; 3400 } 3401 3402 if (nbf == NULL) { 3403 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3404 "%s: busy buffer couldn't be cloned (%p)!\n", 3405 __func__, bf); 3406 retval = 1; 3407 } else { 3408 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3409 retval = 0; 3410 } 3411 ath_tx_tid_filt_comp_complete(sc, tid); 3412 3413 return (retval); 3414 } 3415 3416 static void 3417 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3418 struct ath_buf *bf_first, ath_bufhead *bf_q) 3419 { 3420 struct ath_buf *bf, *bf_next, *nbf; 3421 3422 ATH_TX_LOCK_ASSERT(sc); 3423 3424 bf = bf_first; 3425 while (bf) { 3426 bf_next = bf->bf_next; 3427 bf->bf_next = NULL; /* Remove it from the aggr list */ 3428 3429 /* 3430 * Don't allow a filtered frame to live forever. 
3431 */ 3432 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3433 sc->sc_stats.ast_tx_swretrymax++; 3434 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3435 "%s: bf=%p, seqno=%d, exceeded retries\n", 3436 __func__, 3437 bf, 3438 bf->bf_state.bfs_seqno); 3439 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3440 goto next; 3441 } 3442 3443 if (bf->bf_flags & ATH_BUF_BUSY) { 3444 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3445 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3446 "%s: busy buffer cloned: %p -> %p", 3447 __func__, bf, nbf); 3448 } else { 3449 nbf = bf; 3450 } 3451 3452 /* 3453 * If the buffer couldn't be cloned, add it to bf_q; 3454 * the caller will free the buffer(s) as required. 3455 */ 3456 if (nbf == NULL) { 3457 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3458 "%s: buffer couldn't be cloned! (%p)\n", 3459 __func__, bf); 3460 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3461 } else { 3462 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3463 } 3464 next: 3465 bf = bf_next; 3466 } 3467 3468 ath_tx_tid_filt_comp_complete(sc, tid); 3469 } 3470 3471 /* 3472 * Suspend the queue because we need to TX a BAR. 3473 */ 3474 static void 3475 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3476 { 3477 3478 ATH_TX_LOCK_ASSERT(sc); 3479 3480 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3481 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3482 __func__, 3483 tid->tid, 3484 tid->bar_wait, 3485 tid->bar_tx); 3486 3487 /* We shouldn't be called when bar_tx is 1 */ 3488 if (tid->bar_tx) { 3489 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3490 "%s: bar_tx is 1?!\n", __func__); 3491 } 3492 3493 /* If we've already been called, just be patient. */ 3494 if (tid->bar_wait) 3495 return; 3496 3497 /* Wait! */ 3498 tid->bar_wait = 1; 3499 3500 /* Only one pause, no matter how many frames fail */ 3501 ath_tx_tid_pause(sc, tid); 3502 } 3503 3504 /* 3505 * We've finished with BAR handling - either we succeeded or 3506 * failed. Either way, unsuspend TX. 3507 */ 3508 static void 3509 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3510 { 3511 3512 ATH_TX_LOCK_ASSERT(sc); 3513 3514 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3515 "%s: %s: TID=%d, called\n", 3516 __func__, 3517 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3518 tid->tid); 3519 3520 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3521 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3522 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3523 __func__, 3524 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3525 tid->tid, tid->bar_tx, tid->bar_wait); 3526 } 3527 3528 tid->bar_tx = tid->bar_wait = 0; 3529 ath_tx_tid_resume(sc, tid); 3530 } 3531 3532 /* 3533 * Return whether we're ready to TX a BAR frame. 3534 * 3535 * Requires the TID lock be held. 3536 */ 3537 static int 3538 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3539 { 3540 3541 ATH_TX_LOCK_ASSERT(sc); 3542 3543 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3544 return (0); 3545 3546 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3547 "%s: %s: TID=%d, bar ready\n", 3548 __func__, 3549 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3550 tid->tid); 3551 3552 return (1); 3553 } 3554 3555 /* 3556 * Check whether the current TID is ready to have a BAR 3557 * TXed and if so, do the TX. 3558 * 3559 * Since the TID/TXQ lock can't be held during a call to 3560 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3561 * sending the BAR and locking it again. 
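 *
 * The shape of it, as done at the bottom of this function:
 *
 *	ATH_TX_UNLOCK(sc);
 *	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
 *		ATH_TX_LOCK(sc);
 *		return;
 *	}
 *	ATH_TX_LOCK(sc);
 *
 * Any TID state examined before the unlock has to be treated as
 * stale once the lock is reacquired.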
3562 * 3563 * Eventually, the code to send the BAR should be broken out 3564 * from this routine so the lock doesn't have to be reacquired 3565 * just to be immediately dropped by the caller. 3566 */ 3567 static void 3568 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3569 { 3570 struct ieee80211_tx_ampdu *tap; 3571 3572 ATH_TX_LOCK_ASSERT(sc); 3573 3574 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3575 "%s: %s: TID=%d, called\n", 3576 __func__, 3577 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3578 tid->tid); 3579 3580 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3581 3582 /* 3583 * This is an error condition! 3584 */ 3585 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3586 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3587 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3588 __func__, 3589 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3590 tid->tid, tid->bar_tx, tid->bar_wait); 3591 return; 3592 } 3593 3594 /* Don't do anything if we still have pending frames */ 3595 if (tid->hwq_depth > 0) { 3596 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3597 "%s: %s: TID=%d, hwq_depth=%d, waiting\n", 3598 __func__, 3599 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3600 tid->tid, 3601 tid->hwq_depth); 3602 return; 3603 } 3604 3605 /* We're now about to TX */ 3606 tid->bar_tx = 1; 3607 3608 /* 3609 * Override the clrdmask configuration for the next frame, 3610 * just to get the ball rolling. 3611 */ 3612 ath_tx_set_clrdmask(sc, tid->an); 3613 3614 /* 3615 * Calculate new BAW left edge, now that all frames have either 3616 * succeeded or failed. 3617 * 3618 * XXX verify this is _actually_ the valid value to begin at! 3619 */ 3620 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3621 "%s: %s: TID=%d, new BAW left edge=%d\n", 3622 __func__, 3623 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3624 tid->tid, 3625 tap->txa_start); 3626 3627 /* Try sending the BAR frame */ 3628 /* We can't hold the lock here! */ 3629 3630 ATH_TX_UNLOCK(sc); 3631 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3632 /* Success? Now we wait for notification that it's done */ 3633 ATH_TX_LOCK(sc); 3634 return; 3635 } 3636 3637 /* Failure? For now, warn loudly and continue */ 3638 ATH_TX_LOCK(sc); 3639 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3640 "%s: %s: TID=%d, failed to TX BAR, continue!\n", 3641 __func__, ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3642 tid->tid); 3643 ath_tx_tid_bar_unsuspend(sc, tid); 3644 } 3645 3646 static void 3647 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3648 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3649 { 3650 3651 ATH_TX_LOCK_ASSERT(sc); 3652 3653 /* 3654 * If the current TID is running AMPDU, update 3655 * the BAW. 3656 */ 3657 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3658 bf->bf_state.bfs_dobaw) { 3659 /* 3660 * Only remove the frame from the BAW if it's 3661 * been transmitted at least once; this means 3662 * the frame was in the BAW to begin with. 3663 */ 3664 if (bf->bf_state.bfs_retries > 0) { 3665 ath_tx_update_baw(sc, an, tid, bf); 3666 bf->bf_state.bfs_dobaw = 0; 3667 } 3668 #if 0 3669 /* 3670 * This has become a non-fatal error now 3671 */ 3672 if (! 
bf->bf_state.bfs_addedbaw)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
			    "%s: wasn't added: seqno %d\n",
			    __func__, SEQNO(bf->bf_state.bfs_seqno));
#endif
	}

	/* Strip it out of an aggregate list if it was in one */
	bf->bf_next = NULL;

	/* Insert on the free queue to be freed by the caller */
	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
}

static void
ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
    const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ath_txq *txq;
	struct ieee80211_tx_ampdu *tap;

	txq = sc->sc_ac2q[tid->ac];
	tap = ath_tx_get_tx_tid(an, tid->tid);

	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: %s: %s: bf=%p: addbaw=%d, dobaw=%d, "
	    "seqno=%d, retry=%d\n",
	    __func__,
	    pfx,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    bf,
	    bf->bf_state.bfs_addedbaw,
	    bf->bf_state.bfs_dobaw,
	    SEQNO(bf->bf_state.bfs_seqno),
	    bf->bf_state.bfs_retries);
	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: %s: %s: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
	    __func__,
	    pfx,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    bf,
	    txq->axq_qnum,
	    txq->axq_depth,
	    txq->axq_aggr_depth);
	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: %s: %s: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
	    "isfiltered=%d\n",
	    __func__,
	    pfx,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    bf,
	    tid->axq_depth,
	    tid->hwq_depth,
	    tid->bar_wait,
	    tid->isfiltered);
	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: %s: %s: tid %d: "
	    "sched=%d, paused=%d, "
	    "incomp=%d, baw_head=%d, "
	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
	    __func__,
	    pfx,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    tid->tid,
	    tid->sched, tid->paused,
	    tid->incomp, tid->baw_head,
	    tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
	    ni->ni_txseqs[tid->tid]);

	/* XXX Dump the frame, see what it is? */
	ieee80211_dump_pkt(ni->ni_ic,
	    mtod(bf->bf_m, const uint8_t *),
	    bf->bf_m->m_len, 0, -1);
}

/*
 * Free any packets currently pending in the software TX queue.
 *
 * This will be called when a node is being deleted.
 *
 * It can also be called on an active node during an interface
 * reset or state transition.
 *
 * (From Linux/reference):
 *
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit.  The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
3762 */
3763 static void
3764 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3765 struct ath_tid *tid, ath_bufhead *bf_cq)
3766 {
3767 struct ath_buf *bf;
3768 struct ieee80211_tx_ampdu *tap;
3769 struct ieee80211_node *ni = &an->an_node;
3770 int t;
3771
3772 tap = ath_tx_get_tx_tid(an, tid->tid);
3773
3774 ATH_TX_LOCK_ASSERT(sc);
3775
3776 /* Walk the queue, free frames */
3777 t = 0;
3778 for (;;) {
3779 bf = ATH_TID_FIRST(tid);
3780 if (bf == NULL) {
3781 break;
3782 }
3783
3784 if (t == 0) {
3785 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3786 t = 1;
3787 }
3788
3789 ATH_TID_REMOVE(tid, bf, bf_list);
3790 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3791 }
3792
3793 /* And now, drain the filtered frame queue */
3794 t = 0;
3795 for (;;) {
3796 bf = ATH_TID_FILT_FIRST(tid);
3797 if (bf == NULL)
3798 break;
3799
3800 if (t == 0) {
3801 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3802 t = 1;
3803 }
3804
3805 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3806 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3807 }
3808
3809 /*
3810 * Override the clrdmask configuration for the next frame
3811 * in case there is some future transmission, just to get
3812 * the ball rolling.
3813 *
3814 * This won't hurt things if the TID is about to be freed.
3815 */
3816 ath_tx_set_clrdmask(sc, tid->an);
3817
3818 /*
3819 * Now that it's completed, grab the TID lock and update
3820 * the sequence number and BAW window.
3821 * Because sequence numbers have been assigned to frames
3822 * that haven't been sent yet, it's entirely possible
3823 * we'll be called with some pending frames that have not
3824 * been transmitted.
3825 *
3826 * The cleaner solution is to do the sequence number allocation
3827 * when the packet is first transmitted - and thus the "retries"
3828 * check above would be enough to update the BAW/seqno.
3829 */
3830
3831 /* But don't do it for non-QoS TIDs */
3832 if (tap) {
3833 #if 1
3834 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3835 "%s: %s: node %p: TID %d: sliding BAW left edge to %d\n",
3836 __func__,
3837 ath_hal_ether_sprintf(ni->ni_macaddr),
3838 an,
3839 tid->tid,
3840 tap->txa_start);
3841 #endif
3842 ni->ni_txseqs[tid->tid] = tap->txa_start;
3843 tid->baw_tail = tid->baw_head;
3844 }
3845 }
3846
3847 /*
3848 * Reset the TID state. This must only be called once the node has
3849 * had its frames flushed from this TID, to ensure that no other
3850 * pause / unpause logic can kick in.
3851 */
3852 static void
3853 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3854 {
3855
3856 #if 0
3857 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3858 tid->paused = tid->sched = tid->addba_tx_pending = 0;
3859 tid->incomp = tid->cleanup_inprogress = 0;
3860 #endif
3861
3862 /*
3863 * If we have a bar_wait set, we need to unpause the TID
3864 * here. Otherwise once cleanup has finished, the TID won't
3865 * have the right paused counter.
3866 *
3867 * XXX I'm not going through resume here - I don't want the
3868 * node to be rescheduled just yet. This however should be
3869 * methodized!
3870 */
3871 if (tid->bar_wait) {
3872 if (tid->paused > 0) {
3873 tid->paused --;
3874 }
3875 }
3876
3877 /*
3878 * XXX same with a currently filtered TID.
3879 *
3880 * Since this is being called during a flush, we assume that
3881 * the filtered frame list is actually empty.
3882 *
3883 * XXX TODO: add in a check to ensure that the filtered queue
3884 * depth is actually 0!
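*
* A minimal sketch of such a check, illustrative only and not
* compiled (it assumes ATH_TID_FILT_FIRST() returns NULL for an
* empty filtered frame queue, as its use elsewhere in this file
* suggests):
*/
#if 0
if (ATH_TID_FILT_FIRST(tid) != NULL)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: TID %d: filtered queue not empty at reset!\n",
__func__, tid->tid);
#endif
/*
* (See the filtered frame notes above.)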
3885 */
3886 if (tid->isfiltered) {
3887 if (tid->paused > 0) {
3888 tid->paused --;
3889 }
3890 }
3891
3892 /*
3893 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3894 * The TID may be going through cleanup from the last association
3895 * where things in the BAW are still in the hardware queue.
3896 */
3897 tid->bar_wait = 0;
3898 tid->bar_tx = 0;
3899 tid->isfiltered = 0;
3900 tid->sched = 0;
3901 tid->addba_tx_pending = 0;
3902
3903 /*
3904 * XXX TODO: it may just be enough to walk the HWQs and mark
3905 * frames for that node as non-aggregate; or mark the ath_node
3906 * with something that indicates that aggregation is no longer
3907 * occurring. Then we can just toss the BAW complaints and
3908 * do a complete hard reset of state here - no pause, no
3909 * complete counter, etc.
3910 */
3911
3912 }
3913
3914 /*
3915 * Flush all software queued packets for the given node.
3916 *
3917 * This occurs when a completion handler frees the last buffer
3918 * for a node, and the node is thus freed. This causes the node
3919 * to be cleaned up, which ends up calling ath_tx_node_flush.
3920 */
3921 void
3922 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3923 {
3924 int tid;
3925 ath_bufhead bf_cq;
3926 struct ath_buf *bf;
3927
3928 TAILQ_INIT(&bf_cq);
3929
3930 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3931 &an->an_node);
3932
3933 ATH_TX_LOCK(sc);
3934 DPRINTF(sc, ATH_DEBUG_NODE,
3935 "%s: %s: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3936 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3937 __func__,
3938 ath_hal_ether_sprintf(an->an_node.ni_macaddr),
3939 an->an_is_powersave,
3940 an->an_stack_psq,
3941 an->an_tim_set,
3942 an->an_swq_depth,
3943 an->clrdmask,
3944 an->an_leak_count);
3945
3946 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3947 struct ath_tid *atid = &an->an_tid[tid];
3948
3949 /* Free packets */
3950 ath_tx_tid_drain(sc, an, atid, &bf_cq);
3951
3952 /* Remove this tid from the list of active tids */
3953 ath_tx_tid_unsched(sc, atid);
3954
3955 /* Reset the per-TID pause, BAR, etc state */
3956 ath_tx_tid_reset(sc, atid);
3957 }
3958
3959 /*
3960 * Clear global leak count
3961 */
3962 an->an_leak_count = 0;
3963 ATH_TX_UNLOCK(sc);
3964
3965 /* Handle completed frames */
3966 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
3967 TAILQ_REMOVE(&bf_cq, bf, bf_list);
3968 ath_tx_default_comp(sc, bf, 0);
3969 }
3970 }
3971
3972 /*
3973 * Drain all the software TXQs currently with traffic queued.
3974 */
3975 void
3976 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
3977 {
3978 struct ath_tid *tid;
3979 ath_bufhead bf_cq;
3980 struct ath_buf *bf;
3981
3982 TAILQ_INIT(&bf_cq);
3983 ATH_TX_LOCK(sc);
3984
3985 /*
3986 * Iterate over all active tids for the given txq,
3987 * flushing and unsched'ing them
3988 */
3989 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
3990 tid = TAILQ_FIRST(&txq->axq_tidq);
3991 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
3992 ath_tx_tid_unsched(sc, tid);
3993 }
3994
3995 ATH_TX_UNLOCK(sc);
3996
3997 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
3998 TAILQ_REMOVE(&bf_cq, bf, bf_list);
3999 ath_tx_default_comp(sc, bf, 0);
4000 }
4001 }
4002
4003 /*
4004 * Handle completion of non-aggregate session frames.
4005 *
4006 * This (currently) doesn't implement software retransmission of
4007 * non-aggregate frames!
4008 *
4009 * Software retransmission of non-aggregate frames needs to obey
4010 * the strict sequence number ordering, and drop any frames that
4011 * will fail this.
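* (That is, a software-retried non-aggregate frame must not go out
* after a frame with a later sequence number has already been
* transmitted; frames that would violate this have to be dropped.)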
4012 * 4013 * For now, filtered frames and frame transmission will cause 4014 * all kinds of issues. So we don't support them. 4015 * 4016 * So anyone queuing frames via ath_tx_normal_xmit() or 4017 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4018 */ 4019 void 4020 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4021 { 4022 struct ieee80211_node *ni = bf->bf_node; 4023 struct ath_node *an = ATH_NODE(ni); 4024 int tid = bf->bf_state.bfs_tid; 4025 struct ath_tid *atid = &an->an_tid[tid]; 4026 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4027 4028 /* The TID state is protected behind the TXQ lock */ 4029 ATH_TX_LOCK(sc); 4030 4031 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4032 __func__, bf, fail, atid->hwq_depth - 1); 4033 4034 atid->hwq_depth--; 4035 4036 #if 0 4037 /* 4038 * If the frame was filtered, stick it on the filter frame 4039 * queue and complain about it. It shouldn't happen! 4040 */ 4041 if ((ts->ts_status & HAL_TXERR_FILT) || 4042 (ts->ts_status != 0 && atid->isfiltered)) { 4043 DPRINTF(sc, ATH_DEBUG_SW_TX, 4044 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4045 __func__, 4046 atid->isfiltered, 4047 ts->ts_status); 4048 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4049 } 4050 #endif 4051 if (atid->isfiltered) 4052 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4053 if (atid->hwq_depth < 0) 4054 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4055 __func__, atid->hwq_depth); 4056 4057 /* 4058 * If the queue is filtered, potentially mark it as complete 4059 * and reschedule it as needed. 4060 * 4061 * This is required as there may be a subsequent TX descriptor 4062 * for this end-node that has CLRDMASK set, so it's quite possible 4063 * that a filtered frame will be followed by a non-filtered 4064 * (complete or otherwise) frame. 4065 * 4066 * XXX should we do this before we complete the frame? 4067 */ 4068 if (atid->isfiltered) 4069 ath_tx_tid_filt_comp_complete(sc, atid); 4070 ATH_TX_UNLOCK(sc); 4071 4072 /* 4073 * punt to rate control if we're not being cleaned up 4074 * during a hw queue drain and the frame wanted an ACK. 4075 */ 4076 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4077 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4078 ts, bf->bf_state.bfs_pktlen, 4079 1, (ts->ts_status == 0) ? 0 : 1); 4080 4081 ath_tx_default_comp(sc, bf, fail); 4082 } 4083 4084 /* 4085 * Handle cleanup of aggregate session packets that aren't 4086 * an A-MPDU. 4087 * 4088 * There's no need to update the BAW here - the session is being 4089 * torn down. 4090 */ 4091 static void 4092 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4093 { 4094 struct ieee80211_node *ni = bf->bf_node; 4095 struct ath_node *an = ATH_NODE(ni); 4096 int tid = bf->bf_state.bfs_tid; 4097 struct ath_tid *atid = &an->an_tid[tid]; 4098 4099 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4100 __func__, tid, atid->incomp); 4101 4102 ATH_TX_LOCK(sc); 4103 atid->incomp--; 4104 if (atid->incomp == 0) { 4105 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4106 "%s: TID %d: cleaned up! resume!\n", 4107 __func__, tid); 4108 atid->cleanup_inprogress = 0; 4109 ath_tx_tid_resume(sc, atid); 4110 } 4111 ATH_TX_UNLOCK(sc); 4112 4113 ath_tx_default_comp(sc, bf, 0); 4114 } 4115 4116 /* 4117 * Performs transmit side cleanup when TID changes from aggregated to 4118 * unaggregated. 4119 * 4120 * - Discard all retry frames from the s/w queue. 
4121 * - Fix the tx completion function for all buffers in s/w queue. 4122 * - Count the number of unacked frames, and let transmit completion 4123 * handle it later. 4124 * 4125 * The caller is responsible for pausing the TID and unpausing the 4126 * TID if no cleanup was required. Otherwise the cleanup path will 4127 * unpause the TID once the last hardware queued frame is completed. 4128 */ 4129 static void 4130 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4131 ath_bufhead *bf_cq) 4132 { 4133 struct ath_tid *atid = &an->an_tid[tid]; 4134 struct ieee80211_tx_ampdu *tap; 4135 struct ath_buf *bf, *bf_next; 4136 4137 ATH_TX_LOCK_ASSERT(sc); 4138 4139 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4140 "%s: TID %d: called\n", __func__, tid); 4141 4142 /* 4143 * Move the filtered frames to the TX queue, before 4144 * we run off and discard/process things. 4145 */ 4146 /* XXX this is really quite inefficient */ 4147 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4148 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4149 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4150 } 4151 4152 /* 4153 * Update the frames in the software TX queue: 4154 * 4155 * + Discard retry frames in the queue 4156 * + Fix the completion function to be non-aggregate 4157 */ 4158 bf = ATH_TID_FIRST(atid); 4159 while (bf) { 4160 if (bf->bf_state.bfs_isretried) { 4161 bf_next = TAILQ_NEXT(bf, bf_list); 4162 ATH_TID_REMOVE(atid, bf, bf_list); 4163 if (bf->bf_state.bfs_dobaw) { 4164 ath_tx_update_baw(sc, an, atid, bf); 4165 if (!bf->bf_state.bfs_addedbaw) 4166 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4167 "%s: wasn't added: seqno %d\n", 4168 __func__, 4169 SEQNO(bf->bf_state.bfs_seqno)); 4170 } 4171 bf->bf_state.bfs_dobaw = 0; 4172 /* 4173 * Call the default completion handler with "fail" just 4174 * so upper levels are suitably notified about this. 4175 */ 4176 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4177 bf = bf_next; 4178 continue; 4179 } 4180 /* Give these the default completion handler */ 4181 bf->bf_comp = ath_tx_normal_comp; 4182 bf = TAILQ_NEXT(bf, bf_list); 4183 } 4184 4185 /* 4186 * Calculate what hardware-queued frames exist based 4187 * on the current BAW size. Ie, what frames have been 4188 * added to the TX hardware queue for this TID but 4189 * not yet ACKed. 4190 */ 4191 tap = ath_tx_get_tx_tid(an, tid); 4192 /* Need the lock - fiddling with BAW */ 4193 while (atid->baw_head != atid->baw_tail) { 4194 if (atid->tx_buf[atid->baw_head]) { 4195 atid->incomp++; 4196 atid->cleanup_inprogress = 1; 4197 atid->tx_buf[atid->baw_head] = NULL; 4198 } 4199 INCR(atid->baw_head, ATH_TID_MAX_BUFS); 4200 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 4201 } 4202 4203 if (atid->cleanup_inprogress) 4204 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4205 "%s: TID %d: cleanup needed: %d packets\n", 4206 __func__, tid, atid->incomp); 4207 4208 /* Owner now must free completed frames */ 4209 } 4210 4211 static struct ath_buf * 4212 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4213 struct ath_tid *tid, struct ath_buf *bf) 4214 { 4215 struct ath_buf *nbf; 4216 int error; 4217 4218 /* 4219 * Clone the buffer. This will handle the dma unmap and 4220 * copy the node reference to the new buffer. If this 4221 * works out, 'bf' will have no DMA mapping, no mbuf 4222 * pointer and no node reference. 
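* (Conversely, if the clone fails, 'bf' is presumably left untouched
* and still owns its mbuf, DMA mapping and node reference.)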
4223 */ 4224 nbf = ath_buf_clone(sc, bf); 4225 4226 #if 0 4227 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4228 __func__); 4229 #endif 4230 4231 if (nbf == NULL) { 4232 /* Failed to clone */ 4233 DPRINTF(sc, ATH_DEBUG_XMIT, 4234 "%s: failed to clone a busy buffer\n", 4235 __func__); 4236 return NULL; 4237 } 4238 4239 /* Setup the dma for the new buffer */ 4240 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4241 if (error != 0) { 4242 DPRINTF(sc, ATH_DEBUG_XMIT, 4243 "%s: failed to setup dma for clone\n", 4244 __func__); 4245 /* 4246 * Put this at the head of the list, not tail; 4247 * that way it doesn't interfere with the 4248 * busy buffer logic (which uses the tail of 4249 * the list.) 4250 */ 4251 ATH_TXBUF_LOCK(sc); 4252 ath_returnbuf_head(sc, nbf); 4253 ATH_TXBUF_UNLOCK(sc); 4254 return NULL; 4255 } 4256 4257 /* Update BAW if required, before we free the original buf */ 4258 if (bf->bf_state.bfs_dobaw) 4259 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4260 4261 /* Free original buffer; return new buffer */ 4262 ath_freebuf(sc, bf); 4263 4264 return nbf; 4265 } 4266 4267 /* 4268 * Handle retrying an unaggregate frame in an aggregate 4269 * session. 4270 * 4271 * If too many retries occur, pause the TID, wait for 4272 * any further retransmits (as there's no reason why 4273 * non-aggregate frames in an aggregate session are 4274 * transmitted in-order; they just have to be in-BAW) 4275 * and then queue a BAR. 4276 */ 4277 static void 4278 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4279 { 4280 struct ieee80211_node *ni = bf->bf_node; 4281 struct ath_node *an = ATH_NODE(ni); 4282 int tid = bf->bf_state.bfs_tid; 4283 struct ath_tid *atid = &an->an_tid[tid]; 4284 struct ieee80211_tx_ampdu *tap; 4285 4286 ATH_TX_LOCK(sc); 4287 4288 tap = ath_tx_get_tx_tid(an, tid); 4289 4290 /* 4291 * If the buffer is marked as busy, we can't directly 4292 * reuse it. Instead, try to clone the buffer. 4293 * If the clone is successful, recycle the old buffer. 4294 * If the clone is unsuccessful, set bfs_retries to max 4295 * to force the next bit of code to free the buffer 4296 * for us. 4297 */ 4298 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4299 (bf->bf_flags & ATH_BUF_BUSY)) { 4300 struct ath_buf *nbf; 4301 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4302 if (nbf) 4303 /* bf has been freed at this point */ 4304 bf = nbf; 4305 else 4306 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4307 } 4308 4309 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4310 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4311 "%s: exceeded retries; seqno %d\n", 4312 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4313 sc->sc_stats.ast_tx_swretrymax++; 4314 4315 /* Update BAW anyway */ 4316 if (bf->bf_state.bfs_dobaw) { 4317 ath_tx_update_baw(sc, an, atid, bf); 4318 if (! bf->bf_state.bfs_addedbaw) 4319 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4320 "%s: wasn't added: seqno %d\n", 4321 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4322 } 4323 bf->bf_state.bfs_dobaw = 0; 4324 4325 /* Suspend the TX queue and get ready to send the BAR */ 4326 ath_tx_tid_bar_suspend(sc, atid); 4327 4328 /* Send the BAR if there are no other frames waiting */ 4329 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4330 ath_tx_tid_bar_tx(sc, atid); 4331 4332 ATH_TX_UNLOCK(sc); 4333 4334 /* Free buffer, bf is free after this call */ 4335 ath_tx_default_comp(sc, bf, 0); 4336 return; 4337 } 4338 4339 /* 4340 * This increments the retry counter as well as 4341 * sets the retry flag in the ath_buf and packet 4342 * body. 
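*
* ("Packet body" refers to the retry bit in the 802.11 header. As a
* rough, non-compiled sketch of what such a helper does - the real
* logic lives in ath_tx_set_retry():)
*/
#if 0
struct ieee80211_frame *wh;

wh = mtod(bf->bf_m, struct ieee80211_frame *);
wh->i_fc[1] |= IEEE80211_FC1_RETRY; /* mark the 802.11 retry bit */
bf->bf_state.bfs_isretried = 1;
bf->bf_state.bfs_retries++;
#endif
/*
* (End of sketch.)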
4343 */ 4344 ath_tx_set_retry(sc, bf); 4345 sc->sc_stats.ast_tx_swretries++; 4346 4347 /* 4348 * Insert this at the head of the queue, so it's 4349 * retried before any current/subsequent frames. 4350 */ 4351 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4352 ath_tx_tid_sched(sc, atid); 4353 /* Send the BAR if there are no other frames waiting */ 4354 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4355 ath_tx_tid_bar_tx(sc, atid); 4356 4357 ATH_TX_UNLOCK(sc); 4358 } 4359 4360 /* 4361 * Common code for aggregate excessive retry/subframe retry. 4362 * If retrying, queues buffers to bf_q. If not, frees the 4363 * buffers. 4364 * 4365 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4366 */ 4367 static int 4368 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4369 ath_bufhead *bf_q) 4370 { 4371 struct ieee80211_node *ni = bf->bf_node; 4372 struct ath_node *an = ATH_NODE(ni); 4373 int tid = bf->bf_state.bfs_tid; 4374 struct ath_tid *atid = &an->an_tid[tid]; 4375 4376 ATH_TX_LOCK_ASSERT(sc); 4377 4378 /* XXX clr11naggr should be done for all subframes */ 4379 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4380 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4381 4382 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4383 4384 /* 4385 * If the buffer is marked as busy, we can't directly 4386 * reuse it. Instead, try to clone the buffer. 4387 * If the clone is successful, recycle the old buffer. 4388 * If the clone is unsuccessful, set bfs_retries to max 4389 * to force the next bit of code to free the buffer 4390 * for us. 4391 */ 4392 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4393 (bf->bf_flags & ATH_BUF_BUSY)) { 4394 struct ath_buf *nbf; 4395 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4396 if (nbf) 4397 /* bf has been freed at this point */ 4398 bf = nbf; 4399 else 4400 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4401 } 4402 4403 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4404 sc->sc_stats.ast_tx_swretrymax++; 4405 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4406 "%s: max retries: seqno %d\n", 4407 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4408 ath_tx_update_baw(sc, an, atid, bf); 4409 if (!bf->bf_state.bfs_addedbaw) 4410 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4411 "%s: wasn't added: seqno %d\n", 4412 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4413 bf->bf_state.bfs_dobaw = 0; 4414 return 1; 4415 } 4416 4417 ath_tx_set_retry(sc, bf); 4418 sc->sc_stats.ast_tx_swretries++; 4419 bf->bf_next = NULL; /* Just to make sure */ 4420 4421 /* Clear the aggregate state */ 4422 bf->bf_state.bfs_aggr = 0; 4423 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4424 bf->bf_state.bfs_nframes = 1; 4425 4426 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4427 return 0; 4428 } 4429 4430 /* 4431 * error pkt completion for an aggregate destination 4432 */ 4433 static void 4434 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4435 struct ath_tid *tid) 4436 { 4437 struct ieee80211_node *ni = bf_first->bf_node; 4438 struct ath_node *an = ATH_NODE(ni); 4439 struct ath_buf *bf_next, *bf; 4440 ath_bufhead bf_q; 4441 int drops = 0; 4442 struct ieee80211_tx_ampdu *tap; 4443 ath_bufhead bf_cq; 4444 4445 TAILQ_INIT(&bf_q); 4446 TAILQ_INIT(&bf_cq); 4447 4448 /* 4449 * Update rate control - all frames have failed. 4450 * 4451 * XXX use the length in the first frame in the series; 4452 * XXX just so things are consistent for now. 
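*
* (Passing bfs_nframes as both the frame count and the failed count
* below reports the aggregate as a complete loss to the rate control
* code.)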
4453 */
4454 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4455 &bf_first->bf_status.ds_txstat,
4456 bf_first->bf_state.bfs_pktlen,
4457 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4458
4459 ATH_TX_LOCK(sc);
4460 tap = ath_tx_get_tx_tid(an, tid->tid);
4461 sc->sc_stats.ast_tx_aggr_failall++;
4462
4463 /* Retry all subframes */
4464 bf = bf_first;
4465 while (bf) {
4466 bf_next = bf->bf_next;
4467 bf->bf_next = NULL; /* Remove it from the aggr list */
4468 sc->sc_stats.ast_tx_aggr_fail++;
4469 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4470 drops++;
4471 bf->bf_next = NULL;
4472 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4473 }
4474 bf = bf_next;
4475 }
4476
4477 /* Prepend all frames to the beginning of the queue */
4478 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4479 TAILQ_REMOVE(&bf_q, bf, bf_list);
4480 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4481 }
4482
4483 /*
4484 * Schedule the TID to be re-tried.
4485 */
4486 ath_tx_tid_sched(sc, tid);
4487
4488 /*
4489 * send bar if we dropped any frames
4490 *
4491 * Keep the txq lock held for now, as we need to ensure
4492 * that ni_txseqs[] is consistent (as it's being updated
4493 * in the ifnet TX context or raw TX context.)
4494 */
4495 if (drops) {
4496 /* Suspend the TX queue and get ready to send the BAR */
4497 ath_tx_tid_bar_suspend(sc, tid);
4498 }
4499
4500 /*
4501 * Send BAR if required
4502 */
4503 if (ath_tx_tid_bar_tx_ready(sc, tid))
4504 ath_tx_tid_bar_tx(sc, tid);
4505
4506 ATH_TX_UNLOCK(sc);
4507
4508 /* Complete frames which errored out */
4509 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4510 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4511 ath_tx_default_comp(sc, bf, 0);
4512 }
4513 }
4514
4515 /*
4516 * Handle clean-up of packets from an aggregate list.
4517 *
4518 * There's no need to update the BAW here - the session is being
4519 * torn down.
4520 */
4521 static void
4522 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4523 {
4524 struct ath_buf *bf, *bf_next;
4525 struct ieee80211_node *ni = bf_first->bf_node;
4526 struct ath_node *an = ATH_NODE(ni);
4527 int tid = bf_first->bf_state.bfs_tid;
4528 struct ath_tid *atid = &an->an_tid[tid];
4529
4530 ATH_TX_LOCK(sc);
4531
4532 /* update incomp */
4533 bf = bf_first;
4534 while (bf) {
4535 atid->incomp--;
4536 bf = bf->bf_next;
4537 }
4538
4539 if (atid->incomp == 0) {
4540 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4541 "%s: TID %d: cleaned up! resume!\n",
4542 __func__, tid);
4543 atid->cleanup_inprogress = 0;
4544 ath_tx_tid_resume(sc, atid);
4545 }
4546
4547 /* Send BAR if required */
4548 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4549 /*
4550 * XXX TODO: we should likely just tear down the BAR state here,
4551 * rather than sending a BAR.
4552 */
4553 if (ath_tx_tid_bar_tx_ready(sc, atid))
4554 ath_tx_tid_bar_tx(sc, atid);
4555
4556 ATH_TX_UNLOCK(sc);
4557
4558 /* Handle frame completion */
4559 bf = bf_first;
4560 while (bf) {
4561 bf_next = bf->bf_next;
4562 ath_tx_default_comp(sc, bf, 1);
4563 bf = bf_next;
4564 }
4565 }
4566
4567 /*
4568 * Handle completion of a set of aggregate frames.
4569 *
4570 * Note: the completion handler is the last descriptor in the aggregate,
4571 * not the last descriptor in the first frame.
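*
* For reference, the BA bitmap handling below (the ATH_BA_INDEX /
* ATH_BA_ISSET macros) boils down to the following illustrative,
* non-compiled sketch; example_ba_isset() is hypothetical and assumes
* a 64-entry block-ack window split across two 32-bit words:
*/
#if 0
static __inline int
example_ba_isset(const uint32_t ba[2], int seq_st, int seqno)
{
/* Offset of this subframe from the BA starting sequence number */
int idx = (seqno - seq_st) & (IEEE80211_SEQ_RANGE - 1);

if (idx >= 64) /* outside the 64-entry BA window */
return (0);
return ((ba[idx / 32] >> (idx % 32)) & 1);
}
#endif
/*
* (End of illustrative sketch.)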
4572 */
4573 static void
4574 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4575 int fail)
4576 {
4577 //struct ath_desc *ds = bf->bf_lastds;
4578 struct ieee80211_node *ni = bf_first->bf_node;
4579 struct ath_node *an = ATH_NODE(ni);
4580 int tid = bf_first->bf_state.bfs_tid;
4581 struct ath_tid *atid = &an->an_tid[tid];
4582 struct ath_tx_status ts;
4583 struct ieee80211_tx_ampdu *tap;
4584 ath_bufhead bf_q;
4585 ath_bufhead bf_cq;
4586 int seq_st, tx_ok;
4587 int hasba, isaggr;
4588 uint32_t ba[2];
4589 struct ath_buf *bf, *bf_next;
4590 int ba_index;
4591 int drops = 0;
4592 int nframes = 0, nbad = 0, nf;
4593 int pktlen;
4594 /* XXX there's too much on the stack? */
4595 struct ath_rc_series rc[ATH_RC_NUM];
4596 int txseq;
4597
4598 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4599 __func__, atid->hwq_depth);
4600
4601 /*
4602 * Take a copy; this may be needed -after- bf_first
4603 * has been completed and freed.
4604 */
4605 ts = bf_first->bf_status.ds_txstat;
4606
4607 TAILQ_INIT(&bf_q);
4608 TAILQ_INIT(&bf_cq);
4609
4610 /* The TID state is kept behind the TXQ lock */
4611 ATH_TX_LOCK(sc);
4612
4613 atid->hwq_depth--;
4614 if (atid->hwq_depth < 0)
4615 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4616 __func__, atid->hwq_depth);
4617
4618 /*
4619 * If the TID is filtered, handle completing the filter
4620 * transition before potentially kicking it to the cleanup
4621 * function.
4622 *
4623 * XXX this is duplicate work, ew.
4624 */
4625 if (atid->isfiltered)
4626 ath_tx_tid_filt_comp_complete(sc, atid);
4627
4628 /*
4629 * Punt cleanup to the relevant function, not our problem now
4630 */
4631 if (atid->cleanup_inprogress) {
4632 if (atid->isfiltered)
4633 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4634 "%s: isfiltered=1, normal_comp?\n",
4635 __func__);
4636 ATH_TX_UNLOCK(sc);
4637 ath_tx_comp_cleanup_aggr(sc, bf_first);
4638 return;
4639 }
4640
4641 /*
4642 * If the frame is filtered, transition to filtered frame
4643 * mode and add this to the filtered frame list.
4644 *
4645 * XXX TODO: figure out how this interoperates with
4646 * BAR, pause and cleanup states.
4647 */
4648 if ((ts.ts_status & HAL_TXERR_FILT) ||
4649 (ts.ts_status != 0 && atid->isfiltered)) {
4650 if (fail != 0)
4651 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4652 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4653 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4654
4655 /* Remove from BAW */
4656 TAILQ_FOREACH(bf, &bf_cq, bf_list) {
4657 if (bf->bf_state.bfs_addedbaw)
4658 drops++;
4659 if (bf->bf_state.bfs_dobaw) {
4660 ath_tx_update_baw(sc, an, atid, bf);
4661 if (!bf->bf_state.bfs_addedbaw)
4662 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4663 "%s: wasn't added: seqno %d\n",
4664 __func__,
4665 SEQNO(bf->bf_state.bfs_seqno));
4666 }
4667 bf->bf_state.bfs_dobaw = 0;
4668 }
4669 /*
4670 * If any intermediate frames in the BAW were dropped when
4671 * handling filtering things, send a BAR.
4672 */
4673 if (drops)
4674 ath_tx_tid_bar_suspend(sc, atid);
4675
4676 /*
4677 * Finish up by sending a BAR if required and freeing
4678 * the frames outside of the TX lock.
4679 */
4680 goto finish_send_bar;
4681 }
4682
4683 /*
4684 * XXX for now, use the first frame in the aggregate for
4685 * XXX rate control completion; it's at least consistent.
4686 */
4687 pktlen = bf_first->bf_state.bfs_pktlen;
4688
4689 /*
4690 * Handle errors first!
4691 *
4692 * Here, handle _any_ error as an "exceeded retries" error.
4693 * Later on (when filtered frames are to be specially handled) 4694 * it'll have to be expanded. 4695 */ 4696 #if 0 4697 if (ts.ts_status & HAL_TXERR_XRETRY) { 4698 #endif 4699 if (ts.ts_status != 0) { 4700 ATH_TX_UNLOCK(sc); 4701 ath_tx_comp_aggr_error(sc, bf_first, atid); 4702 return; 4703 } 4704 4705 tap = ath_tx_get_tx_tid(an, tid); 4706 4707 /* 4708 * extract starting sequence and block-ack bitmap 4709 */ 4710 /* XXX endian-ness of seq_st, ba? */ 4711 seq_st = ts.ts_seqnum; 4712 hasba = !! (ts.ts_flags & HAL_TX_BA); 4713 tx_ok = (ts.ts_status == 0); 4714 isaggr = bf_first->bf_state.bfs_aggr; 4715 ba[0] = ts.ts_ba_low; 4716 ba[1] = ts.ts_ba_high; 4717 4718 /* 4719 * Copy the TX completion status and the rate control 4720 * series from the first descriptor, as it may be freed 4721 * before the rate control code can get its grubby fingers 4722 * into things. 4723 */ 4724 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4725 4726 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4727 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4728 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4729 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4730 isaggr, seq_st, hasba, ba[0], ba[1]); 4731 4732 /* 4733 * The reference driver doesn't do this; it simply ignores 4734 * this check in its entirety. 4735 * 4736 * I've seen this occur when using iperf to send traffic 4737 * out tid 1 - the aggregate frames are all marked as TID 1, 4738 * but the TXSTATUS has TID=0. So, let's just ignore this 4739 * check. 4740 */ 4741 #if 0 4742 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4743 if (tid != ts.ts_tid) { 4744 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 4745 __func__, tid, ts.ts_tid); 4746 tx_ok = 0; 4747 } 4748 #endif 4749 4750 /* AR5416 BA bug; this requires an interface reset */ 4751 if (isaggr && tx_ok && (! hasba)) { 4752 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4753 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 4754 "seq_st=%d\n", 4755 __func__, hasba, tx_ok, isaggr, seq_st); 4756 /* XXX TODO: schedule an interface reset */ 4757 #ifdef ATH_DEBUG 4758 ath_printtxbuf(sc, bf_first, 4759 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 4760 #endif 4761 } 4762 4763 /* 4764 * Walk the list of frames, figure out which ones were correctly 4765 * sent and which weren't. 4766 */ 4767 bf = bf_first; 4768 nf = bf_first->bf_state.bfs_nframes; 4769 4770 /* bf_first is going to be invalid once this list is walked */ 4771 bf_first = NULL; 4772 4773 /* 4774 * Walk the list of completed frames and determine 4775 * which need to be completed and which need to be 4776 * retransmitted. 4777 * 4778 * For completed frames, the completion functions need 4779 * to be called at the end of this function as the last 4780 * node reference may free the node. 4781 * 4782 * Finally, since the TXQ lock can't be held during the 4783 * completion callback (to avoid lock recursion), 4784 * the completion calls have to be done outside of the 4785 * lock. 
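* (Hence the bf_cq list: frames are unlinked and the BAW updated
* while the TX lock is held, and the actual completion calls happen
* after ATH_TX_UNLOCK() below.)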
4786 */ 4787 while (bf) { 4788 nframes++; 4789 ba_index = ATH_BA_INDEX(seq_st, 4790 SEQNO(bf->bf_state.bfs_seqno)); 4791 bf_next = bf->bf_next; 4792 bf->bf_next = NULL; /* Remove it from the aggr list */ 4793 4794 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4795 "%s: checking bf=%p seqno=%d; ack=%d\n", 4796 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 4797 ATH_BA_ISSET(ba, ba_index)); 4798 4799 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 4800 sc->sc_stats.ast_tx_aggr_ok++; 4801 ath_tx_update_baw(sc, an, atid, bf); 4802 bf->bf_state.bfs_dobaw = 0; 4803 if (!bf->bf_state.bfs_addedbaw) 4804 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4805 "%s: wasn't added: seqno %d\n", 4806 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4807 bf->bf_next = NULL; 4808 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4809 } else { 4810 sc->sc_stats.ast_tx_aggr_fail++; 4811 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4812 drops++; 4813 bf->bf_next = NULL; 4814 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4815 } 4816 nbad++; 4817 } 4818 bf = bf_next; 4819 } 4820 4821 /* 4822 * Now that the BAW updates have been done, unlock 4823 * 4824 * txseq is grabbed before the lock is released so we 4825 * have a consistent view of what -was- in the BAW. 4826 * Anything after this point will not yet have been 4827 * TXed. 4828 */ 4829 txseq = tap->txa_start; 4830 ATH_TX_UNLOCK(sc); 4831 4832 if (nframes != nf) 4833 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4834 "%s: num frames seen=%d; bf nframes=%d\n", 4835 __func__, nframes, nf); 4836 4837 /* 4838 * Now we know how many frames were bad, call the rate 4839 * control code. 4840 */ 4841 if (fail == 0) 4842 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 4843 nbad); 4844 4845 /* 4846 * send bar if we dropped any frames 4847 */ 4848 if (drops) { 4849 /* Suspend the TX queue and get ready to send the BAR */ 4850 ATH_TX_LOCK(sc); 4851 ath_tx_tid_bar_suspend(sc, atid); 4852 ATH_TX_UNLOCK(sc); 4853 } 4854 4855 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4856 "%s: txa_start now %d\n", __func__, tap->txa_start); 4857 4858 ATH_TX_LOCK(sc); 4859 4860 /* Prepend all frames to the beginning of the queue */ 4861 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4862 TAILQ_REMOVE(&bf_q, bf, bf_list); 4863 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4864 } 4865 4866 /* 4867 * Reschedule to grab some further frames. 4868 */ 4869 ath_tx_tid_sched(sc, atid); 4870 4871 /* 4872 * If the queue is filtered, re-schedule as required. 4873 * 4874 * This is required as there may be a subsequent TX descriptor 4875 * for this end-node that has CLRDMASK set, so it's quite possible 4876 * that a filtered frame will be followed by a non-filtered 4877 * (complete or otherwise) frame. 4878 * 4879 * XXX should we do this before we complete the frame? 4880 */ 4881 if (atid->isfiltered) 4882 ath_tx_tid_filt_comp_complete(sc, atid); 4883 4884 finish_send_bar: 4885 4886 /* 4887 * Send BAR if required 4888 */ 4889 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4890 ath_tx_tid_bar_tx(sc, atid); 4891 4892 ATH_TX_UNLOCK(sc); 4893 4894 /* Do deferred completion */ 4895 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4896 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4897 ath_tx_default_comp(sc, bf, 0); 4898 } 4899 } 4900 4901 /* 4902 * Handle completion of unaggregated frames in an ADDBA 4903 * session. 4904 * 4905 * Fail is set to 1 if the entry is being freed via a call to 4906 * ath_tx_draintxq(). 
4907 */
4908 static void
4909 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
4910 {
4911 struct ieee80211_node *ni = bf->bf_node;
4912 struct ath_node *an = ATH_NODE(ni);
4913 int tid = bf->bf_state.bfs_tid;
4914 struct ath_tid *atid = &an->an_tid[tid];
4915 struct ath_tx_status ts;
4916 int drops = 0;
4917
4918 /*
4919 * Take a copy of this; filtering/cloning the frame may free the
4920 * bf pointer.
4921 */
4922 ts = bf->bf_status.ds_txstat;
4923
4924 /*
4925 * Update rate control status here, before we possibly
4926 * punt to retry or cleanup.
4927 *
4928 * Do it outside of the TXQ lock.
4929 */
4930 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4931 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4932 &bf->bf_status.ds_txstat,
4933 bf->bf_state.bfs_pktlen,
4934 1, (ts.ts_status == 0) ? 0 : 1);
4935
4936 /*
4937 * This is called early so atid->hwq_depth can be tracked.
4938 * This unfortunately means that it's released and regrabbed
4939 * during retry and cleanup. That's rather inefficient.
4940 */
4941 ATH_TX_LOCK(sc);
4942
4943 if (tid == IEEE80211_NONQOS_TID)
4944 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
4945
4946 DPRINTF(sc, ATH_DEBUG_SW_TX,
4947 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
4948 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
4949 SEQNO(bf->bf_state.bfs_seqno));
4950
4951 atid->hwq_depth--;
4952 if (atid->hwq_depth < 0)
4953 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4954 __func__, atid->hwq_depth);
4955
4956 /*
4957 * If the TID is filtered, handle completing the filter
4958 * transition before potentially kicking it to the cleanup
4959 * function.
4960 */
4961 if (atid->isfiltered)
4962 ath_tx_tid_filt_comp_complete(sc, atid);
4963
4964 /*
4965 * If a cleanup is in progress, punt to comp_cleanup
4966 * rather than handling it here; it's thus responsible for
4967 * cleaning up, calling the completion function in net80211,
4968 * etc.
4969 */
4970 if (atid->cleanup_inprogress) {
4971 if (atid->isfiltered)
4972 DPRINTF(sc, ATH_DEBUG_SW_TX,
4973 "%s: isfiltered=1, normal_comp?\n",
4974 __func__);
4975 ATH_TX_UNLOCK(sc);
4976 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
4977 __func__);
4978 ath_tx_comp_cleanup_unaggr(sc, bf);
4979 return;
4980 }
4981
4982 /*
4983 * XXX TODO: how does cleanup, BAR and filtered frame handling
4984 * overlap?
4985 *
4986 * If the frame is filtered OR if it's any failure but
4987 * the TID is filtered, the frame must be added to the
4988 * filtered frame list.
4989 *
4990 * However - a busy buffer can't be added to the filtered
4991 * list as it will end up being recycled without having
4992 * been made available for the hardware.
4993 */
4994 if ((ts.ts_status & HAL_TXERR_FILT) ||
4995 (ts.ts_status != 0 && atid->isfiltered)) {
4996 int freeframe;
4997
4998 if (fail != 0)
4999 DPRINTF(sc, ATH_DEBUG_SW_TX,
5000 "%s: isfiltered=1, fail=%d\n",
5001 __func__, fail);
5002 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5003 if (freeframe) {
5004 /* Remove from BAW */
5005 if (bf->bf_state.bfs_addedbaw)
5006 drops++;
5007 if (bf->bf_state.bfs_dobaw) {
5008 ath_tx_update_baw(sc, an, atid, bf);
5009 if (!bf->bf_state.bfs_addedbaw)
5010 DPRINTF(sc, ATH_DEBUG_SW_TX,
5011 "%s: wasn't added: seqno %d\n",
5012 __func__, SEQNO(bf->bf_state.bfs_seqno));
5013 }
5014 bf->bf_state.bfs_dobaw = 0;
5015 }
5016
5017 /*
5018 * If the frame couldn't be filtered, treat it as a drop and
5019 * prepare to send a BAR.
5020 */ 5021 if (freeframe && drops) 5022 ath_tx_tid_bar_suspend(sc, atid); 5023 5024 /* 5025 * Send BAR if required 5026 */ 5027 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5028 ath_tx_tid_bar_tx(sc, atid); 5029 5030 ATH_TX_UNLOCK(sc); 5031 /* 5032 * If freeframe is set, then the frame couldn't be 5033 * cloned and bf is still valid. Just complete/free it. 5034 */ 5035 if (freeframe) 5036 ath_tx_default_comp(sc, bf, fail); 5037 5038 5039 return; 5040 } 5041 /* 5042 * Don't bother with the retry check if all frames 5043 * are being failed (eg during queue deletion.) 5044 */ 5045 #if 0 5046 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5047 #endif 5048 if (fail == 0 && ts.ts_status != 0) { 5049 ATH_TX_UNLOCK(sc); 5050 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5051 __func__); 5052 ath_tx_aggr_retry_unaggr(sc, bf); 5053 return; 5054 } 5055 5056 /* Success? Complete */ 5057 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 5058 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 5059 if (bf->bf_state.bfs_dobaw) { 5060 ath_tx_update_baw(sc, an, atid, bf); 5061 bf->bf_state.bfs_dobaw = 0; 5062 if (!bf->bf_state.bfs_addedbaw) 5063 DPRINTF(sc, ATH_DEBUG_SW_TX, 5064 "%s: wasn't added: seqno %d\n", 5065 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5066 } 5067 5068 /* 5069 * If the queue is filtered, re-schedule as required. 5070 * 5071 * This is required as there may be a subsequent TX descriptor 5072 * for this end-node that has CLRDMASK set, so it's quite possible 5073 * that a filtered frame will be followed by a non-filtered 5074 * (complete or otherwise) frame. 5075 * 5076 * XXX should we do this before we complete the frame? 5077 */ 5078 if (atid->isfiltered) 5079 ath_tx_tid_filt_comp_complete(sc, atid); 5080 5081 /* 5082 * Send BAR if required 5083 */ 5084 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5085 ath_tx_tid_bar_tx(sc, atid); 5086 5087 ATH_TX_UNLOCK(sc); 5088 5089 ath_tx_default_comp(sc, bf, fail); 5090 /* bf is freed at this point */ 5091 } 5092 5093 void 5094 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 5095 { 5096 if (bf->bf_state.bfs_aggr) 5097 ath_tx_aggr_comp_aggr(sc, bf, fail); 5098 else 5099 ath_tx_aggr_comp_unaggr(sc, bf, fail); 5100 } 5101 5102 /* 5103 * Schedule some packets from the given node/TID to the hardware. 5104 * 5105 * This is the aggregate version. 5106 */ 5107 void 5108 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 5109 struct ath_tid *tid) 5110 { 5111 struct ath_buf *bf; 5112 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5113 struct ieee80211_tx_ampdu *tap; 5114 ATH_AGGR_STATUS status; 5115 ath_bufhead bf_q; 5116 5117 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 5118 ATH_TX_LOCK_ASSERT(sc); 5119 5120 /* 5121 * XXX TODO: If we're called for a queue that we're leaking frames to, 5122 * ensure we only leak one. 5123 */ 5124 5125 tap = ath_tx_get_tx_tid(an, tid->tid); 5126 5127 if (tid->tid == IEEE80211_NONQOS_TID) 5128 DPRINTF(sc, ATH_DEBUG_SW_TX, 5129 "%s: called for TID=NONQOS_TID?\n", __func__); 5130 5131 for (;;) { 5132 status = ATH_AGGR_DONE; 5133 5134 /* 5135 * If the upper layer has paused the TID, don't 5136 * queue any further packets. 5137 * 5138 * This can also occur from the completion task because 5139 * of packet loss; but as its serialised with this code, 5140 * it won't "appear" half way through queuing packets. 5141 */ 5142 if (! 
ath_tx_tid_can_tx_or_sched(sc, tid))
5143 break;
5144
5145 bf = ATH_TID_FIRST(tid);
5146 if (bf == NULL) {
5147 break;
5148 }
5149
5150 /*
5151 * If the packet doesn't fall within the BAW (eg a NULL
5152 * data frame), schedule it directly; continue.
5153 */
5154 if (! bf->bf_state.bfs_dobaw) {
5155 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5156 "%s: non-baw packet\n",
5157 __func__);
5158 ATH_TID_REMOVE(tid, bf, bf_list);
5159
5160 if (bf->bf_state.bfs_nframes > 1)
5161 DPRINTF(sc, ATH_DEBUG_SW_TX,
5162 "%s: aggr=%d, nframes=%d\n",
5163 __func__,
5164 bf->bf_state.bfs_aggr,
5165 bf->bf_state.bfs_nframes);
5166
5167 /*
5168 * This shouldn't happen - such frames shouldn't
5169 * ever have been queued as an aggregate in the
5170 * first place. However, make sure the fields
5171 * are correctly setup just to be totally sure.
5172 */
5173 bf->bf_state.bfs_aggr = 0;
5174 bf->bf_state.bfs_nframes = 1;
5175
5176 /* Update CLRDMASK just before this frame is queued */
5177 ath_tx_update_clrdmask(sc, tid, bf);
5178
5179 ath_tx_do_ratelookup(sc, bf);
5180 ath_tx_calc_duration(sc, bf);
5181 ath_tx_calc_protection(sc, bf);
5182 ath_tx_set_rtscts(sc, bf);
5183 ath_tx_rate_fill_rcflags(sc, bf);
5184 ath_tx_setds(sc, bf);
5185 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5186
5187 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5188
5189 /* Queue the packet; continue */
5190 goto queuepkt;
5191 }
5192
5193 TAILQ_INIT(&bf_q);
5194
5195 /*
5196 * Do a rate control lookup on the first frame in the
5197 * list. The rate control code needs that to occur
5198 * before it can determine whether to TX.
5199 * It's inaccurate because the rate control code doesn't
5200 * really "do" aggregate lookups, so it only considers
5201 * the size of the first frame.
5202 */
5203 ath_tx_do_ratelookup(sc, bf);
5204 bf->bf_state.bfs_rc[3].rix = 0;
5205 bf->bf_state.bfs_rc[3].tries = 0;
5206
5207 ath_tx_calc_duration(sc, bf);
5208 ath_tx_calc_protection(sc, bf);
5209
5210 ath_tx_set_rtscts(sc, bf);
5211 ath_tx_rate_fill_rcflags(sc, bf);
5212
5213 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5214
5215 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5216 "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5217
5218 /*
5219 * No frames to be picked up - out of BAW
5220 */
5221 if (TAILQ_EMPTY(&bf_q))
5222 break;
5223
5224 /*
5225 * This assumes that the descriptors in the ath_bufhead
5226 * are already linked together via bf_next pointers.
5227 */
5228 bf = TAILQ_FIRST(&bf_q);
5229
5230 if (status == ATH_AGGR_8K_LIMITED)
5231 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5232
5233 /*
5234 * If it's the only frame, send it as a non-aggregate;
5235 * assume that ath_tx_form_aggr() has checked
5236 * whether it's in the BAW and added it appropriately.
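* (In that case the aggregate fields are cleared below and the frame
* goes out through the normal, non-11n descriptor setup path.)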
5237 */
5238 if (bf->bf_state.bfs_nframes == 1) {
5239 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5240 "%s: single-frame aggregate\n", __func__);
5241
5242 /* Update CLRDMASK just before this frame is queued */
5243 ath_tx_update_clrdmask(sc, tid, bf);
5244
5245 bf->bf_state.bfs_aggr = 0;
5246 bf->bf_state.bfs_ndelim = 0;
5247 ath_tx_setds(sc, bf);
5248 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5249 if (status == ATH_AGGR_BAW_CLOSED)
5250 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5251 else
5252 sc->sc_aggr_stats.aggr_single_pkt++;
5253 } else {
5254 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5255 "%s: multi-frame aggregate: %d frames, "
5256 "length %d\n",
5257 __func__, bf->bf_state.bfs_nframes,
5258 bf->bf_state.bfs_al);
5259 bf->bf_state.bfs_aggr = 1;
5260 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5261 sc->sc_aggr_stats.aggr_aggr_pkt++;
5262
5263 /* Update CLRDMASK just before this frame is queued */
5264 ath_tx_update_clrdmask(sc, tid, bf);
5265
5266 /*
5267 * Calculate the duration/protection as required.
5268 */
5269 ath_tx_calc_duration(sc, bf);
5270 ath_tx_calc_protection(sc, bf);
5271
5272 /*
5273 * Update the rate and rtscts information based on the
5274 * rate decision made by the rate control code;
5275 * the first frame in the aggregate needs it.
5276 */
5277 ath_tx_set_rtscts(sc, bf);
5278
5279 /*
5280 * Setup the relevant descriptor fields
5281 * for aggregation. The first descriptor
5282 * already points to the rest in the chain.
5283 */
5284 ath_tx_setds_11n(sc, bf);
5285
5286 }
5287 queuepkt:
5288 /* Set completion handler, multi-frame aggregate or not */
5289 bf->bf_comp = ath_tx_aggr_comp;
5290
5291 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5292 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5293
5294 /*
5295 * Update leak count and frame config if we're leaking frames.
5296 *
5297 * XXX TODO: it should update all frames in an aggregate
5298 * correctly!
5299 */
5300 ath_tx_leak_count_update(sc, tid, bf);
5301
5302 /* Punt to txq */
5303 ath_tx_handoff(sc, txq, bf);
5304
5305 /* Track outstanding buffer count to hardware */
5306 /* aggregates are "one" buffer */
5307 tid->hwq_depth++;
5308
5309 /*
5310 * Break out if ath_tx_form_aggr() indicated
5311 * there can't be any further progress (eg BAW is full.)
5312 * Checking for an empty txq is done above.
5313 *
5314 * XXX locking on txq here?
5315 */
5316 /* XXX TXQ locking */
5317 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5318 (status == ATH_AGGR_BAW_CLOSED ||
5319 status == ATH_AGGR_LEAK_CLOSED))
5320 break;
5321 }
5322 }
5323
5324 /*
5325 * Schedule some packets from the given node/TID to the hardware.
5326 *
5327 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5328 * It just dumps frames into the TXQ. We should limit how deep
5329 * the transmit queue can grow for frames dispatched to the given
5330 * TXQ.
5331 *
5332 * To avoid locking issues, either we need to own the TXQ lock
5333 * at this point, or we need to pass in the maximum frame count
5334 * from the caller.
5335 */
5336 void
5337 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5338 struct ath_tid *tid)
5339 {
5340 struct ath_buf *bf;
5341 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5342
5343 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5344 __func__, an, tid->tid);
5345
5346 ATH_TX_LOCK_ASSERT(sc);
5347
5348 /* If A-MPDU is pending or running,
then print out something */ 5349 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 5350 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", 5351 __func__, tid->tid); 5352 if (ath_tx_ampdu_running(sc, an, tid->tid)) 5353 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", 5354 __func__, tid->tid); 5355 5356 for (;;) { 5357 5358 /* 5359 * If the upper layers have paused the TID, don't 5360 * queue any further packets. 5361 * 5362 * XXX if we are leaking frames, make sure we decrement 5363 * that counter _and_ we continue here. 5364 */ 5365 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5366 break; 5367 5368 bf = ATH_TID_FIRST(tid); 5369 if (bf == NULL) { 5370 break; 5371 } 5372 5373 ATH_TID_REMOVE(tid, bf, bf_list); 5374 5375 /* Sanity check! */ 5376 if (tid->tid != bf->bf_state.bfs_tid) { 5377 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" 5378 " tid %d\n", __func__, bf->bf_state.bfs_tid, 5379 tid->tid); 5380 } 5381 /* Normal completion handler */ 5382 bf->bf_comp = ath_tx_normal_comp; 5383 5384 /* 5385 * Override this for now, until the non-aggregate 5386 * completion handler correctly handles software retransmits. 5387 */ 5388 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5389 5390 /* Update CLRDMASK just before this frame is queued */ 5391 ath_tx_update_clrdmask(sc, tid, bf); 5392 5393 /* Program descriptors + rate control */ 5394 ath_tx_do_ratelookup(sc, bf); 5395 ath_tx_calc_duration(sc, bf); 5396 ath_tx_calc_protection(sc, bf); 5397 ath_tx_set_rtscts(sc, bf); 5398 ath_tx_rate_fill_rcflags(sc, bf); 5399 ath_tx_setds(sc, bf); 5400 5401 /* 5402 * Update the current leak count if 5403 * we're leaking frames; and set the 5404 * MORE flag as appropriate. 5405 */ 5406 ath_tx_leak_count_update(sc, tid, bf); 5407 5408 /* Track outstanding buffer count to hardware */ 5409 /* aggregates are "one" buffer */ 5410 tid->hwq_depth++; 5411 5412 /* Punt to hardware or software txq */ 5413 ath_tx_handoff(sc, txq, bf); 5414 } 5415 } 5416 5417 /* 5418 * Schedule some packets to the given hardware queue. 5419 * 5420 * This function walks the list of TIDs (ie, ath_node TIDs 5421 * with queued traffic) and attempts to schedule traffic 5422 * from them. 5423 * 5424 * TID scheduling is implemented as a FIFO, with TIDs being 5425 * added to the end of the queue after some frames have been 5426 * scheduled. 5427 */ 5428 void 5429 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5430 { 5431 struct ath_tid *tid, *last; 5432 5433 ATH_TX_LOCK_ASSERT(sc); 5434 5435 /* 5436 * Don't schedule if the hardware queue is busy. 5437 * This (hopefully) gives some more time to aggregate 5438 * some packets in the aggregation queue. 5439 * 5440 * XXX It doesn't stop a parallel sender from sneaking 5441 * in transmitting a frame! 5442 */ 5443 /* XXX TXQ locking */ 5444 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5445 sc->sc_aggr_stats.aggr_sched_nopkt++; 5446 return; 5447 } 5448 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5449 sc->sc_aggr_stats.aggr_sched_nopkt++; 5450 return; 5451 } 5452 5453 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5454 5455 while ((tid = TAILQ_FIRST(&txq->axq_tidq)) != NULL) { 5456 /* 5457 * Suspend paused queues here; they'll be resumed 5458 * once the addba completes or times out. 5459 */ 5460 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5461 __func__, tid->tid, tid->paused); 5462 ath_tx_tid_unsched(sc, tid); 5463 /* 5464 * This node may be in power-save and we're leaking 5465 * a frame; be careful. 5466 */ 5467 if (! 
ath_tx_tid_can_tx_or_sched(sc, tid)) { 5468 if (tid == last) 5469 break; 5470 continue; 5471 } 5472 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5473 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5474 else 5475 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5476 5477 /* Not empty? Re-schedule */ 5478 if (tid->axq_depth != 0) 5479 ath_tx_tid_sched(sc, tid); 5480 5481 /* 5482 * Give the software queue time to aggregate more 5483 * packets. If we aren't running aggregation then 5484 * we should still limit the hardware queue depth. 5485 */ 5486 /* XXX TXQ locking */ 5487 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5488 break; 5489 } 5490 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5491 break; 5492 } 5493 5494 /* 5495 * If this was the last entry on the original list, stop. 5496 * Otherwise nodes that have been rescheduled onto the end 5497 * of the TID FIFO list will just keep being rescheduled. 5498 * 5499 * XXX What should we do about nodes that were paused 5500 * but are pending a leaking frame in response to a ps-poll? 5501 * They'll be put at the front of the list; so they'll 5502 * prematurely trigger this condition! Ew. 5503 */ 5504 if (tid == last) 5505 break; 5506 } 5507 } 5508 5509 /* 5510 * TX addba handling 5511 */ 5512 5513 /* 5514 * Return net80211 TID struct pointer, or NULL for none 5515 */ 5516 struct ieee80211_tx_ampdu * 5517 ath_tx_get_tx_tid(struct ath_node *an, int tid) 5518 { 5519 struct ieee80211_node *ni = &an->an_node; 5520 struct ieee80211_tx_ampdu *tap; 5521 5522 if (tid == IEEE80211_NONQOS_TID) 5523 return NULL; 5524 5525 tap = &ni->ni_tx_ampdu[tid]; 5526 return tap; 5527 } 5528 5529 /* 5530 * Is AMPDU-TX running? 5531 */ 5532 static int 5533 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5534 { 5535 struct ieee80211_tx_ampdu *tap; 5536 5537 if (tid == IEEE80211_NONQOS_TID) 5538 return 0; 5539 5540 tap = ath_tx_get_tx_tid(an, tid); 5541 if (tap == NULL) 5542 return 0; /* Not valid; default to not running */ 5543 5544 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5545 } 5546 5547 /* 5548 * Is AMPDU-TX negotiation pending? 5549 */ 5550 static int 5551 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5552 { 5553 struct ieee80211_tx_ampdu *tap; 5554 5555 if (tid == IEEE80211_NONQOS_TID) 5556 return 0; 5557 5558 tap = ath_tx_get_tx_tid(an, tid); 5559 if (tap == NULL) 5560 return 0; /* Not valid; default to not pending */ 5561 5562 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5563 } 5564 5565 /* 5566 * Is AMPDU-TX pending for the given TID? 5567 */ 5568 5569 5570 /* 5571 * Method to handle sending an ADDBA request. 5572 * 5573 * We tap this so the relevant flags can be set to pause the TID 5574 * whilst waiting for the response. 5575 * 5576 * XXX there's no timeout handler we can override? 5577 */ 5578 int 5579 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5580 int dialogtoken, int baparamset, int batimeout) 5581 { 5582 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5583 int tid = tap->txa_ac; 5584 struct ath_node *an = ATH_NODE(ni); 5585 struct ath_tid *atid = &an->an_tid[tid]; 5586 5587 /* 5588 * XXX danger Will Robinson! 5589 * 5590 * Although the taskqueue may be running and scheduling some more 5591 * packets, these should all be _before_ the addba sequence number. 5592 * However, net80211 will keep self-assigning sequence numbers 5593 * until addba has been negotiated. 
5594 * 5595 * In the past, these packets would be "paused" (which still works 5596 * fine, as they're being scheduled to the driver in the same 5597 * serialised method which is calling the addba request routine) 5598 * and when the aggregation session begins, they'll be dequeued 5599 * as aggregate packets and added to the BAW. However, now there's 5600 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5601 * packets. Thus they never get included in the BAW tracking and 5602 * this can cause the initial burst of packets after the addba 5603 * negotiation to "hang", as they quickly fall outside the BAW. 5604 * 5605 * The "eventual" solution should be to tag these packets with 5606 * dobaw. Although net80211 has given us a sequence number, 5607 * it'll be "after" the left edge of the BAW and thus it'll 5608 * fall within it. 5609 */ 5610 ATH_TX_LOCK(sc); 5611 /* 5612 * This is a bit annoying. Until net80211 HT code inherits some 5613 * (any) locking, we may have this called in parallel BUT only 5614 * one response/timeout will be called. Grr. 5615 */ 5616 if (atid->addba_tx_pending == 0) { 5617 ath_tx_tid_pause(sc, atid); 5618 atid->addba_tx_pending = 1; 5619 } 5620 ATH_TX_UNLOCK(sc); 5621 5622 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5623 "%s: %s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5624 __func__, 5625 ath_hal_ether_sprintf(ni->ni_macaddr), 5626 dialogtoken, baparamset, batimeout); 5627 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5628 "%s: txa_start=%d, ni_txseqs=%d\n", 5629 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5630 5631 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5632 batimeout); 5633 } 5634 5635 /* 5636 * Handle an ADDBA response. 5637 * 5638 * We unpause the queue so TX'ing can resume. 5639 * 5640 * Any packets TX'ed from this point should be "aggregate" (whether 5641 * aggregate or not) so the BAW is updated. 5642 * 5643 * Note! net80211 keeps self-assigning sequence numbers until 5644 * ampdu is negotiated. This means the initially-negotiated BAW left 5645 * edge won't match the ni->ni_txseq. 5646 * 5647 * So, being very dirty, the BAW left edge is "slid" here to match 5648 * ni->ni_txseq. 5649 * 5650 * What likely SHOULD happen is that all packets subsequent to the 5651 * addba request should be tagged as aggregate and queued as non-aggregate 5652 * frames; thus updating the BAW. For now though, I'll just slide the 5653 * window. 5654 */ 5655 int 5656 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5657 int status, int code, int batimeout) 5658 { 5659 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5660 int tid = tap->txa_ac; 5661 struct ath_node *an = ATH_NODE(ni); 5662 struct ath_tid *atid = &an->an_tid[tid]; 5663 int r; 5664 5665 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5666 "%s: %s: called; status=%d, code=%d, batimeout=%d\n", __func__, 5667 ath_hal_ether_sprintf(ni->ni_macaddr), 5668 status, code, batimeout); 5669 5670 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5671 "%s: txa_start=%d, ni_txseqs=%d\n", 5672 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5673 5674 /* 5675 * Call this first, so the interface flags get updated 5676 * before the TID is unpaused. Otherwise a race condition 5677 * exists where the unpaused TID still doesn't yet have 5678 * IEEE80211_AGGR_RUNNING set. 5679 */ 5680 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5681 5682 ATH_TX_LOCK(sc); 5683 atid->addba_tx_pending = 0; 5684 /* 5685 * XXX dirty! 5686 * Slide the BAW left edge to wherever net80211 left it for us. 
/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point on should go via the "aggregate"
 * path (whether they end up aggregated or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated. This means the initially-negotiated BAW left
 * edge won't match ni->ni_txseqs[tid].
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseqs[tid].
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW. For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_ac;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int r;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %s: called; status=%d, code=%d, batimeout=%d\n", __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    status, code, batimeout);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	/*
	 * Call this first, so the interface flags get updated
	 * before the TID is unpaused. Otherwise a race condition
	 * exists where the unpaused TID still doesn't yet have
	 * IEEE80211_AGGR_RUNNING set.
	 */
	r = sc->sc_addba_response(ni, tap, status, code, batimeout);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	/*
	 * XXX dirty!
	 * Slide the BAW left edge to wherever net80211 left it for us.
	 * Read above for more information.
	 */
	tap->txa_start = ni->ni_txseqs[tid];
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
	return r;
}

/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_ac;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %s: called\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr));

	/*
	 * Pause TID traffic early, so there aren't any races.
	 * Unblock the pending BAR-held traffic, if it's currently paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path. This quietens
		 * the warning. It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	/*
	 * Unpause the TID if no cleanup is required.
	 */
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
		ath_tx_tid_pause(sc, tid);
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %s: TID %d: cleaning up TID\n",
		    __func__,
		    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
		    i);
		ath_tx_tid_cleanup(sc, an, i, &bf_cq);
		/*
		 * Unpause the TID if no cleanup is required.
		 */
		if (! tid->cleanup_inprogress)
			ath_tx_tid_resume(sc, tid);
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
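
/*
 * Illustrative sketch only, kept under #if 0: the "drain then fail"
 * pattern used by ath_addba_stop() and ath_tx_node_reassoc() above.
 * Frames are collected onto a local list while the TX lock is held,
 * then completed with an error status after the lock is dropped, so
 * the completion handlers never run with the TX lock held.
 * example_drain_and_fail() is a hypothetical name; the real callers
 * also pause the TID before cleaning it up.
 */
#if 0
static void
example_drain_and_fail(struct ath_softc *sc, struct ath_node *an, int tid)
{
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_LOCK(sc);
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	ATH_TX_UNLOCK(sc);

	/* Fail each collected frame outside the lock */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
#endif
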
/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session. Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_ac;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %s: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    tap->txa_ac,
	    atid->tid,
	    status,
	    attempts);

	/* Note: This may update the BAW details */
	sc->sc_bar_response(ni, tap, status);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session. So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch? (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_ac;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %s: TID=%d, called; resuming\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    tid);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID; which reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not. Must be called with the
 * TX lock held.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}
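
/*
 * Illustrative sketch only, kept under #if 0: ath_tx_node_is_asleep()
 * must be called with the TX lock held; a real consumer would also
 * make its queue-or-transmit decision under the same lock hold, so
 * the power-save state can't change underneath it.
 * example_defer_if_asleep() is a hypothetical name, not a driver
 * function.
 */
#if 0
static int
example_defer_if_asleep(struct ath_softc *sc, struct ath_node *an)
{
	int asleep;

	ATH_TX_LOCK(sc);
	asleep = ath_tx_node_is_asleep(sc, an);
	/* ... queue or transmit here, still under the TX lock ... */
	ATH_TX_UNLOCK(sc);

	/* Non-zero tells the (hypothetical) caller the frame was queued */
	return (asleep);
}
#endif
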
/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works. Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held. This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations. There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %s: node was already asleep!\n",
		    __func__, ath_hal_ether_sprintf(an->an_node.ni_macaddr));
		ATH_TX_UNLOCK(sc);
		return;
	}

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* !? */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
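
/*
 * Illustrative sketch only, kept under #if 0: how an attach path
 * could select between this legacy descriptor setup and an EDMA
 * variant. This assumes the ath_hal_hasedma() capability macro and
 * an ath_xmit_setup_edma() routine (as found in FreeBSD's
 * if_ath_tx_edma.c) are available in this tree; both names are
 * assumptions here, not verified. example_choose_xmit_api() is a
 * hypothetical name.
 */
#if 0
static void
example_choose_xmit_api(struct ath_softc *sc)
{
	if (ath_hal_hasedma(sc->sc_ah))
		/* AR93xx and later: FIFO-based TX DMA */
		ath_xmit_setup_edma(sc);
	else
		/* Earlier MACs: linked-list descriptor DMA */
		ath_xmit_setup_legacy(sc);
}
#endif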