/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif
#include <netproto/802_11/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

extern const char* ath_hal_ether_sprintf(const uint8_t *mac);

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
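		/*
		 * NB: an ath_buf may span several hardware descriptors
		 * (bf_nseg segments, packed sc_tx_nmaps per descriptor),
		 * so each sc_tx_desclen-sized descriptor is posted to
		 * the ALQ log individually below.
		 */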
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs.  There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames into the queue selected by
 * ATH_NONQOS_TID_AC (currently WME_AC_VO).
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
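		/*
		 * NB: one ath_buf is allocated per fragment; the node
		 * reference taken below is released again by
		 * ath_txfrag_cleanup() if allocation fails part-way,
		 * otherwise it is held for the TX path (see ath_tx_start).
		 */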
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
#if defined(__DragonFly__)
#else
	struct mbuf *m;
#endif
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, 1, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#endif
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_defrag(sc->sc_dmat,
		    bf->bf_dmamap, &m0,
		    bf->bf_segs, ATH_TXDESC,
		    &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
	struct ath_buf *bf, int is_aggr, int is_first_subframe,
	int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , bf->bf_state.bfs_tx_queue
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips.  These require the 11n rate
These require the 11n rate 519 * scenario to be set if MCS rates are enabled, so it's easier 520 * to just always call it. The caller can then only set rates 2, 3 521 * and 4 if multi-rate retry is needed. 522 */ 523 static void 524 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 525 struct ath_buf *bf) 526 { 527 struct ath_rc_series *rc = bf->bf_state.bfs_rc; 528 529 /* If mrr is disabled, blank tries 1, 2, 3 */ 530 if (! bf->bf_state.bfs_ismrr) 531 rc[1].tries = rc[2].tries = rc[3].tries = 0; 532 533 #if 0 534 /* 535 * If NOACK is set, just set ntries=1. 536 */ 537 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { 538 rc[1].tries = rc[2].tries = rc[3].tries = 0; 539 rc[0].tries = 1; 540 } 541 #endif 542 543 /* 544 * Always call - that way a retried descriptor will 545 * have the MRR fields overwritten. 546 * 547 * XXX TODO: see if this is really needed - setting up 548 * the first descriptor should set the MRR fields to 0 549 * for us anyway. 550 */ 551 if (ath_tx_is_11n(sc)) { 552 ath_buf_set_rate(sc, ni, bf); 553 } else { 554 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 555 , rc[1].ratecode, rc[1].tries 556 , rc[2].ratecode, rc[2].tries 557 , rc[3].ratecode, rc[3].tries 558 ); 559 } 560 } 561 562 /* 563 * Setup segments+descriptors for an 11n aggregate. 564 * bf_first is the first buffer in the aggregate. 565 * The descriptor list must already been linked together using 566 * bf->bf_next. 567 */ 568 static void 569 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 570 { 571 struct ath_buf *bf, *bf_prev = NULL; 572 struct ath_desc *ds0 = bf_first->bf_desc; 573 574 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 575 __func__, bf_first->bf_state.bfs_nframes, 576 bf_first->bf_state.bfs_al); 577 578 bf = bf_first; 579 580 if (bf->bf_state.bfs_txrate0 == 0) 581 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", 582 __func__, bf, 0); 583 if (bf->bf_state.bfs_rc[0].ratecode == 0) 584 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", 585 __func__, bf, 0); 586 587 /* 588 * Setup all descriptors of all subframes - this will 589 * call ath_hal_set11naggrmiddle() on every frame. 590 */ 591 while (bf != NULL) { 592 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 593 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 594 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 595 SEQNO(bf->bf_state.bfs_seqno)); 596 597 /* 598 * Setup the initial fields for the first descriptor - all 599 * the non-11n specific stuff. 600 */ 601 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc 602 , bf->bf_state.bfs_pktlen /* packet length */ 603 , bf->bf_state.bfs_hdrlen /* header length */ 604 , bf->bf_state.bfs_atype /* Atheros packet type */ 605 , bf->bf_state.bfs_txpower /* txpower */ 606 , bf->bf_state.bfs_txrate0 607 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 608 , bf->bf_state.bfs_keyix /* key cache index */ 609 , bf->bf_state.bfs_txantenna /* antenna mode */ 610 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ 611 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 612 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 613 ); 614 615 /* 616 * First descriptor? Setup the rate control and initial 617 * aggregate header information. 618 */ 619 if (bf == bf_first) { 620 /* 621 * setup first desc with rate and aggr info 622 */ 623 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 624 } 625 626 /* 627 * Setup the descriptors for a multi-descriptor frame. 628 * This is both aggregate and non-aggregate aware. 
629 */ 630 ath_tx_chaindesclist(sc, ds0, bf, 631 1, /* is_aggr */ 632 !! (bf == bf_first), /* is_first_subframe */ 633 !! (bf->bf_next == NULL) /* is_last_subframe */ 634 ); 635 636 if (bf == bf_first) { 637 /* 638 * Initialise the first 11n aggregate with the 639 * aggregate length and aggregate enable bits. 640 */ 641 ath_hal_set11n_aggr_first(sc->sc_ah, 642 ds0, 643 bf->bf_state.bfs_al, 644 bf->bf_state.bfs_ndelim); 645 } 646 647 /* 648 * Link the last descriptor of the previous frame 649 * to the beginning descriptor of this frame. 650 */ 651 if (bf_prev != NULL) 652 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, 653 bf->bf_daddr); 654 655 /* Save a copy so we can link the next descriptor in */ 656 bf_prev = bf; 657 bf = bf->bf_next; 658 } 659 660 /* 661 * Set the first descriptor bf_lastds field to point to 662 * the last descriptor in the last subframe, that's where 663 * the status update will occur. 664 */ 665 bf_first->bf_lastds = bf_prev->bf_lastds; 666 667 /* 668 * And bf_last in the first descriptor points to the end of 669 * the aggregate list. 670 */ 671 bf_first->bf_last = bf_prev; 672 673 /* 674 * For non-AR9300 NICs, which require the rate control 675 * in the final descriptor - let's set that up now. 676 * 677 * This is because the filltxdesc() HAL call doesn't 678 * populate the last segment with rate control information 679 * if firstSeg is also true. For non-aggregate frames 680 * that is fine, as the first frame already has rate control 681 * info. But if the last frame in an aggregate has one 682 * descriptor, both firstseg and lastseg will be true and 683 * the rate info isn't copied. 684 * 685 * This is inefficient on MIPS/ARM platforms that have 686 * non-cachable memory for TX descriptors, but we'll just 687 * make do for now. 688 * 689 * As to why the rate table is stashed in the last descriptor 690 * rather than the first descriptor? Because proctxdesc() 691 * is called on the final descriptor in an MPDU or A-MPDU - 692 * ie, the one that gets updated by the hardware upon 693 * completion. That way proctxdesc() doesn't need to know 694 * about the first _and_ last TX descriptor. 695 */ 696 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); 697 698 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__); 699 } 700 701 /* 702 * Hand-off a frame to the multicast TX queue. 703 * 704 * This is a software TXQ which will be appended to the CAB queue 705 * during the beacon setup code. 706 * 707 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID 708 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated 709 * with the actual hardware txq, or all of this will fall apart. 710 * 711 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state 712 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated 713 * correctly. 714 */ 715 static void 716 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, 717 struct ath_buf *bf) 718 { 719 ATH_TX_LOCK_ASSERT(sc); 720 721 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 722 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 723 724 /* 725 * Ensure that the tx queue is the cabq, so things get 726 * mapped correctly. 
727 */ 728 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { 729 DPRINTF(sc, ATH_DEBUG_XMIT, 730 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 731 __func__, bf, bf->bf_state.bfs_tx_queue, 732 txq->axq_qnum); 733 } 734 735 ATH_TXQ_LOCK(txq); 736 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { 737 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); 738 struct ieee80211_frame *wh; 739 740 /* mark previous frame */ 741 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); 742 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 743 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, 744 BUS_DMASYNC_PREWRITE); 745 746 /* link descriptor */ 747 ath_hal_settxdesclink(sc->sc_ah, 748 bf_last->bf_lastds, 749 bf->bf_daddr); 750 } 751 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 752 ATH_TXQ_UNLOCK(txq); 753 } 754 755 /* 756 * Hand-off packet to a hardware queue. 757 */ 758 static void 759 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, 760 struct ath_buf *bf) 761 { 762 struct ath_hal *ah = sc->sc_ah; 763 struct ath_buf *bf_first; 764 765 /* 766 * Insert the frame on the outbound list and pass it on 767 * to the hardware. Multicast frames buffered for power 768 * save stations and transmit from the CAB queue are stored 769 * on a s/w only queue and loaded on to the CAB queue in 770 * the SWBA handler since frames only go out on DTIM and 771 * to avoid possible races. 772 */ 773 ATH_TX_LOCK_ASSERT(sc); 774 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 775 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 776 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, 777 ("ath_tx_handoff_hw called for mcast queue")); 778 779 /* 780 * XXX We should instead just verify that sc_txstart_cnt 781 * or ath_txproc_cnt > 0. That would mean that 782 * the reset is going to be waiting for us to complete. 783 */ 784 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { 785 device_printf(sc->sc_dev, 786 "%s: TX dispatch without holding txcount/txstart refcnt!\n", 787 __func__); 788 } 789 790 /* 791 * XXX .. this is going to cause the hardware to get upset; 792 * so we really should find some way to drop or queue 793 * things. 794 */ 795 796 ATH_TXQ_LOCK(txq); 797 798 /* 799 * XXX TODO: if there's a holdingbf, then 800 * ATH_TXQ_PUTRUNNING should be clear. 801 * 802 * If there is a holdingbf and the list is empty, 803 * then axq_link should be pointing to the holdingbf. 804 * 805 * Otherwise it should point to the last descriptor 806 * in the last ath_buf. 807 * 808 * In any case, we should really ensure that we 809 * update the previous descriptor link pointer to 810 * this descriptor, regardless of all of the above state. 811 * 812 * For now this is captured by having axq_link point 813 * to either the holdingbf (if the TXQ list is empty) 814 * or the end of the list (if the TXQ list isn't empty.) 815 * I'd rather just kill axq_link here and do it as above. 816 */ 817 818 /* 819 * Append the frame to the TX queue. 820 */ 821 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 822 ATH_KTR(sc, ATH_KTR_TX, 3, 823 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " 824 "depth=%d", 825 txq->axq_qnum, 826 bf, 827 txq->axq_depth); 828 829 /* 830 * If there's a link pointer, update it. 831 * 832 * XXX we should replace this with the above logic, just 833 * to kill axq_link with fire. 
834 */ 835 if (txq->axq_link != NULL) { 836 *txq->axq_link = bf->bf_daddr; 837 DPRINTF(sc, ATH_DEBUG_XMIT, 838 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 839 txq->axq_qnum, txq->axq_link, 840 (caddr_t)bf->bf_daddr, bf->bf_desc, 841 txq->axq_depth); 842 ATH_KTR(sc, ATH_KTR_TX, 5, 843 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " 844 "lastds=%d", 845 txq->axq_qnum, txq->axq_link, 846 (caddr_t)bf->bf_daddr, bf->bf_desc, 847 bf->bf_lastds); 848 } 849 850 /* 851 * If we've not pushed anything into the hardware yet, 852 * push the head of the queue into the TxDP. 853 * 854 * Once we've started DMA, there's no guarantee that 855 * updating the TxDP with a new value will actually work. 856 * So we just don't do that - if we hit the end of the list, 857 * we keep that buffer around (the "holding buffer") and 858 * re-start DMA by updating the link pointer of _that_ 859 * descriptor and then restart DMA. 860 */ 861 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { 862 bf_first = TAILQ_FIRST(&txq->axq_q); 863 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 864 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); 865 DPRINTF(sc, ATH_DEBUG_XMIT, 866 "%s: TXDP[%u] = %p (%p) depth %d\n", 867 __func__, txq->axq_qnum, 868 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 869 txq->axq_depth); 870 ATH_KTR(sc, ATH_KTR_TX, 5, 871 "ath_tx_handoff: TXDP[%u] = %p (%p) " 872 "lastds=%p depth %d", 873 txq->axq_qnum, 874 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 875 bf_first->bf_lastds, 876 txq->axq_depth); 877 } 878 879 /* 880 * Ensure that the bf TXQ matches this TXQ, so later 881 * checking and holding buffer manipulation is sane. 882 */ 883 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { 884 DPRINTF(sc, ATH_DEBUG_XMIT, 885 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 886 __func__, bf, bf->bf_state.bfs_tx_queue, 887 txq->axq_qnum); 888 } 889 890 /* 891 * Track aggregate queue depth. 892 */ 893 if (bf->bf_state.bfs_aggr) 894 txq->axq_aggr_depth++; 895 896 /* 897 * Update the link pointer. 898 */ 899 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); 900 901 /* 902 * Start DMA. 903 * 904 * If we wrote a TxDP above, DMA will start from here. 905 * 906 * If DMA is running, it'll do nothing. 907 * 908 * If the DMA engine hit the end of the QCU list (ie LINK=NULL, 909 * or VEOL) then it stops at the last transmitted write. 910 * We then append a new frame by updating the link pointer 911 * in that descriptor and then kick TxE here; it will re-read 912 * that last descriptor and find the new descriptor to transmit. 913 * 914 * This is why we keep the holding descriptor around. 915 */ 916 ath_hal_txstart(ah, txq->axq_qnum); 917 ATH_TXQ_UNLOCK(txq); 918 ATH_KTR(sc, ATH_KTR_TX, 1, 919 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); 920 } 921 922 /* 923 * Restart TX DMA for the given TXQ. 924 * 925 * This must be called whether the queue is empty or not. 
926 */ 927 static void 928 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) 929 { 930 struct ath_buf *bf, *bf_last; 931 932 ATH_TXQ_LOCK_ASSERT(txq); 933 934 /* XXX make this ATH_TXQ_FIRST */ 935 bf = TAILQ_FIRST(&txq->axq_q); 936 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 937 938 if (bf == NULL) 939 return; 940 941 DPRINTF(sc, ATH_DEBUG_RESET, 942 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", 943 __func__, 944 txq->axq_qnum, 945 bf, 946 bf_last, 947 (uint32_t) bf->bf_daddr); 948 949 #ifdef ATH_DEBUG 950 if (sc->sc_debug & ATH_DEBUG_RESET) 951 ath_tx_dump(sc, txq); 952 #endif 953 954 /* 955 * This is called from a restart, so DMA is known to be 956 * completely stopped. 957 */ 958 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), 959 ("%s: Q%d: called with PUTRUNNING=1\n", 960 __func__, 961 txq->axq_qnum)); 962 963 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); 964 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 965 966 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, 967 &txq->axq_link); 968 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); 969 } 970 971 /* 972 * Hand off a packet to the hardware (or mcast queue.) 973 * 974 * The relevant hardware txq should be locked. 975 */ 976 static void 977 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 978 struct ath_buf *bf) 979 { 980 ATH_TX_LOCK_ASSERT(sc); 981 982 #ifdef ATH_DEBUG_ALQ 983 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 984 ath_tx_alq_post(sc, bf); 985 #endif 986 987 if (txq->axq_qnum == ATH_TXQ_SWQ) 988 ath_tx_handoff_mcast(sc, txq, bf); 989 else 990 ath_tx_handoff_hw(sc, txq, bf); 991 } 992 993 static int 994 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 995 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 996 int *keyix) 997 { 998 DPRINTF(sc, ATH_DEBUG_XMIT, 999 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 1000 __func__, 1001 *hdrlen, 1002 *pktlen, 1003 isfrag, 1004 iswep, 1005 m0); 1006 1007 if (iswep) { 1008 const struct ieee80211_cipher *cip; 1009 struct ieee80211_key *k; 1010 1011 /* 1012 * Construct the 802.11 header+trailer for an encrypted 1013 * frame. The only reason this can fail is because of an 1014 * unknown or unsupported cipher/key type. 1015 */ 1016 k = ieee80211_crypto_encap(ni, m0); 1017 if (k == NULL) { 1018 /* 1019 * This can happen when the key is yanked after the 1020 * frame was queued. Just discard the frame; the 1021 * 802.11 layer counts failures and provides 1022 * debugging/diagnostics. 1023 */ 1024 return (0); 1025 } 1026 /* 1027 * Adjust the packet + header lengths for the crypto 1028 * additions and calculate the h/w key index. When 1029 * a s/w mic is done the frame will have had any mic 1030 * added to it prior to entry so m0->m_pkthdr.len will 1031 * account for it. Otherwise we need to add it to the 1032 * packet length. 1033 */ 1034 cip = k->wk_cipher; 1035 (*hdrlen) += cip->ic_header; 1036 (*pktlen) += cip->ic_header + cip->ic_trailer; 1037 /* NB: frags always have any TKIP MIC done in s/w */ 1038 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1039 (*pktlen) += cip->ic_miclen; 1040 (*keyix) = k->wk_keyix; 1041 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1042 /* 1043 * Use station key cache slot, if assigned. 
1044 */ 1045 (*keyix) = ni->ni_ucastkey.wk_keyix; 1046 if ((*keyix) == IEEE80211_KEYIX_NONE) 1047 (*keyix) = HAL_TXKEYIX_INVALID; 1048 } else 1049 (*keyix) = HAL_TXKEYIX_INVALID; 1050 1051 return (1); 1052 } 1053 1054 /* 1055 * Calculate whether interoperability protection is required for 1056 * this frame. 1057 * 1058 * This requires the rate control information be filled in, 1059 * as the protection requirement depends upon the current 1060 * operating mode / PHY. 1061 */ 1062 static void 1063 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1064 { 1065 struct ieee80211_frame *wh; 1066 uint8_t rix; 1067 uint16_t flags; 1068 int shortPreamble; 1069 const HAL_RATE_TABLE *rt = sc->sc_currates; 1070 struct ifnet *ifp = sc->sc_ifp; 1071 struct ieee80211com *ic = ifp->if_l2com; 1072 1073 flags = bf->bf_state.bfs_txflags; 1074 rix = bf->bf_state.bfs_rc[0].rix; 1075 shortPreamble = bf->bf_state.bfs_shpream; 1076 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1077 1078 /* 1079 * If 802.11g protection is enabled, determine whether 1080 * to use RTS/CTS or just CTS. Note that this is only 1081 * done for OFDM unicast frames. 1082 */ 1083 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1084 rt->info[rix].phy == IEEE80211_T_OFDM && 1085 (flags & HAL_TXDESC_NOACK) == 0) { 1086 bf->bf_state.bfs_doprot = 1; 1087 /* XXX fragments must use CCK rates w/ protection */ 1088 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1089 flags |= HAL_TXDESC_RTSENA; 1090 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1091 flags |= HAL_TXDESC_CTSENA; 1092 } 1093 /* 1094 * For frags it would be desirable to use the 1095 * highest CCK rate for RTS/CTS. But stations 1096 * farther away may detect it at a lower CCK rate 1097 * so use the configured protection rate instead 1098 * (for now). 1099 */ 1100 sc->sc_stats.ast_tx_protect++; 1101 } 1102 1103 /* 1104 * If 11n protection is enabled and it's a HT frame, 1105 * enable RTS. 1106 * 1107 * XXX ic_htprotmode or ic_curhtprotmode? 1108 * XXX should it_htprotmode only matter if ic_curhtprotmode 1109 * XXX indicates it's not a HT pure environment? 1110 */ 1111 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1112 rt->info[rix].phy == IEEE80211_T_HT && 1113 (flags & HAL_TXDESC_NOACK) == 0) { 1114 flags |= HAL_TXDESC_RTSENA; 1115 sc->sc_stats.ast_tx_htprotect++; 1116 } 1117 bf->bf_state.bfs_txflags = flags; 1118 } 1119 1120 /* 1121 * Update the frame duration given the currently selected rate. 1122 * 1123 * This also updates the frame duration value, so it will require 1124 * a DMA flush. 1125 */ 1126 static void 1127 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1128 { 1129 struct ieee80211_frame *wh; 1130 uint8_t rix; 1131 uint16_t flags; 1132 int shortPreamble; 1133 struct ath_hal *ah = sc->sc_ah; 1134 const HAL_RATE_TABLE *rt = sc->sc_currates; 1135 int isfrag = bf->bf_m->m_flags & M_FRAG; 1136 1137 flags = bf->bf_state.bfs_txflags; 1138 rix = bf->bf_state.bfs_rc[0].rix; 1139 shortPreamble = bf->bf_state.bfs_shpream; 1140 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1141 1142 /* 1143 * Calculate duration. This logically belongs in the 802.11 1144 * layer but it lacks sufficient information to calculate it. 
1145 */ 1146 if ((flags & HAL_TXDESC_NOACK) == 0 && 1147 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1148 u_int16_t dur; 1149 if (shortPreamble) 1150 dur = rt->info[rix].spAckDuration; 1151 else 1152 dur = rt->info[rix].lpAckDuration; 1153 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1154 dur += dur; /* additional SIFS+ACK */ 1155 /* 1156 * Include the size of next fragment so NAV is 1157 * updated properly. The last fragment uses only 1158 * the ACK duration 1159 * 1160 * XXX TODO: ensure that the rate lookup for each 1161 * fragment is the same as the rate used by the 1162 * first fragment! 1163 */ 1164 dur += ath_hal_computetxtime(ah, 1165 rt, 1166 bf->bf_nextfraglen, 1167 rix, shortPreamble); 1168 } 1169 if (isfrag) { 1170 /* 1171 * Force hardware to use computed duration for next 1172 * fragment by disabling multi-rate retry which updates 1173 * duration based on the multi-rate duration table. 1174 */ 1175 bf->bf_state.bfs_ismrr = 0; 1176 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1177 /* XXX update bfs_rc[0].try? */ 1178 } 1179 1180 /* Update the duration field itself */ 1181 *(u_int16_t *)wh->i_dur = htole16(dur); 1182 } 1183 } 1184 1185 static uint8_t 1186 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1187 int cix, int shortPreamble) 1188 { 1189 uint8_t ctsrate; 1190 1191 /* 1192 * CTS transmit rate is derived from the transmit rate 1193 * by looking in the h/w rate table. We must also factor 1194 * in whether or not a short preamble is to be used. 1195 */ 1196 /* NB: cix is set above where RTS/CTS is enabled */ 1197 KASSERT(cix != 0xff, ("cix not setup")); 1198 ctsrate = rt->info[cix].rateCode; 1199 1200 /* XXX this should only matter for legacy rates */ 1201 if (shortPreamble) 1202 ctsrate |= rt->info[cix].shortPreamble; 1203 1204 return (ctsrate); 1205 } 1206 1207 /* 1208 * Calculate the RTS/CTS duration for legacy frames. 1209 */ 1210 static int 1211 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1212 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1213 int flags) 1214 { 1215 int ctsduration = 0; 1216 1217 /* This mustn't be called for HT modes */ 1218 if (rt->info[cix].phy == IEEE80211_T_HT) { 1219 kprintf("%s: HT rate where it shouldn't be (0x%x)\n", 1220 __func__, rt->info[cix].rateCode); 1221 return (-1); 1222 } 1223 1224 /* 1225 * Compute the transmit duration based on the frame 1226 * size and the size of an ACK frame. We call into the 1227 * HAL to do the computation since it depends on the 1228 * characteristics of the actual PHY being used. 1229 * 1230 * NB: CTS is assumed the same size as an ACK so we can 1231 * use the precalculated ACK durations. 1232 */ 1233 if (shortPreamble) { 1234 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1235 ctsduration += rt->info[cix].spAckDuration; 1236 ctsduration += ath_hal_computetxtime(ah, 1237 rt, pktlen, rix, AH_TRUE); 1238 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1239 ctsduration += rt->info[rix].spAckDuration; 1240 } else { 1241 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1242 ctsduration += rt->info[cix].lpAckDuration; 1243 ctsduration += ath_hal_computetxtime(ah, 1244 rt, pktlen, rix, AH_FALSE); 1245 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1246 ctsduration += rt->info[rix].lpAckDuration; 1247 } 1248 1249 return (ctsduration); 1250 } 1251 1252 /* 1253 * Update the given ath_buf with updated rts/cts setup and duration 1254 * values. 
1255 * 1256 * To support rate lookups for each software retry, the rts/cts rate 1257 * and cts duration must be re-calculated. 1258 * 1259 * This function assumes the RTS/CTS flags have been set as needed; 1260 * mrr has been disabled; and the rate control lookup has been done. 1261 * 1262 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1263 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1264 */ 1265 static void 1266 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1267 { 1268 uint16_t ctsduration = 0; 1269 uint8_t ctsrate = 0; 1270 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1271 uint8_t cix = 0; 1272 const HAL_RATE_TABLE *rt = sc->sc_currates; 1273 1274 /* 1275 * No RTS/CTS enabled? Don't bother. 1276 */ 1277 if ((bf->bf_state.bfs_txflags & 1278 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1279 /* XXX is this really needed? */ 1280 bf->bf_state.bfs_ctsrate = 0; 1281 bf->bf_state.bfs_ctsduration = 0; 1282 return; 1283 } 1284 1285 /* 1286 * If protection is enabled, use the protection rix control 1287 * rate. Otherwise use the rate0 control rate. 1288 */ 1289 if (bf->bf_state.bfs_doprot) 1290 rix = sc->sc_protrix; 1291 else 1292 rix = bf->bf_state.bfs_rc[0].rix; 1293 1294 /* 1295 * If the raw path has hard-coded ctsrate0 to something, 1296 * use it. 1297 */ 1298 if (bf->bf_state.bfs_ctsrate0 != 0) 1299 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1300 else 1301 /* Control rate from above */ 1302 cix = rt->info[rix].controlRate; 1303 1304 /* Calculate the rtscts rate for the given cix */ 1305 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1306 bf->bf_state.bfs_shpream); 1307 1308 /* The 11n chipsets do ctsduration calculations for you */ 1309 if (! ath_tx_is_11n(sc)) 1310 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1311 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1312 rt, bf->bf_state.bfs_txflags); 1313 1314 /* Squirrel away in ath_buf */ 1315 bf->bf_state.bfs_ctsrate = ctsrate; 1316 bf->bf_state.bfs_ctsduration = ctsduration; 1317 1318 /* 1319 * Must disable multi-rate retry when using RTS/CTS. 1320 */ 1321 if (!sc->sc_mrrprot) { 1322 bf->bf_state.bfs_ismrr = 0; 1323 bf->bf_state.bfs_try0 = 1324 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1325 } 1326 } 1327 1328 /* 1329 * Setup the descriptor chain for a normal or fast-frame 1330 * frame. 1331 * 1332 * XXX TODO: extend to include the destination hardware QCU ID. 1333 * Make sure that is correct. Make sure that when being added 1334 * to the mcastq, the CABQ QCUID is set or things will get a bit 1335 * odd. 
1336 */ 1337 static void 1338 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1339 { 1340 struct ath_desc *ds = bf->bf_desc; 1341 struct ath_hal *ah = sc->sc_ah; 1342 1343 if (bf->bf_state.bfs_txrate0 == 0) 1344 DPRINTF(sc, ATH_DEBUG_XMIT, 1345 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); 1346 1347 ath_hal_setuptxdesc(ah, ds 1348 , bf->bf_state.bfs_pktlen /* packet length */ 1349 , bf->bf_state.bfs_hdrlen /* header length */ 1350 , bf->bf_state.bfs_atype /* Atheros packet type */ 1351 , bf->bf_state.bfs_txpower /* txpower */ 1352 , bf->bf_state.bfs_txrate0 1353 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1354 , bf->bf_state.bfs_keyix /* key cache index */ 1355 , bf->bf_state.bfs_txantenna /* antenna mode */ 1356 , bf->bf_state.bfs_txflags /* flags */ 1357 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1358 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1359 ); 1360 1361 /* 1362 * This will be overriden when the descriptor chain is written. 1363 */ 1364 bf->bf_lastds = ds; 1365 bf->bf_last = bf; 1366 1367 /* Set rate control and descriptor chain for this frame */ 1368 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1369 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1370 } 1371 1372 /* 1373 * Do a rate lookup. 1374 * 1375 * This performs a rate lookup for the given ath_buf only if it's required. 1376 * Non-data frames and raw frames don't require it. 1377 * 1378 * This populates the primary and MRR entries; MRR values are 1379 * then disabled later on if something requires it (eg RTS/CTS on 1380 * pre-11n chipsets. 1381 * 1382 * This needs to be done before the RTS/CTS fields are calculated 1383 * as they may depend upon the rate chosen. 1384 */ 1385 static void 1386 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) 1387 { 1388 uint8_t rate, rix; 1389 int try0; 1390 1391 if (! bf->bf_state.bfs_doratelookup) 1392 return; 1393 1394 /* Get rid of any previous state */ 1395 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1396 1397 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1398 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1399 bf->bf_state.bfs_pktlen, &rix, &try0, &rate); 1400 1401 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1402 bf->bf_state.bfs_rc[0].rix = rix; 1403 bf->bf_state.bfs_rc[0].ratecode = rate; 1404 bf->bf_state.bfs_rc[0].tries = try0; 1405 1406 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1407 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1408 bf->bf_state.bfs_rc); 1409 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1410 1411 sc->sc_txrix = rix; /* for LED blinking */ 1412 sc->sc_lastdatarix = rix; /* for fast frames */ 1413 bf->bf_state.bfs_try0 = try0; 1414 bf->bf_state.bfs_txrate0 = rate; 1415 } 1416 1417 /* 1418 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1419 */ 1420 static void 1421 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1422 struct ath_buf *bf) 1423 { 1424 struct ath_node *an = ATH_NODE(bf->bf_node); 1425 1426 ATH_TX_LOCK_ASSERT(sc); 1427 1428 if (an->clrdmask == 1) { 1429 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1430 an->clrdmask = 0; 1431 } 1432 } 1433 1434 /* 1435 * Return whether this frame should be software queued or 1436 * direct dispatched. 1437 * 1438 * When doing powersave, BAR frames should be queued but other management 1439 * frames should be directly sent. 1440 * 1441 * When not doing powersave, stick BAR frames into the hardware queue 1442 * so it goes out even though the queue is paused. 
1443 * 1444 * For now, management frames are also software queued by default. 1445 */ 1446 static int 1447 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, 1448 struct mbuf *m0, int *queue_to_head) 1449 { 1450 struct ieee80211_node *ni = &an->an_node; 1451 struct ieee80211_frame *wh; 1452 uint8_t type, subtype; 1453 1454 wh = mtod(m0, struct ieee80211_frame *); 1455 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1456 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1457 1458 (*queue_to_head) = 0; 1459 1460 /* If it's not in powersave - direct-dispatch BAR */ 1461 if ((ATH_NODE(ni)->an_is_powersave == 0) 1462 && type == IEEE80211_FC0_TYPE_CTL && 1463 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1464 DPRINTF(sc, ATH_DEBUG_SW_TX, 1465 "%s: BAR: TX'ing direct\n", __func__); 1466 return (0); 1467 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1468 && type == IEEE80211_FC0_TYPE_CTL && 1469 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1470 /* BAR TX whilst asleep; queue */ 1471 DPRINTF(sc, ATH_DEBUG_SW_TX, 1472 "%s: swq: TX'ing\n", __func__); 1473 (*queue_to_head) = 1; 1474 return (1); 1475 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1476 && (type == IEEE80211_FC0_TYPE_MGT || 1477 type == IEEE80211_FC0_TYPE_CTL)) { 1478 /* 1479 * Other control/mgmt frame; bypass software queuing 1480 * for now! 1481 */ 1482 DPRINTF(sc, ATH_DEBUG_XMIT, 1483 "%s: %s: Node is asleep; sending mgmt " 1484 "(type=%d, subtype=%d)\n", 1485 __func__, ath_hal_ether_sprintf(ni->ni_macaddr), 1486 type, subtype); 1487 return (0); 1488 } else { 1489 return (1); 1490 } 1491 } 1492 1493 1494 /* 1495 * Transmit the given frame to the hardware. 1496 * 1497 * The frame must already be setup; rate control must already have 1498 * been done. 1499 * 1500 * XXX since the TXQ lock is being held here (and I dislike holding 1501 * it for this long when not doing software aggregation), later on 1502 * break this function into "setup_normal" and "xmit_normal". The 1503 * lock only needs to be held for the ath_tx_handoff call. 1504 * 1505 * XXX we don't update the leak count here - if we're doing 1506 * direct frame dispatch, we need to be able to do it without 1507 * decrementing the leak count (eg multicast queue frames.) 1508 */ 1509 static void 1510 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1511 struct ath_buf *bf) 1512 { 1513 struct ath_node *an = ATH_NODE(bf->bf_node); 1514 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1515 1516 ATH_TX_LOCK_ASSERT(sc); 1517 1518 /* 1519 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1520 * set a completion handler however it doesn't (yet) properly 1521 * handle the strict ordering requirements needed for normal, 1522 * non-aggregate session frames. 1523 * 1524 * Once this is implemented, only set CLRDMASK like this for 1525 * frames that must go out - eg management/raw frames. 
1526 */ 1527 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1528 1529 /* Setup the descriptor before handoff */ 1530 ath_tx_do_ratelookup(sc, bf); 1531 ath_tx_calc_duration(sc, bf); 1532 ath_tx_calc_protection(sc, bf); 1533 ath_tx_set_rtscts(sc, bf); 1534 ath_tx_rate_fill_rcflags(sc, bf); 1535 ath_tx_setds(sc, bf); 1536 1537 /* Track per-TID hardware queue depth correctly */ 1538 tid->hwq_depth++; 1539 1540 /* Assign the completion handler */ 1541 bf->bf_comp = ath_tx_normal_comp; 1542 1543 /* Hand off to hardware */ 1544 ath_tx_handoff(sc, txq, bf); 1545 } 1546 1547 /* 1548 * Do the basic frame setup stuff that's required before the frame 1549 * is added to a software queue. 1550 * 1551 * All frames get mostly the same treatment and it's done once. 1552 * Retransmits fiddle with things like the rate control setup, 1553 * setting the retransmit bit in the packet; doing relevant DMA/bus 1554 * syncing and relinking it (back) into the hardware TX queue. 1555 * 1556 * Note that this may cause the mbuf to be reallocated, so 1557 * m0 may not be valid. 1558 */ 1559 static int 1560 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1561 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1562 { 1563 struct ieee80211vap *vap = ni->ni_vap; 1564 struct ath_hal *ah = sc->sc_ah; 1565 struct ifnet *ifp = sc->sc_ifp; 1566 struct ieee80211com *ic = ifp->if_l2com; 1567 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1568 int error, iswep, ismcast, isfrag, ismrr; 1569 int keyix, hdrlen, pktlen, try0 = 0; 1570 u_int8_t rix = 0, txrate = 0; 1571 struct ath_desc *ds; 1572 struct ieee80211_frame *wh; 1573 u_int subtype, flags; 1574 HAL_PKT_TYPE atype; 1575 const HAL_RATE_TABLE *rt; 1576 HAL_BOOL shortPreamble; 1577 struct ath_node *an; 1578 u_int pri; 1579 1580 /* 1581 * To ensure that both sequence numbers and the CCMP PN handling 1582 * is "correct", make sure that the relevant TID queue is locked. 1583 * Otherwise the CCMP PN and seqno may appear out of order, causing 1584 * re-ordered frames to have out of order CCMP PN's, resulting 1585 * in many, many frame drops. 1586 */ 1587 ATH_TX_LOCK_ASSERT(sc); 1588 1589 wh = mtod(m0, struct ieee80211_frame *); 1590 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 1591 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1592 isfrag = m0->m_flags & M_FRAG; 1593 hdrlen = ieee80211_anyhdrsize(wh); 1594 /* 1595 * Packet length must not include any 1596 * pad bytes; deduct them here. 1597 */ 1598 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1599 1600 /* Handle encryption twiddling if needed */ 1601 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1602 &pktlen, &keyix)) { 1603 ath_freetx(m0); 1604 return EIO; 1605 } 1606 1607 /* packet header may have moved, reset our local pointer */ 1608 wh = mtod(m0, struct ieee80211_frame *); 1609 1610 pktlen += IEEE80211_CRC_LEN; 1611 1612 /* 1613 * Load the DMA map so any coalescing is done. This 1614 * also calculates the number of descriptors we need. 
1615 */ 1616 error = ath_tx_dmasetup(sc, bf, m0); 1617 if (error != 0) 1618 return error; 1619 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 1620 bf->bf_node = ni; /* NB: held reference */ 1621 m0 = bf->bf_m; /* NB: may have changed */ 1622 wh = mtod(m0, struct ieee80211_frame *); 1623 1624 /* setup descriptors */ 1625 ds = bf->bf_desc; 1626 rt = sc->sc_currates; 1627 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1628 1629 /* 1630 * NB: the 802.11 layer marks whether or not we should 1631 * use short preamble based on the current mode and 1632 * negotiated parameters. 1633 */ 1634 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1635 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1636 shortPreamble = AH_TRUE; 1637 sc->sc_stats.ast_tx_shortpre++; 1638 } else { 1639 shortPreamble = AH_FALSE; 1640 } 1641 1642 an = ATH_NODE(ni); 1643 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1644 flags = 0; 1645 ismrr = 0; /* default no multi-rate retry*/ 1646 pri = M_WME_GETAC(m0); /* honor classification */ 1647 /* XXX use txparams instead of fixed values */ 1648 /* 1649 * Calculate Atheros packet type from IEEE80211 packet header, 1650 * setup for rate calculations, and select h/w transmit queue. 1651 */ 1652 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1653 case IEEE80211_FC0_TYPE_MGT: 1654 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1655 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1656 atype = HAL_PKT_TYPE_BEACON; 1657 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1658 atype = HAL_PKT_TYPE_PROBE_RESP; 1659 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1660 atype = HAL_PKT_TYPE_ATIM; 1661 else 1662 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1663 rix = an->an_mgmtrix; 1664 txrate = rt->info[rix].rateCode; 1665 if (shortPreamble) 1666 txrate |= rt->info[rix].shortPreamble; 1667 try0 = ATH_TXMGTTRY; 1668 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1669 break; 1670 case IEEE80211_FC0_TYPE_CTL: 1671 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1672 rix = an->an_mgmtrix; 1673 txrate = rt->info[rix].rateCode; 1674 if (shortPreamble) 1675 txrate |= rt->info[rix].shortPreamble; 1676 try0 = ATH_TXMGTTRY; 1677 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1678 break; 1679 case IEEE80211_FC0_TYPE_DATA: 1680 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1681 /* 1682 * Data frames: multicast frames go out at a fixed rate, 1683 * EAPOL frames use the mgmt frame rate; otherwise consult 1684 * the rate control module for the rate to use. 1685 */ 1686 if (ismcast) { 1687 rix = an->an_mcastrix; 1688 txrate = rt->info[rix].rateCode; 1689 if (shortPreamble) 1690 txrate |= rt->info[rix].shortPreamble; 1691 try0 = 1; 1692 } else if (m0->m_flags & M_EAPOL) { 1693 /* XXX? maybe always use long preamble? */ 1694 rix = an->an_mgmtrix; 1695 txrate = rt->info[rix].rateCode; 1696 if (shortPreamble) 1697 txrate |= rt->info[rix].shortPreamble; 1698 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 1699 } else { 1700 /* 1701 * Do rate lookup on each TX, rather than using 1702 * the hard-coded TX information decided here. 
1703 */ 1704 ismrr = 1; 1705 bf->bf_state.bfs_doratelookup = 1; 1706 } 1707 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 1708 flags |= HAL_TXDESC_NOACK; 1709 break; 1710 default: 1711 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 1712 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1713 /* XXX statistic */ 1714 /* XXX free tx dmamap */ 1715 ath_freetx(m0); 1716 return EIO; 1717 } 1718 1719 /* 1720 * There are two known scenarios where the frame AC doesn't match 1721 * what the destination TXQ is. 1722 * 1723 * + non-QoS frames (eg management?) that the net80211 stack has 1724 * assigned a higher AC to, but since it's a non-QoS TID, it's 1725 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1726 * It's quite possible that management frames should just be 1727 * direct dispatched to hardware rather than go via the software 1728 * queue; that should be investigated in the future. There are 1729 * some specific scenarios where this doesn't make sense, mostly 1730 * surrounding ADDBA request/response - hence why that is special 1731 * cased. 1732 * 1733 * + Multicast frames going into the VAP mcast queue. That shows up 1734 * as "TXQ 11". 1735 * 1736 * This driver should eventually support separate TID and TXQ locking, 1737 * allowing for arbitrary AC frames to appear on arbitrary software 1738 * queues, being queued to the "correct" hardware queue when needed. 1739 */ 1740 #if 0 1741 if (txq != sc->sc_ac2q[pri]) { 1742 DPRINTF(sc, ATH_DEBUG_XMIT, 1743 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", 1744 __func__, 1745 txq, 1746 txq->axq_qnum, 1747 pri, 1748 sc->sc_ac2q[pri], 1749 sc->sc_ac2q[pri]->axq_qnum); 1750 } 1751 #endif 1752 1753 /* 1754 * Calculate miscellaneous flags. 1755 */ 1756 if (ismcast) { 1757 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 1758 } else if (pktlen > vap->iv_rtsthreshold && 1759 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { 1760 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 1761 sc->sc_stats.ast_tx_rts++; 1762 } 1763 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 1764 sc->sc_stats.ast_tx_noack++; 1765 #ifdef IEEE80211_SUPPORT_TDMA 1766 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { 1767 DPRINTF(sc, ATH_DEBUG_TDMA, 1768 "%s: discard frame, ACK required w/ TDMA\n", __func__); 1769 sc->sc_stats.ast_tdma_ack++; 1770 /* XXX free tx dmamap */ 1771 ath_freetx(m0); 1772 return EIO; 1773 } 1774 #endif 1775 1776 /* 1777 * Determine if a tx interrupt should be generated for 1778 * this descriptor. We take a tx interrupt to reap 1779 * descriptors when the h/w hits an EOL condition or 1780 * when the descriptor is specifically marked to generate 1781 * an interrupt. We periodically mark descriptors in this 1782 * way to insure timely replenishing of the supply needed 1783 * for sending frames. Defering interrupts reduces system 1784 * load and potentially allows more concurrent work to be 1785 * done but if done to aggressively can cause senders to 1786 * backup. 1787 * 1788 * NB: use >= to deal with sc_txintrperiod changing 1789 * dynamically through sysctl. 1790 */ 1791 if (flags & HAL_TXDESC_INTREQ) { 1792 txq->axq_intrcnt = 0; 1793 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 1794 flags |= HAL_TXDESC_INTREQ; 1795 txq->axq_intrcnt = 0; 1796 } 1797 1798 /* This point forward is actual TX bits */ 1799 1800 /* 1801 * At this point we are committed to sending the frame 1802 * and we don't need to look at m_nextpkt; clear it in 1803 * case this frame is part of frag chain. 
1804 */ 1805 m0->m_nextpkt = NULL; 1806 1807 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1808 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 1809 sc->sc_hwmap[rix].ieeerate, -1); 1810 1811 if (ieee80211_radiotap_active_vap(vap)) { 1812 u_int64_t tsf = ath_hal_gettsf64(ah); 1813 1814 sc->sc_tx_th.wt_tsf = htole64(tsf); 1815 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1816 if (iswep) 1817 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1818 if (isfrag) 1819 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1820 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1821 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); 1822 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1823 1824 ieee80211_radiotap_tx(vap, m0); 1825 } 1826 1827 /* Blank the legacy rate array */ 1828 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1829 1830 /* 1831 * ath_buf_set_rate needs at least one rate/try to setup 1832 * the rate scenario. 1833 */ 1834 bf->bf_state.bfs_rc[0].rix = rix; 1835 bf->bf_state.bfs_rc[0].tries = try0; 1836 bf->bf_state.bfs_rc[0].ratecode = txrate; 1837 1838 /* Store the decided rate index values away */ 1839 bf->bf_state.bfs_pktlen = pktlen; 1840 bf->bf_state.bfs_hdrlen = hdrlen; 1841 bf->bf_state.bfs_atype = atype; 1842 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); 1843 bf->bf_state.bfs_txrate0 = txrate; 1844 bf->bf_state.bfs_try0 = try0; 1845 bf->bf_state.bfs_keyix = keyix; 1846 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1847 bf->bf_state.bfs_txflags = flags; 1848 bf->bf_state.bfs_shpream = shortPreamble; 1849 1850 /* XXX this should be done in ath_tx_setrate() */ 1851 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1852 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1853 bf->bf_state.bfs_ctsduration = 0; 1854 bf->bf_state.bfs_ismrr = ismrr; 1855 1856 return 0; 1857 } 1858 1859 /* 1860 * Queue a frame to the hardware or software queue. 1861 * 1862 * This can be called by the net80211 code. 1863 * 1864 * XXX what about locking? Or, push the seqno assign into the 1865 * XXX aggregate scheduler so its serialised? 1866 * 1867 * XXX When sending management frames via ath_raw_xmit(), 1868 * should CLRDMASK be set unconditionally? 1869 */ 1870 int 1871 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1872 struct ath_buf *bf, struct mbuf *m0) 1873 { 1874 struct ieee80211vap *vap = ni->ni_vap; 1875 struct ath_vap *avp = ATH_VAP(vap); 1876 int r = 0; 1877 u_int pri; 1878 int tid; 1879 struct ath_txq *txq; 1880 int ismcast; 1881 const struct ieee80211_frame *wh; 1882 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1883 ieee80211_seq seqno; 1884 uint8_t type, subtype; 1885 int queue_to_head; 1886 1887 ATH_TX_LOCK_ASSERT(sc); 1888 1889 /* 1890 * Determine the target hardware queue. 1891 * 1892 * For multicast frames, the txq gets overridden appropriately 1893 * depending upon the state of PS. 1894 * 1895 * For any other frame, we do a TID/QoS lookup inside the frame 1896 * to see what the TID should be. If it's a non-QoS frame, the 1897 * AC and TID are overridden. The TID/TXQ code assumes the 1898 * TID is on a predictable hardware TXQ, so we don't support 1899 * having a node TID queued to multiple hardware TXQs. 1900 * This may change in the future but would require some locking 1901 * fudgery. 
1902 */
1903 pri = ath_tx_getac(sc, m0);
1904 tid = ath_tx_gettid(sc, m0);
1905 
1906 txq = sc->sc_ac2q[pri];
1907 wh = mtod(m0, struct ieee80211_frame *);
1908 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1909 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1910 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1911 
1912 /*
1913 * Enforce how deep the multicast queue can grow.
1914 *
1915 * XXX duplicated in ath_raw_xmit().
1916 */
1917 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1918 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1919 > sc->sc_txq_mcastq_maxdepth) {
1920 sc->sc_stats.ast_tx_mcastq_overflow++;
1921 m_freem(m0);
1922 return (ENOBUFS);
1923 }
1924 }
1925 
1926 /*
1927 * Enforce how deep the unicast queue can grow.
1928 *
1929 * If the node is in power save then we don't want
1930 * the software queue to grow too deep, or a node may
1931 * end up consuming all of the ath_buf entries.
1932 *
1933 * For now, only do this for DATA frames.
1934 *
1935 * We will want to cap how many management/control
1936 * frames get punted to the software queue so it doesn't
1937 * fill up. But the correct solution isn't yet obvious.
1938 * In any case, this check should at least let frames pass
1939 * that we are direct-dispatching.
1940 *
1941 * XXX TODO: duplicate this to the raw xmit path!
1942 */
1943 if (type == IEEE80211_FC0_TYPE_DATA &&
1944 ATH_NODE(ni)->an_is_powersave &&
1945 ATH_NODE(ni)->an_swq_depth >
1946 sc->sc_txq_node_psq_maxdepth) {
1947 sc->sc_stats.ast_tx_node_psq_overflow++;
1948 m_freem(m0);
1949 return (ENOBUFS);
1950 }
1951 
1952 /* A-MPDU TX */
1953 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1954 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1955 is_ampdu = is_ampdu_tx | is_ampdu_pending;
1956 
1957 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1958 __func__, tid, pri, is_ampdu);
1959 
1960 /* Set local packet state, used to queue packets to hardware */
1961 bf->bf_state.bfs_tid = tid;
1962 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1963 bf->bf_state.bfs_pri = pri;
1964 
1965 #if 1
1966 /*
1967 * When servicing one or more stations in power-save mode
1968 * (or if there is some mcast data waiting on the mcast
1969 * queue, to prevent out of order delivery) multicast frames
1970 * must be buffered until after the beacon.
1971 *
1972 * TODO: we should lock the mcastq before we check the length.
1973 */
1974 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1975 txq = &avp->av_mcastq;
1976 /*
1977 * Mark the frame as eventually belonging on the CAB
1978 * queue, so the descriptor setup functions will
1979 * correctly initialise the descriptor 'qcuId' field.
1980 */
1981 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1982 }
1983 #endif
1984 
1985 /* Do the generic frame setup */
1986 /* XXX should just bzero the bf_state? */
1987 bf->bf_state.bfs_dobaw = 0;
1988 
1989 /* A-MPDU TX? Manually set sequence number */
1990 /*
1991 * Don't do it whilst pending; the net80211 layer still
1992 * assigns them.
1993 */
1994 if (is_ampdu_tx) {
1995 /*
1996 * Always call; this function will
1997 * handle making sure that null data frames
1998 * don't get a sequence number from the current
1999 * TID and thus mess with the BAW.
2000 */
2001 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2002 
2003 /*
2004 * Don't add QoS NULL frames to the BAW.
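 *
 * (QoS NULL frames take their sequence number from the non-QoS
 * TID - see ath_tx_tid_seqno_assign() below - so counting them
 * in this TID's BAW would punch a hole in the receiver's
 * reorder window.)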
2005 */ 2006 if (IEEE80211_QOS_HAS_SEQ(wh) && 2007 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2008 bf->bf_state.bfs_dobaw = 1; 2009 } 2010 } 2011 2012 /* 2013 * If needed, the sequence number has been assigned. 2014 * Squirrel it away somewhere easy to get to. 2015 */ 2016 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 2017 2018 /* Is ampdu pending? fetch the seqno and print it out */ 2019 if (is_ampdu_pending) 2020 DPRINTF(sc, ATH_DEBUG_SW_TX, 2021 "%s: tid %d: ampdu pending, seqno %d\n", 2022 __func__, tid, M_SEQNO_GET(m0)); 2023 2024 /* This also sets up the DMA map */ 2025 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2026 2027 if (r != 0) 2028 goto done; 2029 2030 /* At this point m0 could have changed! */ 2031 m0 = bf->bf_m; 2032 2033 #if 1 2034 /* 2035 * If it's a multicast frame, do a direct-dispatch to the 2036 * destination hardware queue. Don't bother software 2037 * queuing it. 2038 */ 2039 /* 2040 * If it's a BAR frame, do a direct dispatch to the 2041 * destination hardware queue. Don't bother software 2042 * queuing it, as the TID will now be paused. 2043 * Sending a BAR frame can occur from the net80211 txa timer 2044 * (ie, retries) or from the ath txtask (completion call.) 2045 * It queues directly to hardware because the TID is paused 2046 * at this point (and won't be unpaused until the BAR has 2047 * either been TXed successfully or max retries has been 2048 * reached.) 2049 */ 2050 /* 2051 * Until things are better debugged - if this node is asleep 2052 * and we're sending it a non-BAR frame, direct dispatch it. 2053 * Why? Because we need to figure out what's actually being 2054 * sent - eg, during reassociation/reauthentication after 2055 * the node (last) disappeared whilst asleep, the driver should 2056 * have unpaused/unsleep'ed the node. So until that is 2057 * sorted out, use this workaround. 2058 */ 2059 if (txq == &avp->av_mcastq) { 2060 DPRINTF(sc, ATH_DEBUG_SW_TX, 2061 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2062 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2063 ath_tx_xmit_normal(sc, txq, bf); 2064 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2065 &queue_to_head)) { 2066 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2067 } else { 2068 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2069 ath_tx_xmit_normal(sc, txq, bf); 2070 } 2071 #else 2072 /* 2073 * For now, since there's no software queue, 2074 * direct-dispatch to the hardware. 2075 */ 2076 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2077 /* 2078 * Update the current leak count if 2079 * we're leaking frames; and set the 2080 * MORE flag as appropriate. 
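 *
 * (Here "leaking" refers to the PS-POLL path: while a node is
 * asleep, an_leak_count frames are let out one at a time, with
 * MORE_DATA reflecting whether the psq/swq still hold frames -
 * see ath_tx_leak_count_update() later in this file.)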
2081 */ 2082 ath_tx_leak_count_update(sc, tid, bf); 2083 ath_tx_xmit_normal(sc, txq, bf); 2084 #endif 2085 done: 2086 return 0; 2087 } 2088 2089 static int 2090 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2091 struct ath_buf *bf, struct mbuf *m0, 2092 const struct ieee80211_bpf_params *params) 2093 { 2094 struct ifnet *ifp = sc->sc_ifp; 2095 struct ieee80211com *ic = ifp->if_l2com; 2096 struct ath_hal *ah = sc->sc_ah; 2097 struct ieee80211vap *vap = ni->ni_vap; 2098 int error, ismcast, ismrr; 2099 int keyix, hdrlen, pktlen, try0, txantenna; 2100 u_int8_t rix, txrate; 2101 struct ieee80211_frame *wh; 2102 u_int flags; 2103 HAL_PKT_TYPE atype; 2104 const HAL_RATE_TABLE *rt; 2105 struct ath_desc *ds; 2106 u_int pri; 2107 int o_tid = -1; 2108 int do_override; 2109 uint8_t type, subtype; 2110 int queue_to_head; 2111 struct ath_node *an = ATH_NODE(ni); 2112 2113 ATH_TX_LOCK_ASSERT(sc); 2114 2115 wh = mtod(m0, struct ieee80211_frame *); 2116 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2117 hdrlen = ieee80211_anyhdrsize(wh); 2118 /* 2119 * Packet length must not include any 2120 * pad bytes; deduct them here. 2121 */ 2122 /* XXX honor IEEE80211_BPF_DATAPAD */ 2123 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2124 2125 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2126 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2127 2128 ATH_KTR(sc, ATH_KTR_TX, 2, 2129 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2130 2131 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2132 __func__, ismcast); 2133 2134 pri = params->ibp_pri & 3; 2135 /* Override pri if the frame isn't a QoS one */ 2136 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2137 pri = ath_tx_getac(sc, m0); 2138 2139 /* XXX If it's an ADDBA, override the correct queue */ 2140 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2141 2142 /* Map ADDBA to the correct priority */ 2143 if (do_override) { 2144 #if 0 2145 DPRINTF(sc, ATH_DEBUG_XMIT, 2146 "%s: overriding tid %d pri %d -> %d\n", 2147 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2148 #endif 2149 pri = TID_TO_WME_AC(o_tid); 2150 } 2151 2152 /* Handle encryption twiddling if needed */ 2153 if (! ath_tx_tag_crypto(sc, ni, 2154 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2155 &hdrlen, &pktlen, &keyix)) { 2156 ath_freetx(m0); 2157 return EIO; 2158 } 2159 /* packet header may have moved, reset our local pointer */ 2160 wh = mtod(m0, struct ieee80211_frame *); 2161 2162 /* Do the generic frame setup */ 2163 /* XXX should just bzero the bf_state? */ 2164 bf->bf_state.bfs_dobaw = 0; 2165 2166 error = ath_tx_dmasetup(sc, bf, m0); 2167 if (error != 0) 2168 return error; 2169 m0 = bf->bf_m; /* NB: may have changed */ 2170 wh = mtod(m0, struct ieee80211_frame *); 2171 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2172 bf->bf_node = ni; /* NB: held reference */ 2173 2174 /* Always enable CLRDMASK for raw frames for now.. */ 2175 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2176 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2177 if (params->ibp_flags & IEEE80211_BPF_RTS) 2178 flags |= HAL_TXDESC_RTSENA; 2179 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2180 /* XXX assume 11g/11n protection? */ 2181 bf->bf_state.bfs_doprot = 1; 2182 flags |= HAL_TXDESC_CTSENA; 2183 } 2184 /* XXX leave ismcast to injector? 
*/ 2185 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2186 flags |= HAL_TXDESC_NOACK; 2187 2188 rt = sc->sc_currates; 2189 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2190 2191 /* Fetch first rate information */ 2192 rix = ath_tx_findrix(sc, params->ibp_rate0); 2193 try0 = params->ibp_try0; 2194 2195 /* 2196 * Override EAPOL rate as appropriate. 2197 */ 2198 if (m0->m_flags & M_EAPOL) { 2199 /* XXX? maybe always use long preamble? */ 2200 rix = an->an_mgmtrix; 2201 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 2202 } 2203 2204 txrate = rt->info[rix].rateCode; 2205 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2206 txrate |= rt->info[rix].shortPreamble; 2207 sc->sc_txrix = rix; 2208 ismrr = (params->ibp_try1 != 0); 2209 txantenna = params->ibp_pri >> 2; 2210 if (txantenna == 0) /* XXX? */ 2211 txantenna = sc->sc_txantenna; 2212 2213 /* 2214 * Since ctsrate is fixed, store it away for later 2215 * use when the descriptor fields are being set. 2216 */ 2217 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2218 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2219 2220 /* 2221 * NB: we mark all packets as type PSPOLL so the h/w won't 2222 * set the sequence number, duration, etc. 2223 */ 2224 atype = HAL_PKT_TYPE_PSPOLL; 2225 2226 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2227 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2228 sc->sc_hwmap[rix].ieeerate, -1); 2229 2230 if (ieee80211_radiotap_active_vap(vap)) { 2231 u_int64_t tsf = ath_hal_gettsf64(ah); 2232 2233 sc->sc_tx_th.wt_tsf = htole64(tsf); 2234 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2235 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2236 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2237 if (m0->m_flags & M_FRAG) 2238 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2239 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2240 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2241 ieee80211_get_node_txpower(ni)); 2242 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2243 2244 ieee80211_radiotap_tx(vap, m0); 2245 } 2246 2247 /* 2248 * Formulate first tx descriptor with tx controls. 2249 */ 2250 ds = bf->bf_desc; 2251 /* XXX check return value? */ 2252 2253 /* Store the decided rate index values away */ 2254 bf->bf_state.bfs_pktlen = pktlen; 2255 bf->bf_state.bfs_hdrlen = hdrlen; 2256 bf->bf_state.bfs_atype = atype; 2257 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2258 ieee80211_get_node_txpower(ni)); 2259 bf->bf_state.bfs_txrate0 = txrate; 2260 bf->bf_state.bfs_try0 = try0; 2261 bf->bf_state.bfs_keyix = keyix; 2262 bf->bf_state.bfs_txantenna = txantenna; 2263 bf->bf_state.bfs_txflags = flags; 2264 bf->bf_state.bfs_shpream = 2265 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2266 
2267 /* Set local packet state, used to queue packets to hardware */
2268 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2269 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2270 bf->bf_state.bfs_pri = pri;
2271 
2272 /* XXX this should be done in ath_tx_setrate() */
2273 bf->bf_state.bfs_ctsrate = 0;
2274 bf->bf_state.bfs_ctsduration = 0;
2275 bf->bf_state.bfs_ismrr = ismrr;
2276 
2277 /* Blank the legacy rate array */
2278 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2279 
2280 bf->bf_state.bfs_rc[0].rix = rix;
2281 bf->bf_state.bfs_rc[0].tries = try0;
2282 bf->bf_state.bfs_rc[0].ratecode = txrate;
2283 
2284 if (ismrr) {
2285 int rix;
2286 
2287 rix = ath_tx_findrix(sc, params->ibp_rate1);
2288 bf->bf_state.bfs_rc[1].rix = rix;
2289 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2290 
2291 rix = ath_tx_findrix(sc, params->ibp_rate2);
2292 bf->bf_state.bfs_rc[2].rix = rix;
2293 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2294 
2295 rix = ath_tx_findrix(sc, params->ibp_rate3);
2296 bf->bf_state.bfs_rc[3].rix = rix;
2297 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2298 }
2299 /*
2300 * All the required rate control decisions have been made;
2301 * fill in the rc flags.
2302 */
2303 ath_tx_rate_fill_rcflags(sc, bf);
2304 
2305 /* NB: no buffered multicast in power save support */
2306 
2307 /*
2308 * If we're overriding the ADDBA destination, dump directly
2309 * into the hardware queue, right after any pending
2310 * frames to that node are.
2311 */
2312 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2313 __func__, do_override);
2314 
2315 #if 1
2316 /*
2317 * Put addba frames in the right place in the right TID/HWQ.
2318 */
2319 if (do_override) {
2320 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2321 /*
2322 * XXX if it's addba frames, should we be leaking
2323 * them out via the frame leak method?
2324 * XXX for now let's not risk it; but we may wish
2325 * to investigate this later.
2326 */
2327 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2328 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2329 &queue_to_head)) {
2330 /* Queue to software queue */
2331 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2332 } else {
2333 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2334 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2335 }
2336 #else
2337 /* Direct-dispatch to the hardware */
2338 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2339 /*
2340 * Update the current leak count if
2341 * we're leaking frames; and set the
2342 * MORE flag as appropriate.
2343 */
2344 ath_tx_leak_count_update(sc, tid, bf);
2345 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2346 #endif
2347 return 0;
2348 }
2349 
2350 /*
2351 * Send a raw frame.
2352 *
2353 * This can be called by net80211.
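 *
 * (Typical caller-side shape, stated as an assumption for
 * illustration: net80211 invokes the driver raw-xmit method with
 * an optional ieee80211_bpf_params; a NULL params means "inspect
 * the frame and choose sane defaults", which is the
 * ath_tx_start() path below, otherwise ath_tx_raw_start() honours
 * the caller-supplied rates, tries and flags.)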
2354 */ 2355 int 2356 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2357 const struct ieee80211_bpf_params *params) 2358 { 2359 struct ieee80211com *ic = ni->ni_ic; 2360 struct ifnet *ifp = ic->ic_ifp; 2361 struct ath_softc *sc = ifp->if_softc; 2362 struct ath_buf *bf; 2363 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2364 int error = 0; 2365 2366 ATH_PCU_LOCK(sc); 2367 if (sc->sc_inreset_cnt > 0) { 2368 DPRINTF(sc, ATH_DEBUG_XMIT, 2369 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2370 error = EIO; 2371 ATH_PCU_UNLOCK(sc); 2372 goto badbad; 2373 } 2374 sc->sc_txstart_cnt++; 2375 ATH_PCU_UNLOCK(sc); 2376 2377 /* Wake the hardware up already */ 2378 ATH_LOCK(sc); 2379 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2380 ATH_UNLOCK(sc); 2381 2382 ATH_TX_LOCK(sc); 2383 2384 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) { 2385 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2386 (ifp->if_flags & IFF_RUNNING) == 0 ? 2387 "!running" : "invalid"); 2388 m_freem(m); 2389 error = ENETDOWN; 2390 goto bad; 2391 } 2392 2393 /* 2394 * Enforce how deep the multicast queue can grow. 2395 * 2396 * XXX duplicated in ath_tx_start(). 2397 */ 2398 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2399 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2400 > sc->sc_txq_mcastq_maxdepth) { 2401 sc->sc_stats.ast_tx_mcastq_overflow++; 2402 error = ENOBUFS; 2403 } 2404 2405 if (error != 0) { 2406 m_freem(m); 2407 goto bad; 2408 } 2409 } 2410 2411 /* 2412 * Grab a TX buffer and associated resources. 2413 */ 2414 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2415 if (bf == NULL) { 2416 sc->sc_stats.ast_tx_nobuf++; 2417 m_freem(m); 2418 error = ENOBUFS; 2419 goto bad; 2420 } 2421 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2422 m, params, bf); 2423 2424 if (params == NULL) { 2425 /* 2426 * Legacy path; interpret frame contents to decide 2427 * precisely how to send the frame. 2428 */ 2429 if (ath_tx_start(sc, ni, bf, m)) { 2430 error = EIO; /* XXX */ 2431 goto bad2; 2432 } 2433 } else { 2434 /* 2435 * Caller supplied explicit parameters to use in 2436 * sending the frame. 2437 */ 2438 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2439 error = EIO; /* XXX */ 2440 goto bad2; 2441 } 2442 } 2443 sc->sc_wd_timer = 5; 2444 #if defined(__DragonFly__) 2445 ++ifp->if_opackets; 2446 #else 2447 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2448 #endif 2449 sc->sc_stats.ast_tx_raw++; 2450 2451 /* 2452 * Update the TIM - if there's anything queued to the 2453 * software queue and power save is enabled, we should 2454 * set the TIM. 
2455 */ 2456 ath_tx_update_tim(sc, ni, 1); 2457 2458 ATH_TX_UNLOCK(sc); 2459 2460 ATH_PCU_LOCK(sc); 2461 sc->sc_txstart_cnt--; 2462 ATH_PCU_UNLOCK(sc); 2463 2464 2465 /* Put the hardware back to sleep if required */ 2466 ATH_LOCK(sc); 2467 ath_power_restore_power_state(sc); 2468 ATH_UNLOCK(sc); 2469 2470 return 0; 2471 2472 bad2: 2473 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2474 "bf=%p", 2475 m, 2476 params, 2477 bf); 2478 ATH_TXBUF_LOCK(sc); 2479 ath_returnbuf_head(sc, bf); 2480 ATH_TXBUF_UNLOCK(sc); 2481 2482 bad: 2483 ATH_TX_UNLOCK(sc); 2484 2485 ATH_PCU_LOCK(sc); 2486 sc->sc_txstart_cnt--; 2487 ATH_PCU_UNLOCK(sc); 2488 2489 /* Put the hardware back to sleep if required */ 2490 ATH_LOCK(sc); 2491 ath_power_restore_power_state(sc); 2492 ATH_UNLOCK(sc); 2493 2494 badbad: 2495 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2496 m, params); 2497 #if defined(__DragonFly__) 2498 ++ifp->if_oerrors; 2499 #else 2500 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2501 #endif 2502 sc->sc_stats.ast_tx_raw_fail++; 2503 ieee80211_free_node(ni); 2504 2505 return error; 2506 } 2507 2508 /* Some helper functions */ 2509 2510 /* 2511 * ADDBA (and potentially others) need to be placed in the same 2512 * hardware queue as the TID/node it's relating to. This is so 2513 * it goes out after any pending non-aggregate frames to the 2514 * same node/TID. 2515 * 2516 * If this isn't done, the ADDBA can go out before the frames 2517 * queued in hardware. Even though these frames have a sequence 2518 * number -earlier- than the ADDBA can be transmitted (but 2519 * no frames whose sequence numbers are after the ADDBA should 2520 * be!) they'll arrive after the ADDBA - and the receiving end 2521 * will simply drop them as being out of the BAW. 2522 * 2523 * The frames can't be appended to the TID software queue - it'll 2524 * never be sent out. So these frames have to be directly 2525 * dispatched to the hardware, rather than queued in software. 2526 * So if this function returns true, the TXQ has to be 2527 * overridden and it has to be directly dispatched. 2528 * 2529 * It's a dirty hack, but someone's gotta do it. 2530 */ 2531 2532 /* 2533 * XXX doesn't belong here! 2534 */ 2535 static int 2536 ieee80211_is_action(struct ieee80211_frame *wh) 2537 { 2538 /* Type: Management frame? */ 2539 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2540 IEEE80211_FC0_TYPE_MGT) 2541 return 0; 2542 2543 /* Subtype: Action frame? */ 2544 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2545 IEEE80211_FC0_SUBTYPE_ACTION) 2546 return 0; 2547 2548 return 1; 2549 } 2550 2551 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2552 /* 2553 * Return an alternate TID for ADDBA request frames. 2554 * 2555 * Yes, this likely should be done in the net80211 layer. 2556 */ 2557 static int 2558 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2559 struct ieee80211_node *ni, 2560 struct mbuf *m0, int *tid) 2561 { 2562 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2563 struct ieee80211_action_ba_addbarequest *ia; 2564 uint8_t *frm; 2565 uint16_t baparamset; 2566 2567 /* Not action frame? Bail */ 2568 if (! ieee80211_is_action(wh)) 2569 return 0; 2570 2571 /* XXX Not needed for frames we send? */ 2572 #if 0 2573 /* Correct length? */ 2574 if (! ieee80211_parse_action(ni, m)) 2575 return 0; 2576 #endif 2577 2578 /* Extract out action frame */ 2579 frm = (u_int8_t *)&wh[1]; 2580 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2581 2582 /* Not ADDBA? 
Bail */ 2583 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2584 return 0; 2585 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2586 return 0; 2587 2588 /* Extract TID, return it */ 2589 baparamset = le16toh(ia->rq_baparamset); 2590 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2591 2592 return 1; 2593 } 2594 #undef MS 2595 2596 /* Per-node software queue operations */ 2597 2598 /* 2599 * Add the current packet to the given BAW. 2600 * It is assumed that the current packet 2601 * 2602 * + fits inside the BAW; 2603 * + already has had a sequence number allocated. 2604 * 2605 * Since the BAW status may be modified by both the ath task and 2606 * the net80211/ifnet contexts, the TID must be locked. 2607 */ 2608 void 2609 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2610 struct ath_tid *tid, struct ath_buf *bf) 2611 { 2612 int index, cindex; 2613 struct ieee80211_tx_ampdu *tap; 2614 2615 ATH_TX_LOCK_ASSERT(sc); 2616 2617 if (bf->bf_state.bfs_isretried) 2618 return; 2619 2620 tap = ath_tx_get_tx_tid(an, tid->tid); 2621 2622 if (! bf->bf_state.bfs_dobaw) { 2623 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2624 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2625 __func__, SEQNO(bf->bf_state.bfs_seqno), 2626 tap->txa_start, tap->txa_wnd); 2627 } 2628 2629 if (bf->bf_state.bfs_addedbaw) 2630 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2631 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2632 "baw head=%d tail=%d\n", 2633 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2634 tap->txa_start, tap->txa_wnd, tid->baw_head, 2635 tid->baw_tail); 2636 2637 /* 2638 * Verify that the given sequence number is not outside of the 2639 * BAW. Complain loudly if that's the case. 2640 */ 2641 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2642 SEQNO(bf->bf_state.bfs_seqno))) { 2643 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2644 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2645 "baw head=%d tail=%d\n", 2646 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2647 tap->txa_start, tap->txa_wnd, tid->baw_head, 2648 tid->baw_tail); 2649 } 2650 2651 /* 2652 * ni->ni_txseqs[] is the currently allocated seqno. 2653 * the txa state contains the current baw start. 2654 */ 2655 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2656 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2657 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2658 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2659 "baw head=%d tail=%d\n", 2660 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2661 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2662 tid->baw_tail); 2663 2664 2665 #if 0 2666 assert(tid->tx_buf[cindex] == NULL); 2667 #endif 2668 if (tid->tx_buf[cindex] != NULL) { 2669 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2670 "%s: ba packet dup (index=%d, cindex=%d, " 2671 "head=%d, tail=%d)\n", 2672 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2673 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2674 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2675 __func__, 2676 tid->tx_buf[cindex], 2677 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2678 bf, 2679 SEQNO(bf->bf_state.bfs_seqno) 2680 ); 2681 } 2682 tid->tx_buf[cindex] = bf; 2683 2684 if (index >= ((tid->baw_tail - tid->baw_head) & 2685 (ATH_TID_MAX_BUFS - 1))) { 2686 tid->baw_tail = cindex; 2687 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2688 } 2689 } 2690 2691 /* 2692 * Flip the BAW buffer entry over from the existing one to the new one. 
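 *
 * (The slot is located with the same arithmetic used by
 * ath_tx_addto_baw():
 *
 *	index  = ATH_BA_INDEX(tap->txa_start, seqno);
 *	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
 *
 * ie, tx_buf[] is a circular window keyed off the BAW left edge.)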
2693 *
2694 * When software retransmitting a (sub-)frame, it is entirely possible that
2695 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2696 * In that instance the buffer is cloned and the new buffer is used for
2697 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2698 * tracking array to maintain consistency.
2699 */
2700 static void
2701 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2702 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2703 {
2704 int index, cindex;
2705 struct ieee80211_tx_ampdu *tap;
2706 int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2707 
2708 ATH_TX_LOCK_ASSERT(sc);
2709 
2710 tap = ath_tx_get_tx_tid(an, tid->tid);
2711 index = ATH_BA_INDEX(tap->txa_start, seqno);
2712 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2713 
2714 /*
2715 * Just warn for now; if it happens then we should find out
2716 * about it. It's highly likely the aggregation session will
2717 * soon hang.
2718 */
2719 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2720 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2721 "%s: retransmitted buffer"
2722 " has mismatching seqnos, BA session may hang.\n",
2723 __func__);
2724 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2725 "%s: old seqno=%d, new_seqno=%d\n", __func__,
2726 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2727 }
2728 
2729 if (tid->tx_buf[cindex] != old_bf) {
2730 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2731 "%s: ath_buf pointer incorrect; "
2732 "BA session may hang.\n", __func__);
2733 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2734 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2735 }
2736 
2737 tid->tx_buf[cindex] = new_bf;
2738 }
2739 
2740 /*
2741 * seq_start - left edge of BAW
2742 * seq_next - current/next sequence number to allocate
2743 *
2744 * Since the BAW status may be modified by both the ath task and
2745 * the net80211/ifnet contexts, the TID must be locked.
2746 */
2747 static void
2748 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2749 struct ath_tid *tid, const struct ath_buf *bf)
2750 {
2751 int index, cindex;
2752 struct ieee80211_tx_ampdu *tap;
2753 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2754 
2755 ATH_TX_LOCK_ASSERT(sc);
2756 
2757 tap = ath_tx_get_tx_tid(an, tid->tid);
2758 index = ATH_BA_INDEX(tap->txa_start, seqno);
2759 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2760 
2761 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2762 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2763 "baw head=%d, tail=%d\n",
2764 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2765 cindex, tid->baw_head, tid->baw_tail);
2766 
2767 /*
2768 * If this occurs then we have a big problem - something else
2769 * has slid tap->txa_start along without updating the BAW
2770 * tracking start/end pointers. Thus the TX BAW state is now
2771 * completely busted.
2772 *
2773 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2774 * it's quite possible that a cloned buffer is making its way
2775 * here and causing it to fire off. Disable TDMA for now.
2776 */
2777 if (tid->tx_buf[cindex] != bf) {
2778 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2779 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2780 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2781 tid->tx_buf[cindex],
2782 (tid->tx_buf[cindex] != NULL) ?
2783 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2784 }
2785 
2786 tid->tx_buf[cindex] = NULL;
2787 
2788 while (tid->baw_head != tid->baw_tail &&
2789 !tid->tx_buf[tid->baw_head]) {
2790 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2791 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2792 }
2793 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2794 "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2795 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2796 }
2797 
2798 static void
2799 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2800 struct ath_buf *bf)
2801 {
2802 struct ieee80211_frame *wh;
2803 
2804 ATH_TX_LOCK_ASSERT(sc);
2805 
2806 if (tid->an->an_leak_count > 0) {
2807 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2808 
2809 /*
2810 * Update MORE based on the software/net80211 queue states.
2811 */
2812 if ((tid->an->an_stack_psq > 0)
2813 || (tid->an->an_swq_depth > 0))
2814 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2815 else
2816 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2817 
2818 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2819 "%s: %s: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2820 __func__,
2821 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
2822 tid->an->an_leak_count,
2823 tid->an->an_stack_psq,
2824 tid->an->an_swq_depth,
2825 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2826 
2827 /*
2828 * Re-sync the underlying buffer.
2829 */
2830 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2831 BUS_DMASYNC_PREWRITE);
2832 
2833 tid->an->an_leak_count--;
2834 }
2835 }
2836 
2837 static int
2838 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2839 {
2840 
2841 ATH_TX_LOCK_ASSERT(sc);
2842 
2843 if (tid->an->an_leak_count > 0) {
2844 return (1);
2845 }
2846 if (tid->paused)
2847 return (0);
2848 return (1);
2849 }
2850 
2851 /*
2852 * Mark the current node/TID as ready to TX.
2853 *
2854 * This is done to make it easy for the software scheduler to
2855 * find which nodes have data to send.
2856 *
2857 * The TXQ lock must be held.
2858 */
2859 void
2860 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2861 {
2862 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2863 
2864 ATH_TX_LOCK_ASSERT(sc);
2865 
2866 /*
2867 * If we are leaking out a frame to this destination
2868 * for PS-POLL, ensure that we allow scheduling to
2869 * occur.
2870 */
2871 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2872 return; /* paused, can't schedule yet */
2873 
2874 if (tid->sched)
2875 return; /* already scheduled */
2876 
2877 tid->sched = 1;
2878 
2879 #if 0
2880 /*
2881 * If this is a sleeping node we're leaking to, give
2882 * it a higher priority. This is so bad for QoS it hurts.
2883 */
2884 if (tid->an->an_leak_count) {
2885 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2886 } else {
2887 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2888 }
2889 #endif
2890 
2891 /*
2892 * We can't do the above - it'll confuse the TXQ software
2893 * scheduler which will keep checking the _head_ TID
2894 * in the list to see if it has traffic. If we queue
2895 * a TID to the head of the list and it doesn't transmit,
2896 * we'll check it again.
2897 *
2898 * So, get the rest of this leaking frames support working
2899 * and reliable first and _then_ optimise it so they're
2900 * pushed out in front of any other pending software
2901 * queued nodes.
2902 */
2903 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2904 }
2905 
2906 /*
2907 * Mark the current node as no longer needing to be polled for
2908 * TX packets.
2909 *
2910 * The TXQ lock must be held.
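 *
 * (This is the counterpart to ath_tx_tid_sched(): the TXQ
 * scheduler walks txq->axq_tidq, so an idle TID has to be removed
 * here or it will keep being polled for traffic it doesn't have.)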
2911 */ 2912 static void 2913 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2914 { 2915 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2916 2917 ATH_TX_LOCK_ASSERT(sc); 2918 2919 if (tid->sched == 0) 2920 return; 2921 2922 tid->sched = 0; 2923 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2924 } 2925 2926 /* 2927 * Assign a sequence number manually to the given frame. 2928 * 2929 * This should only be called for A-MPDU TX frames. 2930 */ 2931 static ieee80211_seq 2932 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2933 struct ath_buf *bf, struct mbuf *m0) 2934 { 2935 struct ieee80211_frame *wh; 2936 int tid, pri; 2937 ieee80211_seq seqno; 2938 uint8_t subtype; 2939 2940 /* TID lookup */ 2941 wh = mtod(m0, struct ieee80211_frame *); 2942 pri = M_WME_GETAC(m0); /* honor classification */ 2943 tid = WME_AC_TO_TID(pri); 2944 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2945 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2946 2947 /* XXX Is it a control frame? Ignore */ 2948 2949 /* Does the packet require a sequence number? */ 2950 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2951 return -1; 2952 2953 ATH_TX_LOCK_ASSERT(sc); 2954 2955 /* 2956 * Is it a QOS NULL Data frame? Give it a sequence number from 2957 * the default TID (IEEE80211_NONQOS_TID.) 2958 * 2959 * The RX path of everything I've looked at doesn't include the NULL 2960 * data frame sequence number in the aggregation state updates, so 2961 * assigning it a sequence number there will cause a BAW hole on the 2962 * RX side. 2963 */ 2964 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2965 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2966 /* XXX no locking for this TID? This is a bit of a problem. */ 2967 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2968 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2969 } else { 2970 /* Manually assign sequence number */ 2971 seqno = ni->ni_txseqs[tid]; 2972 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2973 } 2974 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2975 M_SEQNO_SET(m0, seqno); 2976 2977 /* Return so caller can do something with it if needed */ 2978 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2979 return seqno; 2980 } 2981 2982 /* 2983 * Attempt to direct dispatch an aggregate frame to hardware. 2984 * If the frame is out of BAW, queue. 2985 * Otherwise, schedule it as a single frame. 2986 */ 2987 static void 2988 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2989 struct ath_txq *txq, struct ath_buf *bf) 2990 { 2991 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2992 struct ieee80211_tx_ampdu *tap; 2993 2994 ATH_TX_LOCK_ASSERT(sc); 2995 2996 tap = ath_tx_get_tx_tid(an, tid->tid); 2997 2998 /* paused? queue */ 2999 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 3000 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3001 /* XXX don't sched - we're paused! */ 3002 return; 3003 } 3004 3005 /* outside baw? queue */ 3006 if (bf->bf_state.bfs_dobaw && 3007 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 3008 SEQNO(bf->bf_state.bfs_seqno)))) { 3009 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3010 ath_tx_tid_sched(sc, tid); 3011 return; 3012 } 3013 3014 /* 3015 * This is a temporary check and should be removed once 3016 * all the relevant code paths have been fixed. 
3017 *
3018 * During aggregate retries, it's possible that the head
3019 * frame will fail (which has the bfs_aggr and bfs_nframes
3020 * fields set for said aggregate) and will be retried as
3021 * a single frame. In this instance, the values should
3022 * be reset or the completion code will get upset with you.
3023 */
3024 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3025 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3026 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3027 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3028 bf->bf_state.bfs_aggr = 0;
3029 bf->bf_state.bfs_nframes = 1;
3030 }
3031 
3032 /* Update CLRDMASK just before this frame is queued */
3033 ath_tx_update_clrdmask(sc, tid, bf);
3034 
3035 /* Direct dispatch to hardware */
3036 ath_tx_do_ratelookup(sc, bf);
3037 ath_tx_calc_duration(sc, bf);
3038 ath_tx_calc_protection(sc, bf);
3039 ath_tx_set_rtscts(sc, bf);
3040 ath_tx_rate_fill_rcflags(sc, bf);
3041 ath_tx_setds(sc, bf);
3042 
3043 /* Statistics */
3044 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3045 
3046 /* Track per-TID hardware queue depth correctly */
3047 tid->hwq_depth++;
3048 
3049 /* Add to BAW */
3050 if (bf->bf_state.bfs_dobaw) {
3051 ath_tx_addto_baw(sc, an, tid, bf);
3052 bf->bf_state.bfs_addedbaw = 1;
3053 }
3054 
3055 /* Set completion handler, multi-frame aggregate or not */
3056 bf->bf_comp = ath_tx_aggr_comp;
3057 
3058 /*
3059 * Update the current leak count if
3060 * we're leaking frames; and set the
3061 * MORE flag as appropriate.
3062 */
3063 ath_tx_leak_count_update(sc, tid, bf);
3064 
3065 /* Hand off to hardware */
3066 ath_tx_handoff(sc, txq, bf);
3067 }
3068 
3069 /*
3070 * Attempt to send the packet.
3071 * If the queue isn't busy, direct-dispatch.
3072 * If the queue is busy enough, queue the given packet on the
3073 * relevant software queue.
3074 */
3075 void
3076 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3077 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3078 {
3079 struct ath_node *an = ATH_NODE(ni);
3080 struct ieee80211_frame *wh;
3081 struct ath_tid *atid;
3082 int pri, tid;
3083 struct mbuf *m0 = bf->bf_m;
3084 
3085 ATH_TX_LOCK_ASSERT(sc);
3086 
3087 /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3088 wh = mtod(m0, struct ieee80211_frame *);
3089 pri = ath_tx_getac(sc, m0);
3090 tid = ath_tx_gettid(sc, m0);
3091 atid = &an->an_tid[tid];
3092 
3093 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3094 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3095 
3096 /* Set local packet state, used to queue packets to hardware */
3097 /* XXX potentially duplicate info, re-check */
3098 bf->bf_state.bfs_tid = tid;
3099 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3100 bf->bf_state.bfs_pri = pri;
3101 
3102 /*
3103 * If the hardware queue isn't busy, queue it directly.
3104 * If the hardware queue is busy, queue it.
3105 * If the TID is paused or the traffic is outside the BAW, software
3106 * queue it.
3107 *
3108 * If the node is in power-save and we're leaking a frame,
3109 * leak a single frame.
3110 */
3111 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3112 /* TID is paused, queue */
3113 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3114 /*
3115 * If the caller requested that it be sent at a high
3116 * priority, queue it at the head of the list.
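 *
 * (queue_to_head is handed in by ath_tx_should_swq_frame(); the
 * intent - an assumption worth verifying - is the PS-POLL leak
 * path, where the leaked frame should be the next one out.)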
3117 */
3118 if (queue_to_head)
3119 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3120 else
3121 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3122 } else if (ath_tx_ampdu_pending(sc, an, tid)) {
3123 /* AMPDU pending; queue */
3124 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3125 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3126 /* XXX sched? */
3127 } else if (ath_tx_ampdu_running(sc, an, tid)) {
3128 /* AMPDU running, attempt direct dispatch if possible */
3129 
3130 /*
3131 * Always queue the frame to the tail of the list.
3132 */
3133 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3134 
3135 /*
3136 * If the hardware queue isn't busy, direct dispatch
3137 * the head frame in the list. Don't schedule the
3138 * TID - let it build some more frames first?
3139 *
3140 * When running A-MPDU, always just check the hardware
3141 * queue depth against the aggregate frame limit.
3142 * We don't want to burst a large number of single frames
3143 * out to the hardware; we want to aggressively hold back.
3144 *
3145 * Otherwise, schedule the TID.
3146 */
3147 /* XXX TXQ locking */
3148 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3149 bf = ATH_TID_FIRST(atid);
3150 ATH_TID_REMOVE(atid, bf, bf_list);
3151 
3152 /*
3153 * Ensure it's definitely treated as a non-AMPDU
3154 * frame - this information may have been left
3155 * over from a previous attempt.
3156 */
3157 bf->bf_state.bfs_aggr = 0;
3158 bf->bf_state.bfs_nframes = 1;
3159 
3160 /* Queue to the hardware */
3161 ath_tx_xmit_aggr(sc, an, txq, bf);
3162 DPRINTF(sc, ATH_DEBUG_SW_TX,
3163 "%s: xmit_aggr\n",
3164 __func__);
3165 } else {
3166 DPRINTF(sc, ATH_DEBUG_SW_TX,
3167 "%s: ampdu; swq'ing\n",
3168 __func__);
3169 
3170 ath_tx_tid_sched(sc, atid);
3171 }
3172 /*
3173 * If we're not doing A-MPDU, be prepared to direct dispatch
3174 * up to both limits if possible. This particular corner
3175 * case may end up with packet starvation between aggregate
3176 * traffic and non-aggregate traffic: we want to ensure
3177 * that non-aggregate stations get a few frames queued to the
3178 * hardware before the aggregate station(s) get their chance.
3179 *
3180 * So if you only ever see a couple of frames direct dispatched
3181 * to the hardware from a non-AMPDU client, check both here
3182 * and in the software queue dispatcher to ensure that those
3183 * non-AMPDU stations get a fair chance to transmit.
3184 */
3185 /* XXX TXQ locking */
3186 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3187 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3188 /* AMPDU not running, attempt direct dispatch */
3189 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3190 /* See if clrdmask needs to be set */
3191 ath_tx_update_clrdmask(sc, atid, bf);
3192 
3193 /*
3194 * Update the current leak count if
3195 * we're leaking frames; and set the
3196 * MORE flag as appropriate.
3197 */
3198 ath_tx_leak_count_update(sc, atid, bf);
3199 
3200 /*
3201 * Dispatch the frame.
3202 */
3203 ath_tx_xmit_normal(sc, txq, bf);
3204 } else {
3205 /* Busy; queue */
3206 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3207 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3208 ath_tx_tid_sched(sc, atid);
3209 }
3210 }
3211 
3212 /*
3213 * Only set the clrdmask bit if none of the nodes are currently
3214 * filtered.
3215 *
3216 * XXX TODO: go through all the callers and check to see
3217 * which are being called in the context of looping over all
3218 * TIDs (eg, if all tids are being paused, resumed, etc.)
3219 * That'll avoid O(n^2) complexity here.
3220 */
3221 static void
3222 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3223 {
3224 int i;
3225 
3226 ATH_TX_LOCK_ASSERT(sc);
3227 
3228 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3229 if (an->an_tid[i].isfiltered == 1)
3230 return;
3231 }
3232 an->clrdmask = 1;
3233 }
3234 
3235 /*
3236 * Configure the per-TID node state.
3237 *
3238 * This likely belongs in if_ath_node.c but I can't think of anywhere
3239 * else to put it just yet.
3240 *
3241 * This sets up the SLISTs and the mutex as appropriate.
3242 */
3243 void
3244 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3245 {
3246 int i, j;
3247 struct ath_tid *atid;
3248 
3249 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3250 atid = &an->an_tid[i];
3251 
3252 /* XXX now with this bzero(), is the field 0'ing needed? */
3253 bzero(atid, sizeof(*atid));
3254 
3255 TAILQ_INIT(&atid->tid_q);
3256 TAILQ_INIT(&atid->filtq.tid_q);
3257 atid->tid = i;
3258 atid->an = an;
3259 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3260 atid->tx_buf[j] = NULL;
3261 atid->baw_head = atid->baw_tail = 0;
3262 atid->paused = 0;
3263 atid->sched = 0;
3264 atid->hwq_depth = 0;
3265 atid->cleanup_inprogress = 0;
3266 if (i == IEEE80211_NONQOS_TID)
3267 atid->ac = ATH_NONQOS_TID_AC;
3268 else
3269 atid->ac = TID_TO_WME_AC(i);
3270 }
3271 an->clrdmask = 1; /* Always start by setting this bit */
3272 }
3273 
3274 /*
3275 * Pause the current TID. This stops packets from being transmitted
3276 * on it.
3277 *
3278 * Since this is called from upper layers as well as the driver,
3279 * it will get the TID lock.
3280 */
3281 static void
3282 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3283 {
3284 
3285 ATH_TX_LOCK_ASSERT(sc);
3286 tid->paused++;
3287 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%s]: tid=%d, paused = %d\n",
3288 __func__,
3289 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3290 tid->tid,
3291 tid->paused);
3292 }
3293 
3294 /*
3295 * Unpause the current TID, and schedule it if needed.
3296 */
3297 static void
3298 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3299 {
3300 ATH_TX_LOCK_ASSERT(sc);
3301 
3302 /*
3303 * There are some odd places where ath_tx_tid_resume() is called
3304 * when it shouldn't be; this works around that particular issue
3305 * until it's actually resolved.
3306 */
3307 if (tid->paused == 0) {
3308 device_printf(sc->sc_dev,
3309 "%s: [%s]: tid=%d, paused=0?\n",
3310 __func__,
3311 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3312 tid->tid);
3313 } else {
3314 tid->paused--;
3315 }
3316 
3317 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3318 "%s: [%s]: tid=%d, unpaused = %d\n",
3319 __func__,
3320 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3321 tid->tid,
3322 tid->paused);
3323 
3324 if (tid->paused)
3325 return;
3326 
3327 /*
3328 * Override the clrdmask configuration for the next frame
3329 * from this TID, just to get the ball rolling.
3330 */
3331 ath_tx_set_clrdmask(sc, tid->an);
3332 
3333 if (tid->axq_depth == 0)
3334 return;
3335 
3336 /* XXX isfiltered shouldn't ever be 0 at this point */
3337 if (tid->isfiltered == 1) {
3338 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3339 __func__);
3340 return;
3341 }
3342 
3343 ath_tx_tid_sched(sc, tid);
3344 
3345 /*
3346 * Queue the software TX scheduler.
3347 */
3348 ath_tx_swq_kick(sc);
3349 }
3350 
3351 /*
3352 * Add the given ath_buf to the TID filtered frame list.
3353 * This requires the TID be filtered.
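 *
 * ("Filtered" frames here are ones the MAC refused to transmit
 * because the destination couldn't receive them (eg it went to
 * sleep); they sit on the filtq until the hardware queue drains
 * and are then requeued - see ath_tx_tid_filt_comp_complete().)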
3354 */
3355 static void
3356 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3357 struct ath_buf *bf)
3358 {
3359 
3360 ATH_TX_LOCK_ASSERT(sc);
3361 
3362 if (!tid->isfiltered)
3363 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3364 __func__);
3365 
3366 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3367 
3368 /* Set the retry bit and bump the retry counter */
3369 ath_tx_set_retry(sc, bf);
3370 sc->sc_stats.ast_tx_swfiltered++;
3371 
3372 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3373 }
3374 
3375 /*
3376 * Handle a completed filtered frame from the given TID.
3377 * This just enables/pauses the filtered frame state if required
3378 * and appends the filtered frame to the filtered queue.
3379 */
3380 static void
3381 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3382 struct ath_buf *bf)
3383 {
3384 
3385 ATH_TX_LOCK_ASSERT(sc);
3386 
3387 if (! tid->isfiltered) {
3388 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3389 __func__, tid->tid);
3390 tid->isfiltered = 1;
3391 ath_tx_tid_pause(sc, tid);
3392 }
3393 
3394 /* Add the frame to the filter queue */
3395 ath_tx_tid_filt_addbuf(sc, tid, bf);
3396 }
3397 
3398 /*
3399 * Complete the filtered frame TX completion.
3400 *
3401 * If there are no more frames in the hardware queue, unpause/unfilter
3402 * the TID if applicable. Otherwise we will wait for a node PS transition
3403 * to unfilter.
3404 */
3405 static void
3406 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3407 {
3408 struct ath_buf *bf;
3409 int do_resume = 0;
3410 
3411 ATH_TX_LOCK_ASSERT(sc);
3412 
3413 if (tid->hwq_depth != 0)
3414 return;
3415 
3416 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3417 __func__, tid->tid);
3418 if (tid->isfiltered == 1) {
3419 tid->isfiltered = 0;
3420 do_resume = 1;
3421 }
3422 
3423 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3424 ath_tx_set_clrdmask(sc, tid->an);
3425 
3426 /* XXX this is really quite inefficient */
3427 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3428 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3429 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3430 }
3431 
3432 /* And only resume if we had paused before */
3433 if (do_resume)
3434 ath_tx_tid_resume(sc, tid);
3435 }
3436 
3437 /*
3438 * Called when a single (aggregate or otherwise) frame is completed.
3439 *
3440 * Returns 0 if the buffer could be added to the filtered list
3441 * (cloned or otherwise), 1 if the buffer couldn't be added to the
3442 * filtered list (failed clone; expired retry) and the caller should
3443 * free it and handle it like a failure (eg by sending a BAR.)
3444 *
3445 * Since the buffer may be cloned, bf must not be touched after this
3446 * call if the return value is 0.
3447 */
3448 static int
3449 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3450 struct ath_buf *bf)
3451 {
3452 struct ath_buf *nbf;
3453 int retval;
3454 
3455 ATH_TX_LOCK_ASSERT(sc);
3456 
3457 /*
3458 * Don't allow a filtered frame to live forever.
3459 */
3460 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3461 sc->sc_stats.ast_tx_swretrymax++;
3462 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3463 "%s: bf=%p, seqno=%d, exceeded retries\n",
3464 __func__,
3465 bf,
3466 SEQNO(bf->bf_state.bfs_seqno));
3467 retval = 1; /* error */
3468 goto finish;
3469 }
3470 
3471 /*
3472 * A busy buffer can't be added to the retry list.
3473 * It needs to be cloned.
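 *
 * (ATH_BUF_BUSY means the hardware may still be referencing the
 * descriptor; ath_tx_retry_clone() allocates a fresh ath_buf and
 * ath_tx_switch_baw_buf() swaps it into the BAW tracking slot.)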
3474 */ 3475 if (bf->bf_flags & ATH_BUF_BUSY) { 3476 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3477 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3478 "%s: busy buffer clone: %p -> %p\n", 3479 __func__, bf, nbf); 3480 } else { 3481 nbf = bf; 3482 } 3483 3484 if (nbf == NULL) { 3485 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3486 "%s: busy buffer couldn't be cloned (%p)!\n", 3487 __func__, bf); 3488 retval = 1; /* error */ 3489 } else { 3490 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3491 retval = 0; /* ok */ 3492 } 3493 finish: 3494 ath_tx_tid_filt_comp_complete(sc, tid); 3495 3496 return (retval); 3497 } 3498 3499 static void 3500 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3501 struct ath_buf *bf_first, ath_bufhead *bf_q) 3502 { 3503 struct ath_buf *bf, *bf_next, *nbf; 3504 3505 ATH_TX_LOCK_ASSERT(sc); 3506 3507 bf = bf_first; 3508 while (bf) { 3509 bf_next = bf->bf_next; 3510 bf->bf_next = NULL; /* Remove it from the aggr list */ 3511 3512 /* 3513 * Don't allow a filtered frame to live forever. 3514 */ 3515 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3516 sc->sc_stats.ast_tx_swretrymax++; 3517 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3518 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", 3519 __func__, 3520 tid->tid, 3521 bf, 3522 SEQNO(bf->bf_state.bfs_seqno)); 3523 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3524 goto next; 3525 } 3526 3527 if (bf->bf_flags & ATH_BUF_BUSY) { 3528 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3529 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3530 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", 3531 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); 3532 } else { 3533 nbf = bf; 3534 } 3535 3536 /* 3537 * If the buffer couldn't be cloned, add it to bf_q; 3538 * the caller will free the buffer(s) as required. 3539 */ 3540 if (nbf == NULL) { 3541 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3542 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", 3543 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); 3544 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3545 } else { 3546 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3547 } 3548 next: 3549 bf = bf_next; 3550 } 3551 3552 ath_tx_tid_filt_comp_complete(sc, tid); 3553 } 3554 3555 /* 3556 * Suspend the queue because we need to TX a BAR. 3557 */ 3558 static void 3559 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3560 { 3561 3562 ATH_TX_LOCK_ASSERT(sc); 3563 3564 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3565 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3566 __func__, 3567 tid->tid, 3568 tid->bar_wait, 3569 tid->bar_tx); 3570 3571 /* We shouldn't be called when bar_tx is 1 */ 3572 if (tid->bar_tx) { 3573 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3574 "%s: bar_tx is 1?!\n", __func__); 3575 } 3576 3577 /* If we've already been called, just be patient. */ 3578 if (tid->bar_wait) 3579 return; 3580 3581 /* Wait! */ 3582 tid->bar_wait = 1; 3583 3584 /* Only one pause, no matter how many frames fail */ 3585 ath_tx_tid_pause(sc, tid); 3586 } 3587 3588 /* 3589 * We've finished with BAR handling - either we succeeded or 3590 * failed. Either way, unsuspend TX. 
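 *
 * (Rough BAR life cycle: ath_tx_tid_bar_suspend() sets bar_wait
 * and pauses the TID; once hwq_depth reaches zero,
 * ath_tx_tid_bar_tx() sets bar_tx and calls ieee80211_send_bar();
 * the completion path lands here to clear both flags and resume
 * the TID.)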
3591 */ 3592 static void 3593 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3594 { 3595 3596 ATH_TX_LOCK_ASSERT(sc); 3597 3598 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3599 "%s: %s: TID=%d, called\n", 3600 __func__, 3601 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3602 tid->tid); 3603 3604 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3605 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3606 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3607 __func__, 3608 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3609 tid->tid, tid->bar_tx, tid->bar_wait); 3610 } 3611 3612 tid->bar_tx = tid->bar_wait = 0; 3613 ath_tx_tid_resume(sc, tid); 3614 } 3615 3616 /* 3617 * Return whether we're ready to TX a BAR frame. 3618 * 3619 * Requires the TID lock be held. 3620 */ 3621 static int 3622 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3623 { 3624 3625 ATH_TX_LOCK_ASSERT(sc); 3626 3627 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3628 return (0); 3629 3630 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3631 "%s: %s: TID=%d, bar ready\n", 3632 __func__, 3633 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3634 tid->tid); 3635 3636 return (1); 3637 } 3638 3639 /* 3640 * Check whether the current TID is ready to have a BAR 3641 * TXed and if so, do the TX. 3642 * 3643 * Since the TID/TXQ lock can't be held during a call to 3644 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3645 * sending the BAR and locking it again. 3646 * 3647 * Eventually, the code to send the BAR should be broken out 3648 * from this routine so the lock doesn't have to be reacquired 3649 * just to be immediately dropped by the caller. 3650 */ 3651 static void 3652 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3653 { 3654 struct ieee80211_tx_ampdu *tap; 3655 3656 ATH_TX_LOCK_ASSERT(sc); 3657 3658 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3659 "%s: %s: TID=%d, called\n", 3660 __func__, 3661 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3662 tid->tid); 3663 3664 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3665 3666 /* 3667 * This is an error condition! 3668 */ 3669 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3670 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3671 "%s: %s: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3672 __func__, 3673 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3674 tid->tid, tid->bar_tx, tid->bar_wait); 3675 return; 3676 } 3677 3678 /* Don't do anything if we still have pending frames */ 3679 if (tid->hwq_depth > 0) { 3680 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3681 "%s: %s: TID=%d, hwq_depth=%d, waiting\n", 3682 __func__, 3683 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3684 tid->tid, 3685 tid->hwq_depth); 3686 return; 3687 } 3688 3689 /* We're now about to TX */ 3690 tid->bar_tx = 1; 3691 3692 /* 3693 * Override the clrdmask configuration for the next frame, 3694 * just to get the ball rolling. 3695 */ 3696 ath_tx_set_clrdmask(sc, tid->an); 3697 3698 /* 3699 * Calculate new BAW left edge, now that all frames have either 3700 * succeeded or failed. 3701 * 3702 * XXX verify this is _actually_ the valid value to begin at! 3703 */ 3704 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3705 "%s: %s: TID=%d, new BAW left edge=%d\n", 3706 __func__, 3707 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr), 3708 tid->tid, 3709 tap->txa_start); 3710 3711 /* Try sending the BAR frame */ 3712 /* We can't hold the lock here! */ 3713 3714 ATH_TX_UNLOCK(sc); 3715 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3716 /* Success? 
Now we wait for notification that it's done */
3717 ATH_TX_LOCK(sc);
3718 return;
3719 }
3720 
3721 /* Failure? For now, warn loudly and continue */
3722 ATH_TX_LOCK(sc);
3723 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3724 "%s: %s: TID=%d, failed to TX BAR, continue!\n",
3725 __func__,
3726 ath_hal_ether_sprintf(tid->an->an_node.ni_macaddr),
3727 tid->tid);
3728 ath_tx_tid_bar_unsuspend(sc, tid);
3729 }
3730 
3731 static void
3732 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3733 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3734 {
3735 
3736 ATH_TX_LOCK_ASSERT(sc);
3737 
3738 /*
3739 * If the current TID is running AMPDU, update
3740 * the BAW.
3741 */
3742 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3743 bf->bf_state.bfs_dobaw) {
3744 /*
3745 * Only remove the frame from the BAW if it's
3746 * been transmitted at least once; this means
3747 * the frame was in the BAW to begin with.
3748 */
3749 if (bf->bf_state.bfs_retries > 0) {
3750 ath_tx_update_baw(sc, an, tid, bf);
3751 bf->bf_state.bfs_dobaw = 0;
3752 }
3753 #if 0
3754 /*
3755 * This has become a non-fatal error now
3756 */
3757 if (! bf->bf_state.bfs_addedbaw)
3758 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3759 "%s: wasn't added: seqno %d\n",
3760 __func__, SEQNO(bf->bf_state.bfs_seqno));
3761 #endif
3762 }
3763 
3764 /* Strip it out of an aggregate list if it was in one */
3765 bf->bf_next = NULL;
3766 
3767 /* Insert on the free queue to be freed by the caller */
3768 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3769 }
3770 
3771 static void
3772 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3773 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3774 {
3775 struct ieee80211_node *ni = &an->an_node;
3776 struct ath_txq *txq;
3777 struct ieee80211_tx_ampdu *tap;
3778 
3779 txq = sc->sc_ac2q[tid->ac];
3780 tap = ath_tx_get_tx_tid(an, tid->tid);
3781 
3782 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3783 "%s: %s: %s: bf=%p: addbaw=%d, dobaw=%d, "
3784 "seqno=%d, retry=%d\n",
3785 __func__,
3786 pfx,
3787 ath_hal_ether_sprintf(ni->ni_macaddr),
3788 bf,
3789 bf->bf_state.bfs_addedbaw,
3790 bf->bf_state.bfs_dobaw,
3791 SEQNO(bf->bf_state.bfs_seqno),
3792 bf->bf_state.bfs_retries);
3793 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3794 "%s: %s: %s: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3795 __func__,
3796 pfx,
3797 ath_hal_ether_sprintf(ni->ni_macaddr),
3798 bf,
3799 txq->axq_qnum,
3800 txq->axq_depth,
3801 txq->axq_aggr_depth);
3802 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3803 "%s: %s: %s: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3804 "isfiltered=%d\n",
3805 __func__,
3806 pfx,
3807 ath_hal_ether_sprintf(ni->ni_macaddr),
3808 bf,
3809 tid->axq_depth,
3810 tid->hwq_depth,
3811 tid->bar_wait,
3812 tid->isfiltered);
3813 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3814 "%s: %s: %s: tid %d: "
3815 "sched=%d, paused=%d, "
3816 "incomp=%d, baw_head=%d, "
3817 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3818 __func__,
3819 pfx,
3820 ath_hal_ether_sprintf(ni->ni_macaddr),
3821 tid->tid,
3822 tid->sched, tid->paused,
3823 tid->incomp, tid->baw_head,
3824 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3825 ni->ni_txseqs[tid->tid]);
3826 
3827 /* XXX Dump the frame, see what it is? */
3828 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3829 ieee80211_dump_pkt(ni->ni_ic,
3830 mtod(bf->bf_m, const uint8_t *),
3831 bf->bf_m->m_len, 0, -1);
3832 }
3833 
3834 /*
3835 * Free any packets currently pending in the software TX queue.
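 *
 * (Both the normal tid_q and the filtered filtq are emptied into
 * bf_cq; the caller completes and frees those buffers outside the
 * TX lock.)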
3836  *
3837  * This will be called when a node is being deleted.
3838  *
3839  * It can also be called on an active node during an interface
3840  * reset or state transition.
3841  *
3842  * (From Linux/reference):
3843  *
3844  * TODO: For frame(s) that are in the retry state, we will reuse the
3845  * sequence number(s) without setting the retry bit. The
3846  * alternative is to give up on these and BAR the receiver's window
3847  * forward.
3848  */
3849 static void
3850 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3851     struct ath_tid *tid, ath_bufhead *bf_cq)
3852 {
3853	struct ath_buf *bf;
3854	struct ieee80211_tx_ampdu *tap;
3855	struct ieee80211_node *ni = &an->an_node;
3856	int t;
3857
3858	tap = ath_tx_get_tx_tid(an, tid->tid);
3859
3860	ATH_TX_LOCK_ASSERT(sc);
3861
3862	/* Walk the queue, free frames */
3863	t = 0;
3864	for (;;) {
3865		bf = ATH_TID_FIRST(tid);
3866		if (bf == NULL) {
3867			break;
3868		}
3869
3870		if (t == 0) {
3871			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3872			// t = 1;
3873		}
3874
3875		ATH_TID_REMOVE(tid, bf, bf_list);
3876		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3877	}
3878
3879	/* And now, drain the filtered frame queue */
3880	t = 0;
3881	for (;;) {
3882		bf = ATH_TID_FILT_FIRST(tid);
3883		if (bf == NULL)
3884			break;
3885
3886		if (t == 0) {
3887			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3888			// t = 1;
3889		}
3890
3891		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3892		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3893	}
3894
3895	/*
3896	 * Override the clrdmask configuration for the next frame
3897	 * in case there is some future transmission, just to get
3898	 * the ball rolling.
3899	 *
3900	 * This won't hurt things if the TID is about to be freed.
3901	 */
3902	ath_tx_set_clrdmask(sc, tid->an);
3903
3904	/*
3905	 * Now that it's completed, grab the TID lock and update
3906	 * the sequence number and BAW window.
3907	 * Because sequence numbers have been assigned to frames
3908	 * that haven't been sent yet, it's entirely possible
3909	 * we'll be called with some pending frames that have not
3910	 * been transmitted.
3911	 *
3912	 * The cleaner solution is to do the sequence number allocation
3913	 * when the packet is first transmitted - and thus the "retries"
3914	 * check above would be enough to update the BAW/seqno.
3915	 */
3916
3917	/* But don't do it for non-QoS TIDs */
3918	if (tap) {
3919 #if 1
3920		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3921		    "%s: %s: node %p: TID %d: sliding BAW left edge to %d\n",
3922		    __func__,
3923		    ath_hal_ether_sprintf(ni->ni_macaddr),
3924		    an,
3925		    tid->tid,
3926		    tap->txa_start);
3927 #endif
3928		ni->ni_txseqs[tid->tid] = tap->txa_start;
3929		tid->baw_tail = tid->baw_head;
3930	}
3931 }
3932
3933 /*
3934  * Reset the TID state. This must only be called once the node has
3935  * had its frames flushed from this TID, to ensure that no other
3936  * pause / unpause logic can kick in.
3937  */
3938 static void
3939 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3940 {
3941
3942 #if 0
3943	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3944	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3945	tid->incomp = tid->cleanup_inprogress = 0;
3946 #endif
3947
3948	/*
3949	 * If we have a bar_wait set, we need to unpause the TID
3950	 * here. Otherwise once cleanup has finished, the TID won't
3951	 * have the right paused counter.
3952	 *
3953	 * XXX I'm not going through resume here - I don't want the
3954	 * node to be rescheduled just yet. This however should be
3955	 * methodized!
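 *
 * (The invariant being relied upon here, a sketch of the intent
 * rather than a verified rule: bar_wait and isfiltered each hold
 * exactly one reference on tid->paused, so tearing them down must
 * drop exactly one pause count each - without the reschedule that
 * ath_tx_tid_resume() would trigger.)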
3956  */
3957	if (tid->bar_wait) {
3958		if (tid->paused > 0) {
3959			tid->paused--;
3960		}
3961	}
3962
3963	/*
3964	 * XXX same with a currently filtered TID.
3965	 *
3966	 * Since this is being called during a flush, we assume that
3967	 * the filtered frame list is actually empty.
3968	 *
3969	 * XXX TODO: add in a check to ensure that the filtered queue
3970	 * depth is actually 0!
3971	 */
3972	if (tid->isfiltered) {
3973		if (tid->paused > 0) {
3974			tid->paused--;
3975		}
3976	}
3977
3978	/*
3979	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3980	 * The TID may be going through cleanup from the last association
3981	 * where things in the BAW are still in the hardware queue.
3982	 */
3983	tid->bar_wait = 0;
3984	tid->bar_tx = 0;
3985	tid->isfiltered = 0;
3986	tid->sched = 0;
3987	tid->addba_tx_pending = 0;
3988
3989	/*
3990	 * XXX TODO: it may just be enough to walk the HWQs and mark
3991	 * frames for that node as non-aggregate; or mark the ath_node
3992	 * with something that indicates that aggregation is no longer
3993	 * occurring. Then we can just toss the BAW complaints and
3994	 * do a complete hard reset of state here - no pause, no
3995	 * complete counter, etc.
3996	 */
3997
3998 }
3999
4000 /*
4001  * Flush all software queued packets for the given node.
4002  *
4003  * This occurs when a completion handler frees the last buffer
4004  * for a node, and the node is thus freed. This causes the node
4005  * to be cleaned up, which ends up calling ath_tx_node_flush.
4006  */
4007 void
4008 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4009 {
4010	int tid;
4011	ath_bufhead bf_cq;
4012	struct ath_buf *bf;
4013
4014	TAILQ_INIT(&bf_cq);
4015
4016	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4017	    &an->an_node);
4018
4019	ATH_TX_LOCK(sc);
4020	DPRINTF(sc, ATH_DEBUG_NODE,
4021	    "%s: %s: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4022	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4023	    __func__,
4024	    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
4025	    an->an_is_powersave,
4026	    an->an_stack_psq,
4027	    an->an_tim_set,
4028	    an->an_swq_depth,
4029	    an->clrdmask,
4030	    an->an_leak_count);
4031
4032	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4033		struct ath_tid *atid = &an->an_tid[tid];
4034
4035		/* Free packets */
4036		ath_tx_tid_drain(sc, an, atid, &bf_cq);
4037
4038		/* Remove this tid from the list of active tids */
4039		ath_tx_tid_unsched(sc, atid);
4040
4041		/* Reset the per-TID pause, BAR, etc state */
4042		ath_tx_tid_reset(sc, atid);
4043	}
4044
4045	/*
4046	 * Clear global leak count
4047	 */
4048	an->an_leak_count = 0;
4049	ATH_TX_UNLOCK(sc);
4050
4051	/* Handle completed frames */
4052	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4053		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4054		ath_tx_default_comp(sc, bf, 0);
4055	}
4056 }
4057
4058 /*
4059  * Drain all the software TXQs currently with traffic queued.
4060  */
4061 void
4062 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4063 {
4064	struct ath_tid *tid;
4065	ath_bufhead bf_cq;
4066	struct ath_buf *bf;
4067
4068	TAILQ_INIT(&bf_cq);
4069	ATH_TX_LOCK(sc);
4070
4071	/*
4072	 * Iterate over all active tids for the given txq,
4073	 * flushing and unsched'ing them
4074	 */
4075	while (!
TAILQ_EMPTY(&txq->axq_tidq)) { 4076 tid = TAILQ_FIRST(&txq->axq_tidq); 4077 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 4078 ath_tx_tid_unsched(sc, tid); 4079 } 4080 4081 ATH_TX_UNLOCK(sc); 4082 4083 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4084 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4085 ath_tx_default_comp(sc, bf, 0); 4086 } 4087 } 4088 4089 /* 4090 * Handle completion of non-aggregate session frames. 4091 * 4092 * This (currently) doesn't implement software retransmission of 4093 * non-aggregate frames! 4094 * 4095 * Software retransmission of non-aggregate frames needs to obey 4096 * the strict sequence number ordering, and drop any frames that 4097 * will fail this. 4098 * 4099 * For now, filtered frames and frame transmission will cause 4100 * all kinds of issues. So we don't support them. 4101 * 4102 * So anyone queuing frames via ath_tx_normal_xmit() or 4103 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4104 */ 4105 void 4106 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4107 { 4108 struct ieee80211_node *ni = bf->bf_node; 4109 struct ath_node *an = ATH_NODE(ni); 4110 int tid = bf->bf_state.bfs_tid; 4111 struct ath_tid *atid = &an->an_tid[tid]; 4112 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4113 4114 /* The TID state is protected behind the TXQ lock */ 4115 ATH_TX_LOCK(sc); 4116 4117 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4118 __func__, bf, fail, atid->hwq_depth - 1); 4119 4120 atid->hwq_depth--; 4121 4122 #if 0 4123 /* 4124 * If the frame was filtered, stick it on the filter frame 4125 * queue and complain about it. It shouldn't happen! 4126 */ 4127 if ((ts->ts_status & HAL_TXERR_FILT) || 4128 (ts->ts_status != 0 && atid->isfiltered)) { 4129 DPRINTF(sc, ATH_DEBUG_SW_TX, 4130 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4131 __func__, 4132 atid->isfiltered, 4133 ts->ts_status); 4134 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4135 } 4136 #endif 4137 if (atid->isfiltered) 4138 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4139 if (atid->hwq_depth < 0) 4140 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4141 __func__, atid->hwq_depth); 4142 4143 /* If the TID is being cleaned up, track things */ 4144 /* XXX refactor! */ 4145 if (atid->cleanup_inprogress) { 4146 atid->incomp--; 4147 if (atid->incomp == 0) { 4148 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4149 "%s: TID %d: cleaned up! resume!\n", 4150 __func__, tid); 4151 atid->cleanup_inprogress = 0; 4152 ath_tx_tid_resume(sc, atid); 4153 } 4154 } 4155 4156 /* 4157 * If the queue is filtered, potentially mark it as complete 4158 * and reschedule it as needed. 4159 * 4160 * This is required as there may be a subsequent TX descriptor 4161 * for this end-node that has CLRDMASK set, so it's quite possible 4162 * that a filtered frame will be followed by a non-filtered 4163 * (complete or otherwise) frame. 4164 * 4165 * XXX should we do this before we complete the frame? 4166 */ 4167 if (atid->isfiltered) 4168 ath_tx_tid_filt_comp_complete(sc, atid); 4169 ATH_TX_UNLOCK(sc); 4170 4171 /* 4172 * punt to rate control if we're not being cleaned up 4173 * during a hw queue drain and the frame wanted an ACK. 4174 */ 4175 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4176 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4177 ts, bf->bf_state.bfs_pktlen, 4178 1, (ts->ts_status == 0) ? 
0 : 1); 4179 4180 ath_tx_default_comp(sc, bf, fail); 4181 } 4182 4183 /* 4184 * Handle cleanup of aggregate session packets that aren't 4185 * an A-MPDU. 4186 * 4187 * There's no need to update the BAW here - the session is being 4188 * torn down. 4189 */ 4190 static void 4191 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4192 { 4193 struct ieee80211_node *ni = bf->bf_node; 4194 struct ath_node *an = ATH_NODE(ni); 4195 int tid = bf->bf_state.bfs_tid; 4196 struct ath_tid *atid = &an->an_tid[tid]; 4197 4198 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4199 __func__, tid, atid->incomp); 4200 4201 ATH_TX_LOCK(sc); 4202 atid->incomp--; 4203 4204 /* XXX refactor! */ 4205 if (bf->bf_state.bfs_dobaw) { 4206 ath_tx_update_baw(sc, an, atid, bf); 4207 if (!bf->bf_state.bfs_addedbaw) 4208 DPRINTF(sc, ATH_DEBUG_SW_TX, 4209 "%s: wasn't added: seqno %d\n", 4210 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4211 } 4212 4213 if (atid->incomp == 0) { 4214 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4215 "%s: TID %d: cleaned up! resume!\n", 4216 __func__, tid); 4217 atid->cleanup_inprogress = 0; 4218 ath_tx_tid_resume(sc, atid); 4219 } 4220 ATH_TX_UNLOCK(sc); 4221 4222 ath_tx_default_comp(sc, bf, 0); 4223 } 4224 4225 4226 /* 4227 * This as it currently stands is a bit dumb. Ideally we'd just 4228 * fail the frame the normal way and have it permanently fail 4229 * via the normal aggregate completion path. 4230 */ 4231 static void 4232 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4233 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4234 { 4235 struct ath_tid *atid = &an->an_tid[tid]; 4236 struct ath_buf *bf, *bf_next; 4237 4238 ATH_TX_LOCK_ASSERT(sc); 4239 4240 /* 4241 * Remove this frame from the queue. 4242 */ 4243 ATH_TID_REMOVE(atid, bf_head, bf_list); 4244 4245 /* 4246 * Loop over all the frames in the aggregate. 4247 */ 4248 bf = bf_head; 4249 while (bf != NULL) { 4250 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4251 4252 /* 4253 * If it's been added to the BAW we need to kick 4254 * it out of the BAW before we continue. 4255 * 4256 * XXX if it's an aggregate, assert that it's in the 4257 * BAW - we shouldn't have it be in an aggregate 4258 * otherwise! 4259 */ 4260 if (bf->bf_state.bfs_addedbaw) { 4261 ath_tx_update_baw(sc, an, atid, bf); 4262 bf->bf_state.bfs_dobaw = 0; 4263 } 4264 4265 /* 4266 * Give it the default completion handler. 4267 */ 4268 bf->bf_comp = ath_tx_normal_comp; 4269 bf->bf_next = NULL; 4270 4271 /* 4272 * Add it to the list to free. 4273 */ 4274 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4275 4276 /* 4277 * Now advance to the next frame in the aggregate. 4278 */ 4279 bf = bf_next; 4280 } 4281 } 4282 4283 /* 4284 * Performs transmit side cleanup when TID changes from aggregated to 4285 * unaggregated and during reassociation. 4286 * 4287 * For now, this just tosses everything from the TID software queue 4288 * whether or not it has been retried and marks the TID as 4289 * pending completion if there's anything for this TID queued to 4290 * the hardware. 4291 * 4292 * The caller is responsible for pausing the TID and unpausing the 4293 * TID if no cleanup was required. Otherwise the cleanup path will 4294 * unpause the TID once the last hardware queued frame is completed. 
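 *
 * A minimal sketch of the expected caller pattern, mirroring
 * ath_addba_stop() later in this file (illustrative only, not
 * compiled; assumes the usual sc/an/tid/atid locals):
 */
#if 0
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	/* Nothing left on the hardware queue? Unpause straight away */
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);

	/* Fail the drained frames outside of the TX lock */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
#endif
/*
 * The implementation follows.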
4295 */ 4296 static void 4297 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4298 ath_bufhead *bf_cq) 4299 { 4300 struct ath_tid *atid = &an->an_tid[tid]; 4301 struct ath_buf *bf, *bf_next; 4302 4303 ATH_TX_LOCK_ASSERT(sc); 4304 4305 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4306 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4307 atid->cleanup_inprogress); 4308 4309 /* 4310 * Move the filtered frames to the TX queue, before 4311 * we run off and discard/process things. 4312 */ 4313 4314 /* XXX this is really quite inefficient */ 4315 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4316 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4317 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4318 } 4319 4320 /* 4321 * Update the frames in the software TX queue: 4322 * 4323 * + Discard retry frames in the queue 4324 * + Fix the completion function to be non-aggregate 4325 */ 4326 bf = ATH_TID_FIRST(atid); 4327 while (bf) { 4328 /* 4329 * Grab the next frame in the list, we may 4330 * be fiddling with the list. 4331 */ 4332 bf_next = TAILQ_NEXT(bf, bf_list); 4333 4334 /* 4335 * Free the frame and all subframes. 4336 */ 4337 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); 4338 4339 /* 4340 * Next frame! 4341 */ 4342 bf = bf_next; 4343 } 4344 4345 /* 4346 * If there's anything in the hardware queue we wait 4347 * for the TID HWQ to empty. 4348 */ 4349 if (atid->hwq_depth > 0) { 4350 /* 4351 * XXX how about we kill atid->incomp, and instead 4352 * replace it with a macro that checks that atid->hwq_depth 4353 * is 0? 4354 */ 4355 atid->incomp = atid->hwq_depth; 4356 atid->cleanup_inprogress = 1; 4357 } 4358 4359 if (atid->cleanup_inprogress) 4360 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4361 "%s: TID %d: cleanup needed: %d packets\n", 4362 __func__, tid, atid->incomp); 4363 4364 /* Owner now must free completed frames */ 4365 } 4366 4367 static struct ath_buf * 4368 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4369 struct ath_tid *tid, struct ath_buf *bf) 4370 { 4371 struct ath_buf *nbf; 4372 int error; 4373 4374 /* 4375 * Clone the buffer. This will handle the dma unmap and 4376 * copy the node reference to the new buffer. If this 4377 * works out, 'bf' will have no DMA mapping, no mbuf 4378 * pointer and no node reference. 4379 */ 4380 nbf = ath_buf_clone(sc, bf); 4381 4382 #if 0 4383 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4384 __func__); 4385 #endif 4386 4387 if (nbf == NULL) { 4388 /* Failed to clone */ 4389 DPRINTF(sc, ATH_DEBUG_XMIT, 4390 "%s: failed to clone a busy buffer\n", 4391 __func__); 4392 return NULL; 4393 } 4394 4395 /* Setup the dma for the new buffer */ 4396 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4397 if (error != 0) { 4398 DPRINTF(sc, ATH_DEBUG_XMIT, 4399 "%s: failed to setup dma for clone\n", 4400 __func__); 4401 /* 4402 * Put this at the head of the list, not tail; 4403 * that way it doesn't interfere with the 4404 * busy buffer logic (which uses the tail of 4405 * the list.) 4406 */ 4407 ATH_TXBUF_LOCK(sc); 4408 ath_returnbuf_head(sc, nbf); 4409 ATH_TXBUF_UNLOCK(sc); 4410 return NULL; 4411 } 4412 4413 /* Update BAW if required, before we free the original buf */ 4414 if (bf->bf_state.bfs_dobaw) 4415 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4416 4417 /* Free original buffer; return new buffer */ 4418 ath_freebuf(sc, bf); 4419 4420 return nbf; 4421 } 4422 4423 /* 4424 * Handle retrying an unaggregate frame in an aggregate 4425 * session. 
4426 * 4427 * If too many retries occur, pause the TID, wait for 4428 * any further retransmits (as there's no reason why 4429 * non-aggregate frames in an aggregate session are 4430 * transmitted in-order; they just have to be in-BAW) 4431 * and then queue a BAR. 4432 */ 4433 static void 4434 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4435 { 4436 struct ieee80211_node *ni = bf->bf_node; 4437 struct ath_node *an = ATH_NODE(ni); 4438 int tid = bf->bf_state.bfs_tid; 4439 struct ath_tid *atid = &an->an_tid[tid]; 4440 struct ieee80211_tx_ampdu *tap; 4441 4442 ATH_TX_LOCK(sc); 4443 4444 tap = ath_tx_get_tx_tid(an, tid); 4445 4446 /* 4447 * If the buffer is marked as busy, we can't directly 4448 * reuse it. Instead, try to clone the buffer. 4449 * If the clone is successful, recycle the old buffer. 4450 * If the clone is unsuccessful, set bfs_retries to max 4451 * to force the next bit of code to free the buffer 4452 * for us. 4453 */ 4454 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4455 (bf->bf_flags & ATH_BUF_BUSY)) { 4456 struct ath_buf *nbf; 4457 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4458 if (nbf) 4459 /* bf has been freed at this point */ 4460 bf = nbf; 4461 else 4462 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4463 } 4464 4465 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4466 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4467 "%s: exceeded retries; seqno %d\n", 4468 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4469 sc->sc_stats.ast_tx_swretrymax++; 4470 4471 /* Update BAW anyway */ 4472 if (bf->bf_state.bfs_dobaw) { 4473 ath_tx_update_baw(sc, an, atid, bf); 4474 if (! bf->bf_state.bfs_addedbaw) 4475 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4476 "%s: wasn't added: seqno %d\n", 4477 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4478 } 4479 bf->bf_state.bfs_dobaw = 0; 4480 4481 /* Suspend the TX queue and get ready to send the BAR */ 4482 ath_tx_tid_bar_suspend(sc, atid); 4483 4484 /* Send the BAR if there are no other frames waiting */ 4485 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4486 ath_tx_tid_bar_tx(sc, atid); 4487 4488 ATH_TX_UNLOCK(sc); 4489 4490 /* Free buffer, bf is free after this call */ 4491 ath_tx_default_comp(sc, bf, 0); 4492 return; 4493 } 4494 4495 /* 4496 * This increments the retry counter as well as 4497 * sets the retry flag in the ath_buf and packet 4498 * body. 4499 */ 4500 ath_tx_set_retry(sc, bf); 4501 sc->sc_stats.ast_tx_swretries++; 4502 4503 /* 4504 * Insert this at the head of the queue, so it's 4505 * retried before any current/subsequent frames. 4506 */ 4507 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4508 ath_tx_tid_sched(sc, atid); 4509 /* Send the BAR if there are no other frames waiting */ 4510 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4511 ath_tx_tid_bar_tx(sc, atid); 4512 4513 ATH_TX_UNLOCK(sc); 4514 } 4515 4516 /* 4517 * Common code for aggregate excessive retry/subframe retry. 4518 * If retrying, queues buffers to bf_q. If not, frees the 4519 * buffers. 
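 *
 * A sketch of how callers consume the return value, mirroring
 * ath_tx_comp_aggr_error() below (illustrative only, not compiled):
 */
#if 0
	if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
		/* Retries exhausted: account the drop, complete later */
		drops++;
		bf->bf_next = NULL;
		TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
	}
	/* else: bf now sits on bf_q, awaiting requeue at the TID head */
#endif
/*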
4520 * 4521 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4522 */ 4523 static int 4524 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4525 ath_bufhead *bf_q) 4526 { 4527 struct ieee80211_node *ni = bf->bf_node; 4528 struct ath_node *an = ATH_NODE(ni); 4529 int tid = bf->bf_state.bfs_tid; 4530 struct ath_tid *atid = &an->an_tid[tid]; 4531 4532 ATH_TX_LOCK_ASSERT(sc); 4533 4534 /* XXX clr11naggr should be done for all subframes */ 4535 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4536 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4537 4538 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4539 4540 /* 4541 * If the buffer is marked as busy, we can't directly 4542 * reuse it. Instead, try to clone the buffer. 4543 * If the clone is successful, recycle the old buffer. 4544 * If the clone is unsuccessful, set bfs_retries to max 4545 * to force the next bit of code to free the buffer 4546 * for us. 4547 */ 4548 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4549 (bf->bf_flags & ATH_BUF_BUSY)) { 4550 struct ath_buf *nbf; 4551 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4552 if (nbf) 4553 /* bf has been freed at this point */ 4554 bf = nbf; 4555 else 4556 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4557 } 4558 4559 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4560 sc->sc_stats.ast_tx_swretrymax++; 4561 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4562 "%s: max retries: seqno %d\n", 4563 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4564 ath_tx_update_baw(sc, an, atid, bf); 4565 if (!bf->bf_state.bfs_addedbaw) 4566 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4567 "%s: wasn't added: seqno %d\n", 4568 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4569 bf->bf_state.bfs_dobaw = 0; 4570 return 1; 4571 } 4572 4573 ath_tx_set_retry(sc, bf); 4574 sc->sc_stats.ast_tx_swretries++; 4575 bf->bf_next = NULL; /* Just to make sure */ 4576 4577 /* Clear the aggregate state */ 4578 bf->bf_state.bfs_aggr = 0; 4579 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4580 bf->bf_state.bfs_nframes = 1; 4581 4582 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4583 return 0; 4584 } 4585 4586 /* 4587 * error pkt completion for an aggregate destination 4588 */ 4589 static void 4590 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4591 struct ath_tid *tid) 4592 { 4593 struct ieee80211_node *ni = bf_first->bf_node; 4594 struct ath_node *an = ATH_NODE(ni); 4595 struct ath_buf *bf_next, *bf; 4596 ath_bufhead bf_q; 4597 int drops = 0; 4598 struct ieee80211_tx_ampdu *tap; 4599 ath_bufhead bf_cq; 4600 4601 TAILQ_INIT(&bf_q); 4602 TAILQ_INIT(&bf_cq); 4603 4604 /* 4605 * Update rate control - all frames have failed. 4606 * 4607 * XXX use the length in the first frame in the series; 4608 * XXX just so things are consistent for now. 
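 *
 * Passing bfs_nframes as both the nframes and nbad arguments to
 * ath_tx_update_ratectrl() below tells the rate control code that
 * every subframe in the aggregate failed.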
4609  */
4610	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4611	    &bf_first->bf_status.ds_txstat,
4612	    bf_first->bf_state.bfs_pktlen,
4613	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4614
4615	ATH_TX_LOCK(sc);
4616	tap = ath_tx_get_tx_tid(an, tid->tid);
4617	sc->sc_stats.ast_tx_aggr_failall++;
4618
4619	/* Retry all subframes */
4620	bf = bf_first;
4621	while (bf) {
4622		bf_next = bf->bf_next;
4623		bf->bf_next = NULL;	/* Remove it from the aggr list */
4624		sc->sc_stats.ast_tx_aggr_fail++;
4625		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4626			drops++;
4627			bf->bf_next = NULL;
4628			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4629		}
4630		bf = bf_next;
4631	}
4632
4633	/* Prepend all frames to the beginning of the queue */
4634	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4635		TAILQ_REMOVE(&bf_q, bf, bf_list);
4636		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4637	}
4638
4639	/*
4640	 * Schedule the TID to be re-tried.
4641	 */
4642	ath_tx_tid_sched(sc, tid);
4643
4644	/*
4645	 * send bar if we dropped any frames
4646	 *
4647	 * Keep the txq lock held for now, as we need to ensure
4648	 * that ni_txseqs[] is consistent (as it's being updated
4649	 * in the ifnet TX context or raw TX context.)
4650	 */
4651	if (drops) {
4652		/* Suspend the TX queue and get ready to send the BAR */
4653		ath_tx_tid_bar_suspend(sc, tid);
4654	}
4655
4656	/*
4657	 * Send BAR if required
4658	 */
4659	if (ath_tx_tid_bar_tx_ready(sc, tid))
4660		ath_tx_tid_bar_tx(sc, tid);
4661
4662	ATH_TX_UNLOCK(sc);
4663
4664	/* Complete frames which errored out */
4665	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4666		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4667		ath_tx_default_comp(sc, bf, 0);
4668	}
4669 }
4670
4671 /*
4672  * Handle clean-up of packets from an aggregate list.
4673  *
4674  * There's no need to update the BAW here - the session is being
4675  * torn down.
4676  */
4677 static void
4678 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4679 {
4680	struct ath_buf *bf, *bf_next;
4681	struct ieee80211_node *ni = bf_first->bf_node;
4682	struct ath_node *an = ATH_NODE(ni);
4683	int tid = bf_first->bf_state.bfs_tid;
4684	struct ath_tid *atid = &an->an_tid[tid];
4685
4686	ATH_TX_LOCK(sc);
4687
4688	/* update incomp */
4689	atid->incomp--;
4690
4691	/* Update the BAW */
4692	bf = bf_first;
4693	while (bf) {
4694		/* XXX refactor! */
4695		if (bf->bf_state.bfs_dobaw) {
4696			ath_tx_update_baw(sc, an, atid, bf);
4697			if (!bf->bf_state.bfs_addedbaw)
4698				DPRINTF(sc, ATH_DEBUG_SW_TX,
4699				    "%s: wasn't added: seqno %d\n",
4700				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4701		}
4702		bf = bf->bf_next;
4703	}
4704
4705	if (atid->incomp == 0) {
4706		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4707		    "%s: TID %d: cleaned up! resume!\n",
4708		    __func__, tid);
4709		atid->cleanup_inprogress = 0;
4710		ath_tx_tid_resume(sc, atid);
4711	}
4712
4713	/* Send BAR if required */
4714	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4715	/*
4716	 * XXX TODO: we should likely just tear down the BAR state here,
4717	 * rather than sending a BAR.
4718	 */
4719	if (ath_tx_tid_bar_tx_ready(sc, atid))
4720		ath_tx_tid_bar_tx(sc, atid);
4721
4722	ATH_TX_UNLOCK(sc);
4723
4724	/* Handle frame completion as individual frames */
4725	bf = bf_first;
4726	while (bf) {
4727		bf_next = bf->bf_next;
4728		bf->bf_next = NULL;
4729		ath_tx_default_comp(sc, bf, 1);
4730		bf = bf_next;
4731	}
4732 }
4733
4734 /*
4735  * Handle completion of a set of aggregate frames.
4736 * 4737 * Note: the completion handler is the last descriptor in the aggregate, 4738 * not the last descriptor in the first frame. 4739 */ 4740 static void 4741 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 4742 int fail) 4743 { 4744 //struct ath_desc *ds = bf->bf_lastds; 4745 struct ieee80211_node *ni = bf_first->bf_node; 4746 struct ath_node *an = ATH_NODE(ni); 4747 int tid = bf_first->bf_state.bfs_tid; 4748 struct ath_tid *atid = &an->an_tid[tid]; 4749 struct ath_tx_status ts; 4750 struct ieee80211_tx_ampdu *tap; 4751 ath_bufhead bf_q; 4752 ath_bufhead bf_cq; 4753 int seq_st, tx_ok; 4754 int hasba, isaggr; 4755 uint32_t ba[2]; 4756 struct ath_buf *bf, *bf_next; 4757 int ba_index; 4758 int drops = 0; 4759 int nframes = 0, nbad = 0, nf; 4760 int pktlen; 4761 /* XXX there's too much on the stack? */ 4762 struct ath_rc_series rc[ATH_RC_NUM]; 4763 int txseq; 4764 4765 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 4766 __func__, atid->hwq_depth); 4767 4768 /* 4769 * Take a copy; this may be needed -after- bf_first 4770 * has been completed and freed. 4771 */ 4772 ts = bf_first->bf_status.ds_txstat; 4773 4774 TAILQ_INIT(&bf_q); 4775 TAILQ_INIT(&bf_cq); 4776 4777 /* The TID state is kept behind the TXQ lock */ 4778 ATH_TX_LOCK(sc); 4779 4780 atid->hwq_depth--; 4781 if (atid->hwq_depth < 0) 4782 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", 4783 __func__, atid->hwq_depth); 4784 4785 /* 4786 * If the TID is filtered, handle completing the filter 4787 * transition before potentially kicking it to the cleanup 4788 * function. 4789 * 4790 * XXX this is duplicate work, ew. 4791 */ 4792 if (atid->isfiltered) 4793 ath_tx_tid_filt_comp_complete(sc, atid); 4794 4795 /* 4796 * Punt cleanup to the relevant function, not our problem now 4797 */ 4798 if (atid->cleanup_inprogress) { 4799 if (atid->isfiltered) 4800 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4801 "%s: isfiltered=1, normal_comp?\n", 4802 __func__); 4803 ATH_TX_UNLOCK(sc); 4804 ath_tx_comp_cleanup_aggr(sc, bf_first); 4805 return; 4806 } 4807 4808 /* 4809 * If the frame is filtered, transition to filtered frame 4810 * mode and add this to the filtered frame list. 4811 * 4812 * XXX TODO: figure out how this interoperates with 4813 * BAR, pause and cleanup states. 4814 */ 4815 if ((ts.ts_status & HAL_TXERR_FILT) || 4816 (ts.ts_status != 0 && atid->isfiltered)) { 4817 if (fail != 0) 4818 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4819 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4820 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4821 4822 /* Remove from BAW */ 4823 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4824 if (bf->bf_state.bfs_addedbaw) 4825 drops++; 4826 if (bf->bf_state.bfs_dobaw) { 4827 ath_tx_update_baw(sc, an, atid, bf); 4828 if (!bf->bf_state.bfs_addedbaw) 4829 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4830 "%s: wasn't added: seqno %d\n", 4831 __func__, 4832 SEQNO(bf->bf_state.bfs_seqno)); 4833 } 4834 bf->bf_state.bfs_dobaw = 0; 4835 } 4836 /* 4837 * If any intermediate frames in the BAW were dropped when 4838 * handling filtering things, send a BAR. 4839 */ 4840 if (drops) 4841 ath_tx_tid_bar_suspend(sc, atid); 4842 4843 /* 4844 * Finish up by sending a BAR if required and freeing 4845 * the frames outside of the TX lock. 4846 */ 4847 goto finish_send_bar; 4848 } 4849 4850 /* 4851 * XXX for now, use the first frame in the aggregate for 4852 * XXX rate control completion; it's at least consistent. 
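 *
 * The ts and rc copies taken near the top of this function are what
 * make that safe: once the subframes have been completed, bf_first
 * may be freed, so the rate control update at the end must work
 * from the copies rather than the descriptor state.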
4853  */
4854	pktlen = bf_first->bf_state.bfs_pktlen;
4855
4856	/*
4857	 * Handle errors first!
4858	 *
4859	 * Here, handle _any_ error as an "exceeded retries" error.
4860	 * Later on (when filtered frames are to be specially handled)
4861	 * it'll have to be expanded.
4862	 */
4863 #if 0
4864	if (ts.ts_status & HAL_TXERR_XRETRY) {
4865 #endif
4866	if (ts.ts_status != 0) {
4867		ATH_TX_UNLOCK(sc);
4868		ath_tx_comp_aggr_error(sc, bf_first, atid);
4869		return;
4870	}
4871
4872	tap = ath_tx_get_tx_tid(an, tid);
4873
4874	/*
4875	 * extract starting sequence and block-ack bitmap
4876	 */
4877	/* XXX endian-ness of seq_st, ba? */
4878	seq_st = ts.ts_seqnum;
4879	hasba = !! (ts.ts_flags & HAL_TX_BA);
4880	tx_ok = (ts.ts_status == 0);
4881	isaggr = bf_first->bf_state.bfs_aggr;
4882	ba[0] = ts.ts_ba_low;
4883	ba[1] = ts.ts_ba_high;
4884
4885	/*
4886	 * Copy the TX completion status and the rate control
4887	 * series from the first descriptor, as it may be freed
4888	 * before the rate control code can get its grubby fingers
4889	 * into things.
4890	 */
4891	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4892
4893	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4894	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4895	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4896	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4897	    isaggr, seq_st, hasba, ba[0], ba[1]);
4898
4899	/*
4900	 * The reference driver doesn't do this; it simply ignores
4901	 * this check in its entirety.
4902	 *
4903	 * I've seen this occur when using iperf to send traffic
4904	 * out tid 1 - the aggregate frames are all marked as TID 1,
4905	 * but the TXSTATUS has TID=0. So, let's just ignore this
4906	 * check.
4907	 */
4908 #if 0
4909	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4910	if (tid != ts.ts_tid) {
4911		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4912		    __func__, tid, ts.ts_tid);
4913		tx_ok = 0;
4914	}
4915 #endif
4916
4917	/* AR5416 BA bug; this requires an interface reset */
4918	if (isaggr && tx_ok && (! hasba)) {
4919		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4920		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4921		    "seq_st=%d\n",
4922		    __func__, hasba, tx_ok, isaggr, seq_st);
4923		/* XXX TODO: schedule an interface reset */
4924 #ifdef ATH_DEBUG
4925		ath_printtxbuf(sc, bf_first,
4926		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4927 #endif
4928	}
4929
4930	/*
4931	 * Walk the list of frames, figure out which ones were correctly
4932	 * sent and which weren't.
4933	 */
4934	bf = bf_first;
4935	nf = bf_first->bf_state.bfs_nframes;
4936
4937	/* bf_first is going to be invalid once this list is walked */
4938	bf_first = NULL;
4939
4940	/*
4941	 * Walk the list of completed frames and determine
4942	 * which need to be completed and which need to be
4943	 * retransmitted.
4944	 *
4945	 * For completed frames, the completion functions need
4946	 * to be called at the end of this function as the last
4947	 * node reference may free the node.
4948	 *
4949	 * Finally, since the TXQ lock can't be held during the
4950	 * completion callback (to avoid lock recursion),
4951	 * the completion calls have to be done outside of the
4952	 * lock.
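 *
 * The per-frame test below uses the block-ack bitmap from the TX
 * status. As a worked example (illustrative values only): if the BA
 * starting sequence seq_st is 100 and a subframe carries seqno 103,
 * ATH_BA_INDEX(100, 103) yields offset 3, and ATH_BA_ISSET(ba, 3)
 * tests bit 3 of the 64-bit bitmap held in ba[0]/ba[1]. A set bit
 * means the receiver acked that subframe; a clear bit makes it a
 * retry candidate.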
4953 */ 4954 while (bf) { 4955 nframes++; 4956 ba_index = ATH_BA_INDEX(seq_st, 4957 SEQNO(bf->bf_state.bfs_seqno)); 4958 bf_next = bf->bf_next; 4959 bf->bf_next = NULL; /* Remove it from the aggr list */ 4960 4961 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4962 "%s: checking bf=%p seqno=%d; ack=%d\n", 4963 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 4964 ATH_BA_ISSET(ba, ba_index)); 4965 4966 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 4967 sc->sc_stats.ast_tx_aggr_ok++; 4968 ath_tx_update_baw(sc, an, atid, bf); 4969 bf->bf_state.bfs_dobaw = 0; 4970 if (!bf->bf_state.bfs_addedbaw) 4971 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4972 "%s: wasn't added: seqno %d\n", 4973 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4974 bf->bf_next = NULL; 4975 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4976 } else { 4977 sc->sc_stats.ast_tx_aggr_fail++; 4978 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4979 drops++; 4980 bf->bf_next = NULL; 4981 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4982 } 4983 nbad++; 4984 } 4985 bf = bf_next; 4986 } 4987 4988 /* 4989 * Now that the BAW updates have been done, unlock 4990 * 4991 * txseq is grabbed before the lock is released so we 4992 * have a consistent view of what -was- in the BAW. 4993 * Anything after this point will not yet have been 4994 * TXed. 4995 */ 4996 txseq = tap->txa_start; 4997 ATH_TX_UNLOCK(sc); 4998 4999 if (nframes != nf) 5000 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5001 "%s: num frames seen=%d; bf nframes=%d\n", 5002 __func__, nframes, nf); 5003 5004 /* 5005 * Now we know how many frames were bad, call the rate 5006 * control code. 5007 */ 5008 if (fail == 0) 5009 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 5010 nbad); 5011 5012 /* 5013 * send bar if we dropped any frames 5014 */ 5015 if (drops) { 5016 /* Suspend the TX queue and get ready to send the BAR */ 5017 ATH_TX_LOCK(sc); 5018 ath_tx_tid_bar_suspend(sc, atid); 5019 ATH_TX_UNLOCK(sc); 5020 } 5021 5022 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5023 "%s: txa_start now %d\n", __func__, tap->txa_start); 5024 5025 ATH_TX_LOCK(sc); 5026 5027 /* Prepend all frames to the beginning of the queue */ 5028 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5029 TAILQ_REMOVE(&bf_q, bf, bf_list); 5030 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5031 } 5032 5033 /* 5034 * Reschedule to grab some further frames. 5035 */ 5036 ath_tx_tid_sched(sc, atid); 5037 5038 /* 5039 * If the queue is filtered, re-schedule as required. 5040 * 5041 * This is required as there may be a subsequent TX descriptor 5042 * for this end-node that has CLRDMASK set, so it's quite possible 5043 * that a filtered frame will be followed by a non-filtered 5044 * (complete or otherwise) frame. 5045 * 5046 * XXX should we do this before we complete the frame? 5047 */ 5048 if (atid->isfiltered) 5049 ath_tx_tid_filt_comp_complete(sc, atid); 5050 5051 finish_send_bar: 5052 5053 /* 5054 * Send BAR if required 5055 */ 5056 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5057 ath_tx_tid_bar_tx(sc, atid); 5058 5059 ATH_TX_UNLOCK(sc); 5060 5061 /* Do deferred completion */ 5062 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5063 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5064 ath_tx_default_comp(sc, bf, 0); 5065 } 5066 } 5067 5068 /* 5069 * Handle completion of unaggregated frames in an ADDBA 5070 * session. 5071 * 5072 * Fail is set to 1 if the entry is being freed via a call to 5073 * ath_tx_draintxq(). 
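 *
 * When fail is non-zero the frame is being torn down rather than
 * completed by the hardware, so both the rate control update and
 * the software retry path are skipped; the buffer is simply handed
 * back via ath_tx_default_comp().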
5074 */ 5075 static void 5076 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5077 { 5078 struct ieee80211_node *ni = bf->bf_node; 5079 struct ath_node *an = ATH_NODE(ni); 5080 int tid = bf->bf_state.bfs_tid; 5081 struct ath_tid *atid = &an->an_tid[tid]; 5082 struct ath_tx_status ts; 5083 int drops = 0; 5084 5085 /* 5086 * Take a copy of this; filtering/cloning the frame may free the 5087 * bf pointer. 5088 */ 5089 ts = bf->bf_status.ds_txstat; 5090 5091 /* 5092 * Update rate control status here, before we possibly 5093 * punt to retry or cleanup. 5094 * 5095 * Do it outside of the TXQ lock. 5096 */ 5097 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5098 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5099 &bf->bf_status.ds_txstat, 5100 bf->bf_state.bfs_pktlen, 5101 1, (ts.ts_status == 0) ? 0 : 1); 5102 5103 /* 5104 * This is called early so atid->hwq_depth can be tracked. 5105 * This unfortunately means that it's released and regrabbed 5106 * during retry and cleanup. That's rather inefficient. 5107 */ 5108 ATH_TX_LOCK(sc); 5109 5110 if (tid == IEEE80211_NONQOS_TID) 5111 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 5112 5113 DPRINTF(sc, ATH_DEBUG_SW_TX, 5114 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 5115 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 5116 SEQNO(bf->bf_state.bfs_seqno)); 5117 5118 atid->hwq_depth--; 5119 if (atid->hwq_depth < 0) 5120 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 5121 __func__, atid->hwq_depth); 5122 5123 /* 5124 * If the TID is filtered, handle completing the filter 5125 * transition before potentially kicking it to the cleanup 5126 * function. 5127 */ 5128 if (atid->isfiltered) 5129 ath_tx_tid_filt_comp_complete(sc, atid); 5130 5131 /* 5132 * If a cleanup is in progress, punt to comp_cleanup; 5133 * rather than handling it here. It's thus their 5134 * responsibility to clean up, call the completion 5135 * function in net80211, etc. 5136 */ 5137 if (atid->cleanup_inprogress) { 5138 if (atid->isfiltered) 5139 DPRINTF(sc, ATH_DEBUG_SW_TX, 5140 "%s: isfiltered=1, normal_comp?\n", 5141 __func__); 5142 ATH_TX_UNLOCK(sc); 5143 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5144 __func__); 5145 ath_tx_comp_cleanup_unaggr(sc, bf); 5146 return; 5147 } 5148 5149 /* 5150 * XXX TODO: how does cleanup, BAR and filtered frame handling 5151 * overlap? 5152 * 5153 * If the frame is filtered OR if it's any failure but 5154 * the TID is filtered, the frame must be added to the 5155 * filtered frame list. 5156 * 5157 * However - a busy buffer can't be added to the filtered 5158 * list as it will end up being recycled without having 5159 * been made available for the hardware. 5160 */ 5161 if ((ts.ts_status & HAL_TXERR_FILT) || 5162 (ts.ts_status != 0 && atid->isfiltered)) { 5163 int freeframe; 5164 5165 if (fail != 0) 5166 DPRINTF(sc, ATH_DEBUG_SW_TX, 5167 "%s: isfiltered=1, fail=%d\n", 5168 __func__, fail); 5169 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5170 /* 5171 * If freeframe=0 then bf is no longer ours; don't 5172 * touch it. 
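 *
 * In other words, ath_tx_tid_filt_comp_single() returns non-zero
 * only when it could not take ownership of the buffer (eg the clone
 * of a busy buffer failed), leaving this path to unwind the BAW
 * state and complete the frame itself.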
5173 */ 5174 if (freeframe) { 5175 /* Remove from BAW */ 5176 if (bf->bf_state.bfs_addedbaw) 5177 drops++; 5178 if (bf->bf_state.bfs_dobaw) { 5179 ath_tx_update_baw(sc, an, atid, bf); 5180 if (!bf->bf_state.bfs_addedbaw) 5181 DPRINTF(sc, ATH_DEBUG_SW_TX, 5182 "%s: wasn't added: seqno %d\n", 5183 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5184 } 5185 bf->bf_state.bfs_dobaw = 0; 5186 } 5187 5188 /* 5189 * If the frame couldn't be filtered, treat it as a drop and 5190 * prepare to send a BAR. 5191 */ 5192 if (freeframe && drops) 5193 ath_tx_tid_bar_suspend(sc, atid); 5194 5195 /* 5196 * Send BAR if required 5197 */ 5198 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5199 ath_tx_tid_bar_tx(sc, atid); 5200 5201 ATH_TX_UNLOCK(sc); 5202 /* 5203 * If freeframe is set, then the frame couldn't be 5204 * cloned and bf is still valid. Just complete/free it. 5205 */ 5206 if (freeframe) 5207 ath_tx_default_comp(sc, bf, fail); 5208 5209 return; 5210 } 5211 /* 5212 * Don't bother with the retry check if all frames 5213 * are being failed (eg during queue deletion.) 5214 */ 5215 #if 0 5216 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5217 #endif 5218 if (fail == 0 && ts.ts_status != 0) { 5219 ATH_TX_UNLOCK(sc); 5220 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5221 __func__); 5222 ath_tx_aggr_retry_unaggr(sc, bf); 5223 return; 5224 } 5225 5226 /* Success? Complete */ 5227 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 5228 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 5229 if (bf->bf_state.bfs_dobaw) { 5230 ath_tx_update_baw(sc, an, atid, bf); 5231 bf->bf_state.bfs_dobaw = 0; 5232 if (!bf->bf_state.bfs_addedbaw) 5233 DPRINTF(sc, ATH_DEBUG_SW_TX, 5234 "%s: wasn't added: seqno %d\n", 5235 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5236 } 5237 5238 /* 5239 * If the queue is filtered, re-schedule as required. 5240 * 5241 * This is required as there may be a subsequent TX descriptor 5242 * for this end-node that has CLRDMASK set, so it's quite possible 5243 * that a filtered frame will be followed by a non-filtered 5244 * (complete or otherwise) frame. 5245 * 5246 * XXX should we do this before we complete the frame? 5247 */ 5248 if (atid->isfiltered) 5249 ath_tx_tid_filt_comp_complete(sc, atid); 5250 5251 /* 5252 * Send BAR if required 5253 */ 5254 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5255 ath_tx_tid_bar_tx(sc, atid); 5256 5257 ATH_TX_UNLOCK(sc); 5258 5259 ath_tx_default_comp(sc, bf, fail); 5260 /* bf is freed at this point */ 5261 } 5262 5263 void 5264 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 5265 { 5266 if (bf->bf_state.bfs_aggr) 5267 ath_tx_aggr_comp_aggr(sc, bf, fail); 5268 else 5269 ath_tx_aggr_comp_unaggr(sc, bf, fail); 5270 } 5271 5272 /* 5273 * Schedule some packets from the given node/TID to the hardware. 5274 * 5275 * This is the aggregate version. 5276 */ 5277 void 5278 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 5279 struct ath_tid *tid) 5280 { 5281 struct ath_buf *bf; 5282 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5283 struct ieee80211_tx_ampdu *tap; 5284 ATH_AGGR_STATUS status; 5285 ath_bufhead bf_q; 5286 5287 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 5288 ATH_TX_LOCK_ASSERT(sc); 5289 5290 /* 5291 * XXX TODO: If we're called for a queue that we're leaking frames to, 5292 * ensure we only leak one. 
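 *
 * (Background: "leaking" here refers to the power-save case - a
 * node that has sent a PS-POLL is owed a single frame, tracked via
 * an_leak_count and applied per-frame by ath_tx_leak_count_update()
 * further down.)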
5293  */
5294
5295	tap = ath_tx_get_tx_tid(an, tid->tid);
5296
5297	if (tid->tid == IEEE80211_NONQOS_TID)
5298		DPRINTF(sc, ATH_DEBUG_SW_TX,
5299		    "%s: called for TID=NONQOS_TID?\n", __func__);
5300
5301	for (;;) {
5302		status = ATH_AGGR_DONE;
5303
5304		/*
5305		 * If the upper layer has paused the TID, don't
5306		 * queue any further packets.
5307		 *
5308		 * This can also occur from the completion task because
5309		 * of packet loss; but as it's serialised with this code,
5310		 * it won't "appear" half way through queuing packets.
5311		 */
5312		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5313			break;
5314
5315		bf = ATH_TID_FIRST(tid);
5316		if (bf == NULL) {
5317			break;
5318		}
5319
5320		/*
5321		 * If the packet doesn't fall within the BAW (eg a NULL
5322		 * data frame), schedule it directly; continue.
5323		 */
5324		if (! bf->bf_state.bfs_dobaw) {
5325			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5326			    "%s: non-baw packet\n",
5327			    __func__);
5328			ATH_TID_REMOVE(tid, bf, bf_list);
5329
5330			if (bf->bf_state.bfs_nframes > 1)
5331				DPRINTF(sc, ATH_DEBUG_SW_TX,
5332				    "%s: aggr=%d, nframes=%d\n",
5333				    __func__,
5334				    bf->bf_state.bfs_aggr,
5335				    bf->bf_state.bfs_nframes);
5336
5337			/*
5338			 * This shouldn't happen - such frames shouldn't
5339			 * ever have been queued as an aggregate in the
5340			 * first place. However, make sure the fields
5341			 * are correctly setup just to be totally sure.
5342			 */
5343			bf->bf_state.bfs_aggr = 0;
5344			bf->bf_state.bfs_nframes = 1;
5345
5346			/* Update CLRDMASK just before this frame is queued */
5347			ath_tx_update_clrdmask(sc, tid, bf);
5348
5349			ath_tx_do_ratelookup(sc, bf);
5350			ath_tx_calc_duration(sc, bf);
5351			ath_tx_calc_protection(sc, bf);
5352			ath_tx_set_rtscts(sc, bf);
5353			ath_tx_rate_fill_rcflags(sc, bf);
5354			ath_tx_setds(sc, bf);
5355			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5356
5357			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5358
5359			/* Queue the packet; continue */
5360			goto queuepkt;
5361		}
5362
5363		TAILQ_INIT(&bf_q);
5364
5365		/*
5366		 * Do a rate control lookup on the first frame in the
5367		 * list. The rate control code needs that to occur
5368		 * before it can determine whether to TX.
5369		 * It's inaccurate because the rate control code doesn't
5370		 * really "do" aggregate lookups, so it only considers
5371		 * the size of the first frame.
5372		 */
5373		ath_tx_do_ratelookup(sc, bf);
5374		bf->bf_state.bfs_rc[3].rix = 0;
5375		bf->bf_state.bfs_rc[3].tries = 0;
5376
5377		ath_tx_calc_duration(sc, bf);
5378		ath_tx_calc_protection(sc, bf);
5379
5380		ath_tx_set_rtscts(sc, bf);
5381		ath_tx_rate_fill_rcflags(sc, bf);
5382
5383		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5384
5385		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5386		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5387
5388		/*
5389		 * No frames to be picked up - out of BAW
5390		 */
5391		if (TAILQ_EMPTY(&bf_q))
5392			break;
5393
5394		/*
5395		 * This assumes that the descriptors in the ath_bufhead
5396		 * are already linked together via bf_next pointers.
5397		 */
5398		bf = TAILQ_FIRST(&bf_q);
5399
5400		if (status == ATH_AGGR_8K_LIMITED)
5401			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5402
5403		/*
5404		 * If it's the only frame, send it as a non-aggregate;
5405		 * assume that ath_tx_form_aggr() has checked
5406		 * whether it's in the BAW and added it appropriately.
5407  */
5408	if (bf->bf_state.bfs_nframes == 1) {
5409		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5410		    "%s: single-frame aggregate\n", __func__);
5411
5412		/* Update CLRDMASK just before this frame is queued */
5413		ath_tx_update_clrdmask(sc, tid, bf);
5414
5415		bf->bf_state.bfs_aggr = 0;
5416		bf->bf_state.bfs_ndelim = 0;
5417		ath_tx_setds(sc, bf);
5418		ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5419		if (status == ATH_AGGR_BAW_CLOSED)
5420			sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5421		else
5422			sc->sc_aggr_stats.aggr_single_pkt++;
5423	} else {
5424		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5425		    "%s: multi-frame aggregate: %d frames, "
5426		    "length %d\n",
5427		    __func__, bf->bf_state.bfs_nframes,
5428		    bf->bf_state.bfs_al);
5429		bf->bf_state.bfs_aggr = 1;
5430		sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5431		sc->sc_aggr_stats.aggr_aggr_pkt++;
5432
5433		/* Update CLRDMASK just before this frame is queued */
5434		ath_tx_update_clrdmask(sc, tid, bf);
5435
5436		/*
5437		 * Calculate the duration/protection as required.
5438		 */
5439		ath_tx_calc_duration(sc, bf);
5440		ath_tx_calc_protection(sc, bf);
5441
5442		/*
5443		 * Update the rate and rtscts information based on the
5444		 * rate decision made by the rate control code;
5445		 * the first frame in the aggregate needs it.
5446		 */
5447		ath_tx_set_rtscts(sc, bf);
5448
5449		/*
5450		 * Setup the relevant descriptor fields
5451		 * for aggregation. The first descriptor
5452		 * already points to the rest in the chain.
5453		 */
5454		ath_tx_setds_11n(sc, bf);
5455
5456	}
5457 queuepkt:
5458	/* Set completion handler, multi-frame aggregate or not */
5459	bf->bf_comp = ath_tx_aggr_comp;
5460
5461	if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5462		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5463
5464	/*
5465	 * Update leak count and frame config if we're leaking frames.
5466	 *
5467	 * XXX TODO: it should update all frames in an aggregate
5468	 * correctly!
5469	 */
5470	ath_tx_leak_count_update(sc, tid, bf);
5471
5472	/* Punt to txq */
5473	ath_tx_handoff(sc, txq, bf);
5474
5475	/* Track outstanding buffer count to hardware */
5476	/* aggregates are "one" buffer */
5477	tid->hwq_depth++;
5478
5479	/*
5480	 * Break out if ath_tx_form_aggr() indicated
5481	 * there can't be any further progress (eg BAW is full.)
5482	 * Checking for an empty txq is done above.
5483	 *
5484	 * XXX locking on txq here?
5485	 */
5486	/* XXX TXQ locking */
5487	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5488	    (status == ATH_AGGR_BAW_CLOSED ||
5489	    status == ATH_AGGR_LEAK_CLOSED))
5490		break;
5491	}
5492 }
5493
5494 /*
5495  * Schedule some packets from the given node/TID to the hardware.
5496  *
5497  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5498  * It just dumps frames into the TXQ. We should limit how deep
5499  * the transmit queue can grow for frames dispatched to the given
5500  * TXQ.
5501  *
5502  * To avoid locking issues, either we need to own the TXQ lock
5503  * at this point, or we need to pass in the maximum frame count
5504  * from the caller.
5505  */
5506 void
5507 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5508     struct ath_tid *tid)
5509 {
5510	struct ath_buf *bf;
5511	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5512
5513	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5514	    __func__, an, tid->tid);
5515
5516	ATH_TX_LOCK_ASSERT(sc);
5517
5518	/* Check - is AMPDU pending or running?
then print out something */ 5519 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 5520 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", 5521 __func__, tid->tid); 5522 if (ath_tx_ampdu_running(sc, an, tid->tid)) 5523 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", 5524 __func__, tid->tid); 5525 5526 for (;;) { 5527 5528 /* 5529 * If the upper layers have paused the TID, don't 5530 * queue any further packets. 5531 * 5532 * XXX if we are leaking frames, make sure we decrement 5533 * that counter _and_ we continue here. 5534 */ 5535 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5536 break; 5537 5538 bf = ATH_TID_FIRST(tid); 5539 if (bf == NULL) { 5540 break; 5541 } 5542 5543 ATH_TID_REMOVE(tid, bf, bf_list); 5544 5545 /* Sanity check! */ 5546 if (tid->tid != bf->bf_state.bfs_tid) { 5547 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" 5548 " tid %d\n", __func__, bf->bf_state.bfs_tid, 5549 tid->tid); 5550 } 5551 /* Normal completion handler */ 5552 bf->bf_comp = ath_tx_normal_comp; 5553 5554 /* 5555 * Override this for now, until the non-aggregate 5556 * completion handler correctly handles software retransmits. 5557 */ 5558 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5559 5560 /* Update CLRDMASK just before this frame is queued */ 5561 ath_tx_update_clrdmask(sc, tid, bf); 5562 5563 /* Program descriptors + rate control */ 5564 ath_tx_do_ratelookup(sc, bf); 5565 ath_tx_calc_duration(sc, bf); 5566 ath_tx_calc_protection(sc, bf); 5567 ath_tx_set_rtscts(sc, bf); 5568 ath_tx_rate_fill_rcflags(sc, bf); 5569 ath_tx_setds(sc, bf); 5570 5571 /* 5572 * Update the current leak count if 5573 * we're leaking frames; and set the 5574 * MORE flag as appropriate. 5575 */ 5576 ath_tx_leak_count_update(sc, tid, bf); 5577 5578 /* Track outstanding buffer count to hardware */ 5579 /* aggregates are "one" buffer */ 5580 tid->hwq_depth++; 5581 5582 /* Punt to hardware or software txq */ 5583 ath_tx_handoff(sc, txq, bf); 5584 } 5585 } 5586 5587 /* 5588 * Schedule some packets to the given hardware queue. 5589 * 5590 * This function walks the list of TIDs (ie, ath_node TIDs 5591 * with queued traffic) and attempts to schedule traffic 5592 * from them. 5593 * 5594 * TID scheduling is implemented as a FIFO, with TIDs being 5595 * added to the end of the queue after some frames have been 5596 * scheduled. 5597 */ 5598 void 5599 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5600 { 5601 struct ath_tid *tid, *next, *last; 5602 5603 ATH_TX_LOCK_ASSERT(sc); 5604 5605 /* 5606 * Don't schedule if the hardware queue is busy. 5607 * This (hopefully) gives some more time to aggregate 5608 * some packets in the aggregation queue. 5609 * 5610 * XXX It doesn't stop a parallel sender from sneaking 5611 * in transmitting a frame! 5612 */ 5613 /* XXX TXQ locking */ 5614 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5615 sc->sc_aggr_stats.aggr_sched_nopkt++; 5616 return; 5617 } 5618 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5619 sc->sc_aggr_stats.aggr_sched_nopkt++; 5620 return; 5621 } 5622 5623 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5624 5625 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5626 /* 5627 * Suspend paused queues here; they'll be resumed 5628 * once the addba completes or times out. 5629 */ 5630 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5631 __func__, tid->tid, tid->paused); 5632 ath_tx_tid_unsched(sc, tid); 5633 /* 5634 * This node may be in power-save and we're leaking 5635 * a frame; be careful. 5636 */ 5637 if (! 
ath_tx_tid_can_tx_or_sched(sc, tid)) { 5638 goto loop_done; 5639 } 5640 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5641 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5642 else 5643 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5644 5645 /* Not empty? Re-schedule */ 5646 if (tid->axq_depth != 0) 5647 ath_tx_tid_sched(sc, tid); 5648 5649 /* 5650 * Give the software queue time to aggregate more 5651 * packets. If we aren't running aggregation then 5652 * we should still limit the hardware queue depth. 5653 */ 5654 /* XXX TXQ locking */ 5655 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5656 break; 5657 } 5658 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5659 break; 5660 } 5661 loop_done: 5662 /* 5663 * If this was the last entry on the original list, stop. 5664 * Otherwise nodes that have been rescheduled onto the end 5665 * of the TID FIFO list will just keep being rescheduled. 5666 * 5667 * XXX What should we do about nodes that were paused 5668 * but are pending a leaking frame in response to a ps-poll? 5669 * They'll be put at the front of the list; so they'll 5670 * prematurely trigger this condition! Ew. 5671 */ 5672 if (tid == last) 5673 break; 5674 } 5675 } 5676 5677 /* 5678 * TX addba handling 5679 */ 5680 5681 /* 5682 * Return net80211 TID struct pointer, or NULL for none 5683 */ 5684 struct ieee80211_tx_ampdu * 5685 ath_tx_get_tx_tid(struct ath_node *an, int tid) 5686 { 5687 struct ieee80211_node *ni = &an->an_node; 5688 struct ieee80211_tx_ampdu *tap; 5689 5690 if (tid == IEEE80211_NONQOS_TID) 5691 return NULL; 5692 5693 tap = &ni->ni_tx_ampdu[tid]; 5694 return tap; 5695 } 5696 5697 /* 5698 * Is AMPDU-TX running? 5699 */ 5700 static int 5701 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5702 { 5703 struct ieee80211_tx_ampdu *tap; 5704 5705 if (tid == IEEE80211_NONQOS_TID) 5706 return 0; 5707 5708 tap = ath_tx_get_tx_tid(an, tid); 5709 if (tap == NULL) 5710 return 0; /* Not valid; default to not running */ 5711 5712 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5713 } 5714 5715 /* 5716 * Is AMPDU-TX negotiation pending? 5717 */ 5718 static int 5719 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5720 { 5721 struct ieee80211_tx_ampdu *tap; 5722 5723 if (tid == IEEE80211_NONQOS_TID) 5724 return 0; 5725 5726 tap = ath_tx_get_tx_tid(an, tid); 5727 if (tap == NULL) 5728 return 0; /* Not valid; default to not pending */ 5729 5730 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5731 } 5732 5733 /* 5734 * Is AMPDU-TX pending for the given TID? 5735 */ 5736 5737 5738 /* 5739 * Method to handle sending an ADDBA request. 5740 * 5741 * We tap this so the relevant flags can be set to pause the TID 5742 * whilst waiting for the response. 5743 * 5744 * XXX there's no timeout handler we can override? 5745 */ 5746 int 5747 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5748 int dialogtoken, int baparamset, int batimeout) 5749 { 5750 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5751 int tid = tap->txa_tid; 5752 struct ath_node *an = ATH_NODE(ni); 5753 struct ath_tid *atid = &an->an_tid[tid]; 5754 5755 /* 5756 * XXX danger Will Robinson! 5757 * 5758 * Although the taskqueue may be running and scheduling some more 5759 * packets, these should all be _before_ the addba sequence number. 5760 * However, net80211 will keep self-assigning sequence numbers 5761 * until addba has been negotiated. 
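 *
 * As a worked example (illustrative numbers only): if the ADDBA
 * exchange negotiates a BAW left edge of seqno 100 but net80211 has
 * already self-assigned sequence numbers up to 112 for queued
 * frames, the first frames of the session start 12 seqnos "ahead"
 * of the window the responder is expecting.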
5762 * 5763 * In the past, these packets would be "paused" (which still works 5764 * fine, as they're being scheduled to the driver in the same 5765 * serialised method which is calling the addba request routine) 5766 * and when the aggregation session begins, they'll be dequeued 5767 * as aggregate packets and added to the BAW. However, now there's 5768 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5769 * packets. Thus they never get included in the BAW tracking and 5770 * this can cause the initial burst of packets after the addba 5771 * negotiation to "hang", as they quickly fall outside the BAW. 5772 * 5773 * The "eventual" solution should be to tag these packets with 5774 * dobaw. Although net80211 has given us a sequence number, 5775 * it'll be "after" the left edge of the BAW and thus it'll 5776 * fall within it. 5777 */ 5778 ATH_TX_LOCK(sc); 5779 /* 5780 * This is a bit annoying. Until net80211 HT code inherits some 5781 * (any) locking, we may have this called in parallel BUT only 5782 * one response/timeout will be called. Grr. 5783 */ 5784 if (atid->addba_tx_pending == 0) { 5785 ath_tx_tid_pause(sc, atid); 5786 atid->addba_tx_pending = 1; 5787 } 5788 ATH_TX_UNLOCK(sc); 5789 5790 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5791 "%s: %s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5792 __func__, 5793 ath_hal_ether_sprintf(ni->ni_macaddr), 5794 dialogtoken, baparamset, batimeout); 5795 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5796 "%s: txa_start=%d, ni_txseqs=%d\n", 5797 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5798 5799 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5800 batimeout); 5801 } 5802 5803 /* 5804 * Handle an ADDBA response. 5805 * 5806 * We unpause the queue so TX'ing can resume. 5807 * 5808 * Any packets TX'ed from this point should be "aggregate" (whether 5809 * aggregate or not) so the BAW is updated. 5810 * 5811 * Note! net80211 keeps self-assigning sequence numbers until 5812 * ampdu is negotiated. This means the initially-negotiated BAW left 5813 * edge won't match the ni->ni_txseq. 5814 * 5815 * So, being very dirty, the BAW left edge is "slid" here to match 5816 * ni->ni_txseq. 5817 * 5818 * What likely SHOULD happen is that all packets subsequent to the 5819 * addba request should be tagged as aggregate and queued as non-aggregate 5820 * frames; thus updating the BAW. For now though, I'll just slide the 5821 * window. 5822 */ 5823 int 5824 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5825 int status, int code, int batimeout) 5826 { 5827 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5828 int tid = tap->txa_tid; 5829 struct ath_node *an = ATH_NODE(ni); 5830 struct ath_tid *atid = &an->an_tid[tid]; 5831 int r; 5832 5833 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5834 "%s: %s: called; status=%d, code=%d, batimeout=%d\n", __func__, 5835 ath_hal_ether_sprintf(ni->ni_macaddr), 5836 status, code, batimeout); 5837 5838 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5839 "%s: txa_start=%d, ni_txseqs=%d\n", 5840 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5841 5842 /* 5843 * Call this first, so the interface flags get updated 5844 * before the TID is unpaused. Otherwise a race condition 5845 * exists where the unpaused TID still doesn't yet have 5846 * IEEE80211_AGGR_RUNNING set. 5847 */ 5848 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5849 5850 ATH_TX_LOCK(sc); 5851 atid->addba_tx_pending = 0; 5852 /* 5853 * XXX dirty! 5854 * Slide the BAW left edge to wherever net80211 left it for us. 
/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %s: called\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr));

	/*
	 * Pause TID traffic early, so there aren't any races.
	 * Unblock the pending BAR-held traffic, if it's currently paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path.  This quietens
		 * the warning.  It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);

	/*
	 * In case there's a followup call to this, only start the
	 * cleanup if we don't already have one in progress.
	 *
	 * Since we've paused the queue above, we need to make sure we
	 * unpause if there's already a cleanup in progress - it means
	 * something else is also doing this stuff, so we don't need to
	 * also keep it paused.
	 */
	if (atid->cleanup_inprogress) {
		ath_tx_tid_resume(sc, atid);
	} else {
		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
		/*
		 * Unpause the TID if no cleanup is required.
		 */
		if (! atid->cleanup_inprogress)
			ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
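/*
 * Illustrative sketch (not compiled): the deferred-completion pattern
 * used above and in ath_tx_node_reassoc() below.  Frames are collected
 * onto a local list whilst the TX lock is held, then completed (as
 * failures) only after the lock has been dropped, since the completion
 * handlers may want to take TX locks themselves.
 */
#if 0
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	/* ... detach frames from the TID and append them to bf_cq ... */
	ATH_TX_UNLOCK(sc);

	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);	/* fail = 1 */
	}
#endif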
/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %s: TID %d: cleaning up TID\n",
		    __func__,
		    ath_hal_ether_sprintf(an->an_node.ni_macaddr),
		    i);
		/*
		 * In case there's a followup call to this, only start
		 * the cleanup if we don't already have one in progress.
		 */
		if (! tid->cleanup_inprogress) {
			ath_tx_tid_pause(sc, tid);
			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
			/*
			 * Unpause the TID if no cleanup is required.
			 */
			if (! tid->cleanup_inprogress)
				ath_tx_tid_resume(sc, tid);
		}
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;
	int old_txa_start;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %s: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);

	/* Note: This may update the BAW details */
	/*
	 * XXX What if this does slide the BAW along?  We need to somehow
	 * XXX either fix things up when it does happen, or prevent the
	 * XXX seqpending value from being anything other than exactly
	 * XXX what we want!
	 *
	 * XXX So for now, do it inside the TX lock and just correct it
	 * XXX afterwards?  The condition below should never happen and
	 * XXX if it does, all kinds of things need fixing.
	 */
	ATH_TX_LOCK(sc);
	old_txa_start = tap->txa_start;
	sc->sc_bar_response(ni, tap, status);
	if (tap->txa_start != old_txa_start) {
		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
		    __func__,
		    tid,
		    tap->txa_start,
		    old_txa_start);
	}
	tap->txa_start = old_txa_start;
	ATH_TX_UNLOCK(sc);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch? (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}
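/*
 * Worked example of the save/restore above (illustrative numbers): if
 * txa_start was 200 going into sc_bar_response() and net80211 slid it
 * to, say, 240, the driver's software BAW tracking would no longer
 * agree with net80211's idea of the left edge.  The mismatch is logged
 * and 200 is restored, keeping the driver's view authoritative until
 * the XXX above is properly resolved.
 */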
6073 */ 6074 void 6075 ath_addba_response_timeout(struct ieee80211_node *ni, 6076 struct ieee80211_tx_ampdu *tap) 6077 { 6078 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6079 int tid = tap->txa_tid; 6080 struct ath_node *an = ATH_NODE(ni); 6081 struct ath_tid *atid = &an->an_tid[tid]; 6082 6083 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6084 "%s: %s: TID=%d, called; resuming\n", 6085 __func__, 6086 ath_hal_ether_sprintf(ni->ni_macaddr), 6087 tid); 6088 6089 ATH_TX_LOCK(sc); 6090 atid->addba_tx_pending = 0; 6091 ATH_TX_UNLOCK(sc); 6092 6093 /* Note: This updates the aggregate state to (again) pending */ 6094 sc->sc_addba_response_timeout(ni, tap); 6095 6096 /* Unpause the TID; which reschedules it */ 6097 ATH_TX_LOCK(sc); 6098 ath_tx_tid_resume(sc, atid); 6099 ATH_TX_UNLOCK(sc); 6100 } 6101 6102 /* 6103 * Check if a node is asleep or not. 6104 */ 6105 int 6106 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) 6107 { 6108 6109 ATH_TX_LOCK_ASSERT(sc); 6110 6111 return (an->an_is_powersave); 6112 } 6113 6114 /* 6115 * Mark a node as currently "in powersaving." 6116 * This suspends all traffic on the node. 6117 * 6118 * This must be called with the node/tx locks free. 6119 * 6120 * XXX TODO: the locking silliness below is due to how the node 6121 * locking currently works. Right now, the node lock is grabbed 6122 * to do rate control lookups and these are done with the TX 6123 * queue lock held. This means the node lock can't be grabbed 6124 * first here or a LOR will occur. 6125 * 6126 * Eventually (hopefully!) the TX path code will only grab 6127 * the TXQ lock when transmitting and the ath_node lock when 6128 * doing node/TID operations. There are other complications - 6129 * the sched/unsched operations involve walking the per-txq 6130 * 'active tid' list and this requires both locks to be held. 6131 */ 6132 void 6133 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) 6134 { 6135 struct ath_tid *atid; 6136 struct ath_txq *txq; 6137 int tid; 6138 6139 ATH_TX_UNLOCK_ASSERT(sc); 6140 6141 /* Suspend all traffic on the node */ 6142 ATH_TX_LOCK(sc); 6143 6144 if (an->an_is_powersave) { 6145 DPRINTF(sc, ATH_DEBUG_XMIT, 6146 "%s: %s: node was already asleep!\n", 6147 __func__, ath_hal_ether_sprintf(an->an_node.ni_macaddr)); 6148 ATH_TX_UNLOCK(sc); 6149 return; 6150 } 6151 6152 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 6153 atid = &an->an_tid[tid]; 6154 txq = sc->sc_ac2q[atid->ac]; 6155 6156 ath_tx_tid_pause(sc, atid); 6157 } 6158 6159 /* Mark node as in powersaving */ 6160 an->an_is_powersave = 1; 6161 6162 ATH_TX_UNLOCK(sc); 6163 } 6164 6165 /* 6166 * Mark a node as currently "awake." 6167 * This resumes all traffic to the node. 6168 */ 6169 void 6170 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) 6171 { 6172 struct ath_tid *atid; 6173 struct ath_txq *txq; 6174 int tid; 6175 6176 ATH_TX_UNLOCK_ASSERT(sc); 6177 6178 ATH_TX_LOCK(sc); 6179 6180 /* !? 
static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
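/*
 * Illustrative sketch (not compiled): callers are expected to go via
 * the sc_tx method table rather than calling the legacy routines
 * directly, so an alternative TX DMA implementation can substitute
 * its own methods.  Attach/detach paths would do something like:
 */
#if 0
	ath_xmit_setup_legacy(sc);	/* pick the legacy TX DMA methods */
	sc->sc_tx.xmit_setup(sc);	/* set up TX DMA state */
	/* ... driver lifetime ... */
	sc->sc_tx.xmit_teardown(sc);	/* tear it down on detach */
#endif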