/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif
#include <netproto/802_11/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
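		/*
		 * Each EDMA TX descriptor holds up to sc_tx_nmaps
		 * buffer pointers, so a buffer spans
		 * ceil(bf_nseg / sc_tx_nmaps) descriptors - e.g. with
		 * six segments and four maps per descriptor, two
		 * descriptor entries are posted below.
		 */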
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID).
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs.  There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames onto the ATH_NONQOS_TID_AC queue
 * (currently WME_AC_VO).
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
    ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
    struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
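		/*
		 * One ath_buf is allocated per fragment in the
		 * m_nextpkt chain, and each holds its own node
		 * reference; ath_txfrag_cleanup() releases those
		 * references again if we run out of buffers part
		 * way through.
		 */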
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
#if defined(__DragonFly__)
#else
	struct mbuf *m;
#endif
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, 1, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
#endif
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_defrag(sc->sc_dmat,
		    bf->bf_dmamap, &m0,
		    bf->bf_segs, ATH_TXDESC,
		    &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */
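	/*
	 * Illustrative packing (assuming a 4-map EDMA chip): an
	 * 11-segment frame fills descriptor 0 with segments 0..3,
	 * descriptor 1 with segments 4..7 and descriptor 2 with
	 * segments 8..10; only the frame's final descriptor gets a
	 * NULL link pointer.
	 */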
400 */ 401 402 /* 403 * We need the number of TX data pointers in each descriptor. 404 * EDMA and later chips support 4 TX buffers per descriptor; 405 * previous chips just support one. 406 */ 407 numTxMaps = sc->sc_tx_nmaps; 408 409 /* 410 * For EDMA and later chips ensure the TX map is fully populated 411 * before advancing to the next descriptor. 412 */ 413 ds = (char *) bf->bf_desc; 414 bp = dsp = 0; 415 bzero(bufAddrList, sizeof(bufAddrList)); 416 bzero(segLenList, sizeof(segLenList)); 417 for (i = 0; i < bf->bf_nseg; i++) { 418 bufAddrList[bp] = bf->bf_segs[i].ds_addr; 419 segLenList[bp] = bf->bf_segs[i].ds_len; 420 bp++; 421 422 /* 423 * Go to the next segment if this isn't the last segment 424 * and there's space in the current TX map. 425 */ 426 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) 427 continue; 428 429 /* 430 * Last segment or we're out of buffer pointers. 431 */ 432 bp = 0; 433 434 if (i == bf->bf_nseg - 1) 435 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); 436 else 437 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 438 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); 439 440 /* 441 * XXX This assumes that bfs_txq is the actual destination 442 * hardware queue at this point. It may not have been 443 * assigned, it may actually be pointing to the multicast 444 * software TXQ id. These must be fixed! 445 */ 446 ath_hal_filltxdesc(ah, (struct ath_desc *) ds 447 , bufAddrList 448 , segLenList 449 , bf->bf_descid /* XXX desc id */ 450 , bf->bf_state.bfs_tx_queue 451 , isFirstDesc /* first segment */ 452 , i == bf->bf_nseg - 1 /* last segment */ 453 , (struct ath_desc *) ds0 /* first descriptor */ 454 ); 455 456 /* 457 * Make sure the 11n aggregate fields are cleared. 458 * 459 * XXX TODO: this doesn't need to be called for 460 * aggregate frames; as it'll be called on all 461 * sub-frames. Since the descriptors are in 462 * non-cacheable memory, this leads to some 463 * rather slow writes on MIPS/ARM platforms. 464 */ 465 if (ath_tx_is_11n(sc)) 466 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); 467 468 /* 469 * If 11n is enabled, set it up as if it's an aggregate 470 * frame. 471 */ 472 if (is_last_subframe) { 473 ath_hal_set11n_aggr_last(sc->sc_ah, 474 (struct ath_desc *) ds); 475 } else if (is_aggr) { 476 /* 477 * This clears the aggrlen field; so 478 * the caller needs to call set_aggr_first()! 479 * 480 * XXX TODO: don't call this for the first 481 * descriptor in the first frame in an 482 * aggregate! 483 */ 484 ath_hal_set11n_aggr_middle(sc->sc_ah, 485 (struct ath_desc *) ds, 486 bf->bf_state.bfs_ndelim); 487 } 488 isFirstDesc = 0; 489 bf->bf_lastds = (struct ath_desc *) ds; 490 491 /* 492 * Don't forget to skip to the next descriptor. 493 */ 494 ds += sc->sc_tx_desclen; 495 dsp++; 496 497 /* 498 * .. and don't forget to blank these out! 499 */ 500 bzero(bufAddrList, sizeof(bufAddrList)); 501 bzero(segLenList, sizeof(segLenList)); 502 } 503 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 504 } 505 506 /* 507 * Set the rate control fields in the given descriptor based on 508 * the bf_state fields and node state. 509 * 510 * The bfs fields should already be set with the relevant rate 511 * control information, including whether MRR is to be enabled. 512 * 513 * Since the FreeBSD HAL currently sets up the first TX rate 514 * in ath_hal_setuptxdesc(), this will setup the MRR 515 * conditionally for the pre-11n chips, and call ath_buf_set_rate 516 * unconditionally for 11n chips. 
 * These require the 11n rate scenario to be set if MCS rates
 * are enabled, so it's easier to just always call it.  The caller
 * can then only set rates 2, 3 and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
			, rc[1].ratecode, rc[1].tries
			, rc[2].ratecode, rc[2].tries
			, rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already have been linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
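		/*
		 * The resulting layout: the first subframe is later
		 * tagged with the aggregate length/delimiters (via
		 * ath_hal_set11n_aggr_first() below), intermediate
		 * subframes with aggr_middle, and the final subframe
		 * with aggr_last, so the hardware can tell where the
		 * aggregate begins and ends.
		 */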
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
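	/*
	 * NB: the check below is diagnostic only - the frame is
	 * queued regardless, which is why the QCU ID handling noted
	 * above still needs fixing.
	 */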
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmitted from the CAB queue are
	 * stored on a s/w only queue and loaded on to the CAB queue
	 * in the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX We should instead just verify that sc_txstart_cnt
	 * or ath_txproc_cnt > 0.  That would mean that
	 * the reset is going to be waiting for us to complete.
	 */
	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
		device_printf(sc->sc_dev,
		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
		    __func__);
	}

	/*
	 * XXX .. this is going to cause the hardware to get upset;
	 * so we really should find some way to drop or queue
	 * things.
	 */

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
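/*
 * In outline (a summary of the code below, not an additional
 * contract): re-program TxDP with the head of the queue via
 * ath_hal_puttxbuf(), mark PUTRUNNING, re-derive axq_link from the
 * tail buffer's last descriptor, and kick TxE again with
 * ath_hal_txstart().
 */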
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef	ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame.  The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it.  Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
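	/*
	 * For a fragment with MORE_FRAG set, the NAV computed below
	 * covers two SIFS+ACK exchanges (the "dur += dur") plus the
	 * airtime of the next fragment (the computetxtime() call);
	 * the final fragment needs only the single SIFS+ACK.
	 */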
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		kprintf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled?  Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate.  Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
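/*
 * NB: this is the single-frame counterpart to ath_tx_setds_11n();
 * it writes the rate control state and then chains the descriptors
 * with the aggregate flags cleared (is_aggr=0).
 */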
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;		/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so they go out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
		return (0);
	} else {
		return (1);
	}
}

/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal".  The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK.  ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * are "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future.
	 *   There are some specific scenarios where this doesn't make
	 *   sense, mostly surrounding ADDBA request/response - hence why
	 *   that is special cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows
	 *   up as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ
	 * locking, allowing for arbitrary AC frames to appear on arbitrary
	 * software queues, being queued to the "correct" hardware queue
	 * when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of a frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}

/*
 * Queue a frame to the hardware or software queue.
 *
 * This can be called by the net80211 code.
 *
 * XXX what about locking? Or, push the seqno assign into the
 * XXX aggregate scheduler so it's serialised?
 *
 * XXX When sending management frames via ath_raw_xmit(),
 * should CLRDMASK be set unconditionally?
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	int r = 0;
	u_int pri;
	int tid;
	struct ath_txq *txq;
	int ismcast;
	const struct ieee80211_frame *wh;
	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
	ieee80211_seq seqno;
	uint8_t type, subtype;
	int queue_to_head;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * Determine the target hardware queue.
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be.  If it's a non-QoS frame, the
	 * AC and TID are overridden.  The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
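	/*
	 * NB: pri is the WME access category (and thus picks the
	 * hardware TXQ via sc_ac2q[]), whilst tid is the 802.11 TID
	 * (0..15, or IEEE80211_NONQOS_TID for non-QoS frames).
	 */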
         */
        pri = ath_tx_getac(sc, m0);
        tid = ath_tx_gettid(sc, m0);

        txq = sc->sc_ac2q[pri];
        wh = mtod(m0, struct ieee80211_frame *);
        ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
        type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

        /*
         * Enforce how deep the multicast queue can grow.
         *
         * XXX duplicated in ath_raw_xmit().
         */
        if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
                    > sc->sc_txq_mcastq_maxdepth) {
                        sc->sc_stats.ast_tx_mcastq_overflow++;
                        m_freem(m0);
                        return (ENOBUFS);
                }
        }

        /*
         * Enforce how deep the unicast queue can grow.
         *
         * If the node is in power save then we don't want
         * the software queue to grow too deep, or a node may
         * end up consuming all of the ath_buf entries.
         *
         * For now, only do this for DATA frames.
         *
         * We will want to cap how many management/control
         * frames get punted to the software queue so it doesn't
         * fill up.  But the correct solution isn't yet obvious.
         * In any case, this check should at least let frames pass
         * that we are direct-dispatching.
         *
         * XXX TODO: duplicate this to the raw xmit path!
         */
        if (type == IEEE80211_FC0_TYPE_DATA &&
            ATH_NODE(ni)->an_is_powersave &&
            ATH_NODE(ni)->an_swq_depth >
            sc->sc_txq_node_psq_maxdepth) {
                sc->sc_stats.ast_tx_node_psq_overflow++;
                m_freem(m0);
                return (ENOBUFS);
        }

        /* A-MPDU TX */
        is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
        is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
        is_ampdu = is_ampdu_tx | is_ampdu_pending;

        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
            __func__, tid, pri, is_ampdu);

        /* Set local packet state, used to queue packets to hardware */
        bf->bf_state.bfs_tid = tid;
        bf->bf_state.bfs_tx_queue = txq->axq_qnum;
        bf->bf_state.bfs_pri = pri;

#if 1
        /*
         * When servicing one or more stations in power-save mode
         * (or) if there is some mcast data waiting on the mcast
         * queue (to prevent out of order delivery) multicast frames
         * must be buffered until after the beacon.
         *
         * TODO: we should lock the mcastq before we check the length.
         */
        if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
                txq = &avp->av_mcastq;
                /*
                 * Mark the frame as eventually belonging on the CAB
                 * queue, so the descriptor setup functions will
                 * correctly initialise the descriptor 'qcuId' field.
                 */
                bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
        }
#endif

        /* Do the generic frame setup */
        /* XXX should just bzero the bf_state? */
        bf->bf_state.bfs_dobaw = 0;

        /* A-MPDU TX? Manually set sequence number */
        /*
         * Don't do it whilst pending; the net80211 layer still
         * assigns them.
         */
        if (is_ampdu_tx) {
                /*
                 * Always call; this function will
                 * handle making sure that null data frames
                 * don't get a sequence number from the current
                 * TID and thus mess with the BAW.
                 */
                seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);

                /*
                 * Don't add QoS NULL frames to the BAW.
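                 *
                 * (A QoS NULL deliberately takes its sequence number
                 * from ni_txseqs[IEEE80211_NONQOS_TID] in
                 * ath_tx_tid_seqno_assign(), so adding it to this
                 * TID's BAW would punch a hole in the window that the
                 * receiver would never see filled.)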
2002 */ 2003 if (IEEE80211_QOS_HAS_SEQ(wh) && 2004 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2005 bf->bf_state.bfs_dobaw = 1; 2006 } 2007 } 2008 2009 /* 2010 * If needed, the sequence number has been assigned. 2011 * Squirrel it away somewhere easy to get to. 2012 */ 2013 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 2014 2015 /* Is ampdu pending? fetch the seqno and print it out */ 2016 if (is_ampdu_pending) 2017 DPRINTF(sc, ATH_DEBUG_SW_TX, 2018 "%s: tid %d: ampdu pending, seqno %d\n", 2019 __func__, tid, M_SEQNO_GET(m0)); 2020 2021 /* This also sets up the DMA map */ 2022 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2023 2024 if (r != 0) 2025 goto done; 2026 2027 /* At this point m0 could have changed! */ 2028 m0 = bf->bf_m; 2029 2030 #if 1 2031 /* 2032 * If it's a multicast frame, do a direct-dispatch to the 2033 * destination hardware queue. Don't bother software 2034 * queuing it. 2035 */ 2036 /* 2037 * If it's a BAR frame, do a direct dispatch to the 2038 * destination hardware queue. Don't bother software 2039 * queuing it, as the TID will now be paused. 2040 * Sending a BAR frame can occur from the net80211 txa timer 2041 * (ie, retries) or from the ath txtask (completion call.) 2042 * It queues directly to hardware because the TID is paused 2043 * at this point (and won't be unpaused until the BAR has 2044 * either been TXed successfully or max retries has been 2045 * reached.) 2046 */ 2047 /* 2048 * Until things are better debugged - if this node is asleep 2049 * and we're sending it a non-BAR frame, direct dispatch it. 2050 * Why? Because we need to figure out what's actually being 2051 * sent - eg, during reassociation/reauthentication after 2052 * the node (last) disappeared whilst asleep, the driver should 2053 * have unpaused/unsleep'ed the node. So until that is 2054 * sorted out, use this workaround. 2055 */ 2056 if (txq == &avp->av_mcastq) { 2057 DPRINTF(sc, ATH_DEBUG_SW_TX, 2058 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2059 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2060 ath_tx_xmit_normal(sc, txq, bf); 2061 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2062 &queue_to_head)) { 2063 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2064 } else { 2065 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2066 ath_tx_xmit_normal(sc, txq, bf); 2067 } 2068 #else 2069 /* 2070 * For now, since there's no software queue, 2071 * direct-dispatch to the hardware. 2072 */ 2073 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2074 /* 2075 * Update the current leak count if 2076 * we're leaking frames; and set the 2077 * MORE flag as appropriate. 
2078 */ 2079 ath_tx_leak_count_update(sc, tid, bf); 2080 ath_tx_xmit_normal(sc, txq, bf); 2081 #endif 2082 done: 2083 return 0; 2084 } 2085 2086 static int 2087 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2088 struct ath_buf *bf, struct mbuf *m0, 2089 const struct ieee80211_bpf_params *params) 2090 { 2091 struct ifnet *ifp = sc->sc_ifp; 2092 struct ieee80211com *ic = ifp->if_l2com; 2093 struct ath_hal *ah = sc->sc_ah; 2094 struct ieee80211vap *vap = ni->ni_vap; 2095 int error, ismcast, ismrr; 2096 int keyix, hdrlen, pktlen, try0, txantenna; 2097 u_int8_t rix, txrate; 2098 struct ieee80211_frame *wh; 2099 u_int flags; 2100 HAL_PKT_TYPE atype; 2101 const HAL_RATE_TABLE *rt; 2102 struct ath_desc *ds; 2103 u_int pri; 2104 int o_tid = -1; 2105 int do_override; 2106 uint8_t type, subtype; 2107 int queue_to_head; 2108 struct ath_node *an = ATH_NODE(ni); 2109 2110 ATH_TX_LOCK_ASSERT(sc); 2111 2112 wh = mtod(m0, struct ieee80211_frame *); 2113 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2114 hdrlen = ieee80211_anyhdrsize(wh); 2115 /* 2116 * Packet length must not include any 2117 * pad bytes; deduct them here. 2118 */ 2119 /* XXX honor IEEE80211_BPF_DATAPAD */ 2120 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2121 2122 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2123 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2124 2125 ATH_KTR(sc, ATH_KTR_TX, 2, 2126 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2127 2128 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2129 __func__, ismcast); 2130 2131 pri = params->ibp_pri & 3; 2132 /* Override pri if the frame isn't a QoS one */ 2133 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2134 pri = ath_tx_getac(sc, m0); 2135 2136 /* XXX If it's an ADDBA, override the correct queue */ 2137 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2138 2139 /* Map ADDBA to the correct priority */ 2140 if (do_override) { 2141 #if 0 2142 DPRINTF(sc, ATH_DEBUG_XMIT, 2143 "%s: overriding tid %d pri %d -> %d\n", 2144 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2145 #endif 2146 pri = TID_TO_WME_AC(o_tid); 2147 } 2148 2149 /* Handle encryption twiddling if needed */ 2150 if (! ath_tx_tag_crypto(sc, ni, 2151 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2152 &hdrlen, &pktlen, &keyix)) { 2153 ath_freetx(m0); 2154 return EIO; 2155 } 2156 /* packet header may have moved, reset our local pointer */ 2157 wh = mtod(m0, struct ieee80211_frame *); 2158 2159 /* Do the generic frame setup */ 2160 /* XXX should just bzero the bf_state? */ 2161 bf->bf_state.bfs_dobaw = 0; 2162 2163 error = ath_tx_dmasetup(sc, bf, m0); 2164 if (error != 0) 2165 return error; 2166 m0 = bf->bf_m; /* NB: may have changed */ 2167 wh = mtod(m0, struct ieee80211_frame *); 2168 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2169 bf->bf_node = ni; /* NB: held reference */ 2170 2171 /* Always enable CLRDMASK for raw frames for now.. */ 2172 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2173 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2174 if (params->ibp_flags & IEEE80211_BPF_RTS) 2175 flags |= HAL_TXDESC_RTSENA; 2176 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2177 /* XXX assume 11g/11n protection? */ 2178 bf->bf_state.bfs_doprot = 1; 2179 flags |= HAL_TXDESC_CTSENA; 2180 } 2181 /* XXX leave ismcast to injector? 
         */
        if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
                flags |= HAL_TXDESC_NOACK;

        rt = sc->sc_currates;
        KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

        /* Fetch first rate information */
        rix = ath_tx_findrix(sc, params->ibp_rate0);
        try0 = params->ibp_try0;

        /*
         * Override EAPOL rate as appropriate.
         */
        if (m0->m_flags & M_EAPOL) {
                /* XXX? maybe always use long preamble? */
                rix = an->an_mgmtrix;
                try0 = ATH_TXMAXTRY;    /* XXX? too many? */
        }

        txrate = rt->info[rix].rateCode;
        if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
                txrate |= rt->info[rix].shortPreamble;
        sc->sc_txrix = rix;
        ismrr = (params->ibp_try1 != 0);
        txantenna = params->ibp_pri >> 2;
        if (txantenna == 0)             /* XXX? */
                txantenna = sc->sc_txantenna;

        /*
         * Since ctsrate is fixed, store it away for later
         * use when the descriptor fields are being set.
         */
        if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
                bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;

        /*
         * NB: we mark all packets as type PSPOLL so the h/w won't
         * set the sequence number, duration, etc.
         */
        atype = HAL_PKT_TYPE_PSPOLL;

        if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
                ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
                    sc->sc_hwmap[rix].ieeerate, -1);

        if (ieee80211_radiotap_active_vap(vap)) {
                u_int64_t tsf = ath_hal_gettsf64(ah);

                sc->sc_tx_th.wt_tsf = htole64(tsf);
                sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
                if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
                        sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
                if (m0->m_flags & M_FRAG)
                        sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
                sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
                sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
                    ieee80211_get_node_txpower(ni));
                sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

                ieee80211_radiotap_tx(vap, m0);
        }

        /*
         * Formulate first tx descriptor with tx controls.
         */
        ds = bf->bf_desc;
        /* XXX check return value? */

        /* Store the decided rate index values away */
        bf->bf_state.bfs_pktlen = pktlen;
        bf->bf_state.bfs_hdrlen = hdrlen;
        bf->bf_state.bfs_atype = atype;
        bf->bf_state.bfs_txpower = MIN(params->ibp_power,
            ieee80211_get_node_txpower(ni));
        bf->bf_state.bfs_txrate0 = txrate;
        bf->bf_state.bfs_try0 = try0;
        bf->bf_state.bfs_keyix = keyix;
        bf->bf_state.bfs_txantenna = txantenna;
        bf->bf_state.bfs_txflags = flags;
        bf->bf_state.bfs_shpream =
            !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);

        /* Set local packet state, used to queue packets to hardware */
        bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
        bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
        bf->bf_state.bfs_pri = pri;

        /* XXX this should be done in ath_tx_setrate() */
        bf->bf_state.bfs_ctsrate = 0;
        bf->bf_state.bfs_ctsduration = 0;
        bf->bf_state.bfs_ismrr = ismrr;

        /* Blank the legacy rate array */
        bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

        bf->bf_state.bfs_rc[0].rix = rix;
        bf->bf_state.bfs_rc[0].tries = try0;
        bf->bf_state.bfs_rc[0].ratecode = txrate;

        if (ismrr) {
                int rix;

                rix = ath_tx_findrix(sc, params->ibp_rate1);
                bf->bf_state.bfs_rc[1].rix = rix;
                bf->bf_state.bfs_rc[1].tries = params->ibp_try1;

                rix = ath_tx_findrix(sc, params->ibp_rate2);
                bf->bf_state.bfs_rc[2].rix = rix;
                bf->bf_state.bfs_rc[2].tries = params->ibp_try2;

                rix = ath_tx_findrix(sc, params->ibp_rate3);
                bf->bf_state.bfs_rc[3].rix = rix;
                bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
        }
        /*
         * All the required rate control decisions have been made;
         * fill in the rc flags.
         */
        ath_tx_rate_fill_rcflags(sc, bf);

        /* NB: no buffered multicast in power save support */

        /*
         * If we're overriding the ADDBA destination, dump directly
         * into the hardware queue, right after any pending
         * frames to that node are sent.
         */
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
            __func__, do_override);

#if 1
        /*
         * Put addba frames in the right place in the right TID/HWQ.
         */
        if (do_override) {
                bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
                /*
                 * XXX if it's addba frames, should we be leaking
                 * them out via the frame leak method?
                 * XXX for now let's not risk it; but we may wish
                 * to investigate this later.
                 */
                ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
        } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
            &queue_to_head)) {
                /* Queue to software queue */
                ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
        } else {
                bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
                ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
        }
#else
        /* Direct-dispatch to the hardware */
        bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
        /*
         * Update the current leak count if
         * we're leaking frames; and set the
         * MORE flag as appropriate.
         */
        ath_tx_leak_count_update(sc, &an->an_tid[WME_AC_TO_TID(pri)], bf);
        ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
#endif
        return 0;
}

/*
 * Send a raw frame.
 *
 * This can be called by net80211.
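 *
 * (Typically this is reached via the device's ic_raw_xmit method,
 * e.g. for frames injected through a BPF tap, optionally carrying an
 * ieee80211_bpf_params block with explicit rate/flag parameters.)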
2351 */ 2352 int 2353 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2354 const struct ieee80211_bpf_params *params) 2355 { 2356 struct ieee80211com *ic = ni->ni_ic; 2357 struct ifnet *ifp = ic->ic_ifp; 2358 struct ath_softc *sc = ifp->if_softc; 2359 struct ath_buf *bf; 2360 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2361 int error = 0; 2362 2363 ATH_PCU_LOCK(sc); 2364 if (sc->sc_inreset_cnt > 0) { 2365 DPRINTF(sc, ATH_DEBUG_XMIT, 2366 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2367 error = EIO; 2368 ATH_PCU_UNLOCK(sc); 2369 goto badbad; 2370 } 2371 sc->sc_txstart_cnt++; 2372 ATH_PCU_UNLOCK(sc); 2373 2374 /* Wake the hardware up already */ 2375 ATH_LOCK(sc); 2376 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2377 ATH_UNLOCK(sc); 2378 2379 ATH_TX_LOCK(sc); 2380 2381 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) { 2382 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2383 (ifp->if_flags & IFF_RUNNING) == 0 ? 2384 "!running" : "invalid"); 2385 m_freem(m); 2386 error = ENETDOWN; 2387 goto bad; 2388 } 2389 2390 /* 2391 * Enforce how deep the multicast queue can grow. 2392 * 2393 * XXX duplicated in ath_tx_start(). 2394 */ 2395 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2396 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2397 > sc->sc_txq_mcastq_maxdepth) { 2398 sc->sc_stats.ast_tx_mcastq_overflow++; 2399 error = ENOBUFS; 2400 } 2401 2402 if (error != 0) { 2403 m_freem(m); 2404 goto bad; 2405 } 2406 } 2407 2408 /* 2409 * Grab a TX buffer and associated resources. 2410 */ 2411 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2412 if (bf == NULL) { 2413 sc->sc_stats.ast_tx_nobuf++; 2414 m_freem(m); 2415 error = ENOBUFS; 2416 goto bad; 2417 } 2418 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2419 m, params, bf); 2420 2421 if (params == NULL) { 2422 /* 2423 * Legacy path; interpret frame contents to decide 2424 * precisely how to send the frame. 2425 */ 2426 if (ath_tx_start(sc, ni, bf, m)) { 2427 error = EIO; /* XXX */ 2428 goto bad2; 2429 } 2430 } else { 2431 /* 2432 * Caller supplied explicit parameters to use in 2433 * sending the frame. 2434 */ 2435 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2436 error = EIO; /* XXX */ 2437 goto bad2; 2438 } 2439 } 2440 sc->sc_wd_timer = 5; 2441 #if defined(__DragonFly__) 2442 ++ifp->if_opackets; 2443 #else 2444 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2445 #endif 2446 sc->sc_stats.ast_tx_raw++; 2447 2448 /* 2449 * Update the TIM - if there's anything queued to the 2450 * software queue and power save is enabled, we should 2451 * set the TIM. 
2452 */ 2453 ath_tx_update_tim(sc, ni, 1); 2454 2455 ATH_TX_UNLOCK(sc); 2456 2457 ATH_PCU_LOCK(sc); 2458 sc->sc_txstart_cnt--; 2459 ATH_PCU_UNLOCK(sc); 2460 2461 2462 /* Put the hardware back to sleep if required */ 2463 ATH_LOCK(sc); 2464 ath_power_restore_power_state(sc); 2465 ATH_UNLOCK(sc); 2466 2467 return 0; 2468 2469 bad2: 2470 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2471 "bf=%p", 2472 m, 2473 params, 2474 bf); 2475 ATH_TXBUF_LOCK(sc); 2476 ath_returnbuf_head(sc, bf); 2477 ATH_TXBUF_UNLOCK(sc); 2478 2479 bad: 2480 ATH_TX_UNLOCK(sc); 2481 2482 ATH_PCU_LOCK(sc); 2483 sc->sc_txstart_cnt--; 2484 ATH_PCU_UNLOCK(sc); 2485 2486 /* Put the hardware back to sleep if required */ 2487 ATH_LOCK(sc); 2488 ath_power_restore_power_state(sc); 2489 ATH_UNLOCK(sc); 2490 2491 badbad: 2492 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2493 m, params); 2494 #if defined(__DragonFly__) 2495 ++ifp->if_oerrors; 2496 #else 2497 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2498 #endif 2499 sc->sc_stats.ast_tx_raw_fail++; 2500 ieee80211_free_node(ni); 2501 2502 return error; 2503 } 2504 2505 /* Some helper functions */ 2506 2507 /* 2508 * ADDBA (and potentially others) need to be placed in the same 2509 * hardware queue as the TID/node it's relating to. This is so 2510 * it goes out after any pending non-aggregate frames to the 2511 * same node/TID. 2512 * 2513 * If this isn't done, the ADDBA can go out before the frames 2514 * queued in hardware. Even though these frames have a sequence 2515 * number -earlier- than the ADDBA can be transmitted (but 2516 * no frames whose sequence numbers are after the ADDBA should 2517 * be!) they'll arrive after the ADDBA - and the receiving end 2518 * will simply drop them as being out of the BAW. 2519 * 2520 * The frames can't be appended to the TID software queue - it'll 2521 * never be sent out. So these frames have to be directly 2522 * dispatched to the hardware, rather than queued in software. 2523 * So if this function returns true, the TXQ has to be 2524 * overridden and it has to be directly dispatched. 2525 * 2526 * It's a dirty hack, but someone's gotta do it. 2527 */ 2528 2529 /* 2530 * XXX doesn't belong here! 2531 */ 2532 static int 2533 ieee80211_is_action(struct ieee80211_frame *wh) 2534 { 2535 /* Type: Management frame? */ 2536 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2537 IEEE80211_FC0_TYPE_MGT) 2538 return 0; 2539 2540 /* Subtype: Action frame? */ 2541 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2542 IEEE80211_FC0_SUBTYPE_ACTION) 2543 return 0; 2544 2545 return 1; 2546 } 2547 2548 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2549 /* 2550 * Return an alternate TID for ADDBA request frames. 2551 * 2552 * Yes, this likely should be done in the net80211 layer. 2553 */ 2554 static int 2555 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2556 struct ieee80211_node *ni, 2557 struct mbuf *m0, int *tid) 2558 { 2559 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2560 struct ieee80211_action_ba_addbarequest *ia; 2561 uint8_t *frm; 2562 uint16_t baparamset; 2563 2564 /* Not action frame? Bail */ 2565 if (! ieee80211_is_action(wh)) 2566 return 0; 2567 2568 /* XXX Not needed for frames we send? */ 2569 #if 0 2570 /* Correct length? */ 2571 if (! ieee80211_parse_action(ni, m)) 2572 return 0; 2573 #endif 2574 2575 /* Extract out action frame */ 2576 frm = (u_int8_t *)&wh[1]; 2577 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2578 2579 /* Not ADDBA? 
           Bail */
        if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
                return 0;
        if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
                return 0;

        /* Extract TID, return it */
        baparamset = le16toh(ia->rq_baparamset);
        *tid = (int) MS(baparamset, IEEE80211_BAPS_TID);

        return 1;
}
#undef MS

/* Per-node software queue operations */

/*
 * Add the current packet to the given BAW.
 * It is assumed that the current packet
 *
 * + fits inside the BAW;
 * + already has had a sequence number allocated.
 *
 * Since the BAW status may be modified by both the ath task and
 * the net80211/ifnet contexts, the TID must be locked.
 */
void
ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
        int index, cindex;
        struct ieee80211_tx_ampdu *tap;

        ATH_TX_LOCK_ASSERT(sc);

        if (bf->bf_state.bfs_isretried)
                return;

        tap = ath_tx_get_tx_tid(an, tid->tid);

        if (! bf->bf_state.bfs_dobaw) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: dobaw=0, seqno=%d, window %d:%d\n",
                    __func__, SEQNO(bf->bf_state.bfs_seqno),
                    tap->txa_start, tap->txa_wnd);
        }

        if (bf->bf_state.bfs_addedbaw)
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
                    "baw head=%d tail=%d\n",
                    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
                    tap->txa_start, tap->txa_wnd, tid->baw_head,
                    tid->baw_tail);

        /*
         * Verify that the given sequence number is not outside of the
         * BAW.  Complain loudly if that's the case.
         */
        if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
            SEQNO(bf->bf_state.bfs_seqno))) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
                    "baw head=%d tail=%d\n",
                    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
                    tap->txa_start, tap->txa_wnd, tid->baw_head,
                    tid->baw_tail);
        }

        /*
         * ni->ni_txseqs[] is the currently allocated seqno.
         * The txa state contains the current baw start.
         */
        index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
            "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
            "baw head=%d tail=%d\n",
            __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
            tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
            tid->baw_tail);

#if 0
        assert(tid->tx_buf[cindex] == NULL);
#endif
        if (tid->tx_buf[cindex] != NULL) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: ba packet dup (index=%d, cindex=%d, "
                    "head=%d, tail=%d)\n",
                    __func__, index, cindex, tid->baw_head, tid->baw_tail);
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
                    __func__,
                    tid->tx_buf[cindex],
                    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
                    bf,
                    SEQNO(bf->bf_state.bfs_seqno)
                );
        }
        tid->tx_buf[cindex] = bf;

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * Flip the BAW buffer entry over from the existing one to the new one.
 *
 * When software retransmitting a (sub-)frame, it is entirely possible that
 * the frame ath_buf is marked as BUSY and can't be immediately reused.
 * In that instance the buffer is cloned and the new buffer is used for
 * retransmit.  We thus need to update the ath_buf slot in the BAW buf
 * tracking array to maintain consistency.
 */
static void
ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
{
        int index, cindex;
        struct ieee80211_tx_ampdu *tap;
        int seqno = SEQNO(old_bf->bf_state.bfs_seqno);

        ATH_TX_LOCK_ASSERT(sc);

        tap = ath_tx_get_tx_tid(an, tid->tid);
        index  = ATH_BA_INDEX(tap->txa_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        /*
         * Just warn for now; if it happens then we should find out
         * about it.  It's highly likely the aggregation session will
         * soon hang.
         */
        if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: retransmitted buffer"
                    " has mismatching seqnos, BA session may hang.\n",
                    __func__);
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: old seqno=%d, new_seqno=%d\n", __func__,
                    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
        }

        if (tid->tx_buf[cindex] != old_bf) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: ath_buf pointer incorrect; "
                    "the BA session may hang.\n", __func__);
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
        }

        tid->tx_buf[cindex] = new_bf;
}

/*
 * seq_start - left edge of BAW
 * seq_next - current/next sequence number to allocate
 *
 * Since the BAW status may be modified by both the ath task and
 * the net80211/ifnet contexts, the TID must be locked.
 */
static void
ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, const struct ath_buf *bf)
{
        int index, cindex;
        struct ieee80211_tx_ampdu *tap;
        int seqno = SEQNO(bf->bf_state.bfs_seqno);

        ATH_TX_LOCK_ASSERT(sc);

        tap = ath_tx_get_tx_tid(an, tid->tid);
        index  = ATH_BA_INDEX(tap->txa_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
            "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
            "baw head=%d, tail=%d\n",
            __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
            cindex, tid->baw_head, tid->baw_tail);

        /*
         * If this occurs then we have a big problem - something else
         * has slid tap->txa_start along without updating the BAW
         * tracking start/end pointers.  Thus the TX BAW state is now
         * completely busted.
         *
         * But for now, since I haven't yet fixed TDMA and buffer cloning,
         * it's quite possible that a cloned buffer is making its way
         * here and causing it to fire off.  Disable TDMA for now.
         */
        if (tid->tx_buf[cindex] != bf) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
                    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
                    tid->tx_buf[cindex],
                    (tid->tx_buf[cindex] != NULL) ?
                    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
        }

        tid->tx_buf[cindex] = NULL;

        while (tid->baw_head != tid->baw_tail &&
            !tid->tx_buf[tid->baw_head]) {
                INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
        DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
            "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
            __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
}

static void
ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
        struct ieee80211_frame *wh;

        ATH_TX_LOCK_ASSERT(sc);

        if (tid->an->an_leak_count > 0) {
                wh = mtod(bf->bf_m, struct ieee80211_frame *);

                /*
                 * Update MORE based on the software/net80211 queue states.
                 */
                if ((tid->an->an_stack_psq > 0)
                    || (tid->an->an_swq_depth > 0))
                        wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
                else
                        wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;

                DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
                    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
                    __func__,
                    tid->an->an_node.ni_macaddr,
                    ":",
                    tid->an->an_leak_count,
                    tid->an->an_stack_psq,
                    tid->an->an_swq_depth,
                    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));

                /*
                 * Re-sync the underlying buffer.
                 */
                bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
                    BUS_DMASYNC_PREWRITE);

                tid->an->an_leak_count--;
        }
}

static int
ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
{

        ATH_TX_LOCK_ASSERT(sc);

        if (tid->an->an_leak_count > 0) {
                return (1);
        }
        if (tid->paused)
                return (0);
        return (1);
}

/*
 * Mark the current node/TID as ready to TX.
 *
 * This is done to make it easy for the software scheduler to
 * find which nodes have data to send.
 *
 * The TXQ lock must be held.
 */
void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
        struct ath_txq *txq = sc->sc_ac2q[tid->ac];

        ATH_TX_LOCK_ASSERT(sc);

        /*
         * If we are leaking out a frame to this destination
         * for PS-POLL, ensure that we allow scheduling to
         * occur.
         */
        if (! ath_tx_tid_can_tx_or_sched(sc, tid))
                return;         /* paused, can't schedule yet */

        if (tid->sched)
                return;         /* already scheduled */

        tid->sched = 1;

#if 0
        /*
         * If this is a sleeping node we're leaking to, give
         * it a higher priority.  This is so bad for QoS it hurts.
         */
        if (tid->an->an_leak_count) {
                TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
        } else {
                TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
        }
#endif

        /*
         * We can't do the above - it'll confuse the TXQ software
         * scheduler which will keep checking the _head_ TID
         * in the list to see if it has traffic.  If we queue
         * a TID to the head of the list and it doesn't transmit,
         * we'll check it again.
         *
         * So, get the rest of this leaking frames support working
         * and reliable first and _then_ optimise it so they're
         * pushed out in front of any other pending software
         * queued nodes.
         */
        TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}

/*
 * Mark the current node as no longer needing to be polled for
 * TX packets.
 *
 * The TXQ lock must be held.
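 *
 * (This is the inverse of ath_tx_tid_sched(); it is called, for
 * example, once a TID's software queue has been drained, or when a
 * node is being flushed via ath_tx_node_flush()/ath_tx_txq_drain()
 * below.)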
2909 */ 2910 static void 2911 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2912 { 2913 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2914 2915 ATH_TX_LOCK_ASSERT(sc); 2916 2917 if (tid->sched == 0) 2918 return; 2919 2920 tid->sched = 0; 2921 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2922 } 2923 2924 /* 2925 * Assign a sequence number manually to the given frame. 2926 * 2927 * This should only be called for A-MPDU TX frames. 2928 */ 2929 static ieee80211_seq 2930 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2931 struct ath_buf *bf, struct mbuf *m0) 2932 { 2933 struct ieee80211_frame *wh; 2934 int tid, pri; 2935 ieee80211_seq seqno; 2936 uint8_t subtype; 2937 2938 /* TID lookup */ 2939 wh = mtod(m0, struct ieee80211_frame *); 2940 pri = M_WME_GETAC(m0); /* honor classification */ 2941 tid = WME_AC_TO_TID(pri); 2942 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2943 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2944 2945 /* XXX Is it a control frame? Ignore */ 2946 2947 /* Does the packet require a sequence number? */ 2948 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2949 return -1; 2950 2951 ATH_TX_LOCK_ASSERT(sc); 2952 2953 /* 2954 * Is it a QOS NULL Data frame? Give it a sequence number from 2955 * the default TID (IEEE80211_NONQOS_TID.) 2956 * 2957 * The RX path of everything I've looked at doesn't include the NULL 2958 * data frame sequence number in the aggregation state updates, so 2959 * assigning it a sequence number there will cause a BAW hole on the 2960 * RX side. 2961 */ 2962 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2963 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2964 /* XXX no locking for this TID? This is a bit of a problem. */ 2965 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2966 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2967 } else { 2968 /* Manually assign sequence number */ 2969 seqno = ni->ni_txseqs[tid]; 2970 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2971 } 2972 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2973 M_SEQNO_SET(m0, seqno); 2974 2975 /* Return so caller can do something with it if needed */ 2976 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2977 return seqno; 2978 } 2979 2980 /* 2981 * Attempt to direct dispatch an aggregate frame to hardware. 2982 * If the frame is out of BAW, queue. 2983 * Otherwise, schedule it as a single frame. 2984 */ 2985 static void 2986 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2987 struct ath_txq *txq, struct ath_buf *bf) 2988 { 2989 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2990 struct ieee80211_tx_ampdu *tap; 2991 2992 ATH_TX_LOCK_ASSERT(sc); 2993 2994 tap = ath_tx_get_tx_tid(an, tid->tid); 2995 2996 /* paused? queue */ 2997 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 2998 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2999 /* XXX don't sched - we're paused! */ 3000 return; 3001 } 3002 3003 /* outside baw? queue */ 3004 if (bf->bf_state.bfs_dobaw && 3005 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 3006 SEQNO(bf->bf_state.bfs_seqno)))) { 3007 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3008 ath_tx_tid_sched(sc, tid); 3009 return; 3010 } 3011 3012 /* 3013 * This is a temporary check and should be removed once 3014 * all the relevant code paths have been fixed. 
3015 * 3016 * During aggregate retries, it's possible that the head 3017 * frame will fail (which has the bfs_aggr and bfs_nframes 3018 * fields set for said aggregate) and will be retried as 3019 * a single frame. In this instance, the values should 3020 * be reset or the completion code will get upset with you. 3021 */ 3022 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 3023 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3024 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 3025 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 3026 bf->bf_state.bfs_aggr = 0; 3027 bf->bf_state.bfs_nframes = 1; 3028 } 3029 3030 /* Update CLRDMASK just before this frame is queued */ 3031 ath_tx_update_clrdmask(sc, tid, bf); 3032 3033 /* Direct dispatch to hardware */ 3034 ath_tx_do_ratelookup(sc, bf); 3035 ath_tx_calc_duration(sc, bf); 3036 ath_tx_calc_protection(sc, bf); 3037 ath_tx_set_rtscts(sc, bf); 3038 ath_tx_rate_fill_rcflags(sc, bf); 3039 ath_tx_setds(sc, bf); 3040 3041 /* Statistics */ 3042 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3043 3044 /* Track per-TID hardware queue depth correctly */ 3045 tid->hwq_depth++; 3046 3047 /* Add to BAW */ 3048 if (bf->bf_state.bfs_dobaw) { 3049 ath_tx_addto_baw(sc, an, tid, bf); 3050 bf->bf_state.bfs_addedbaw = 1; 3051 } 3052 3053 /* Set completion handler, multi-frame aggregate or not */ 3054 bf->bf_comp = ath_tx_aggr_comp; 3055 3056 /* 3057 * Update the current leak count if 3058 * we're leaking frames; and set the 3059 * MORE flag as appropriate. 3060 */ 3061 ath_tx_leak_count_update(sc, tid, bf); 3062 3063 /* Hand off to hardware */ 3064 ath_tx_handoff(sc, txq, bf); 3065 } 3066 3067 /* 3068 * Attempt to send the packet. 3069 * If the queue isn't busy, direct-dispatch. 3070 * If the queue is busy enough, queue the given packet on the 3071 * relevant software queue. 3072 */ 3073 void 3074 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, 3075 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) 3076 { 3077 struct ath_node *an = ATH_NODE(ni); 3078 struct ieee80211_frame *wh; 3079 struct ath_tid *atid; 3080 int pri, tid; 3081 struct mbuf *m0 = bf->bf_m; 3082 3083 ATH_TX_LOCK_ASSERT(sc); 3084 3085 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 3086 wh = mtod(m0, struct ieee80211_frame *); 3087 pri = ath_tx_getac(sc, m0); 3088 tid = ath_tx_gettid(sc, m0); 3089 atid = &an->an_tid[tid]; 3090 3091 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 3092 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3093 3094 /* Set local packet state, used to queue packets to hardware */ 3095 /* XXX potentially duplicate info, re-check */ 3096 bf->bf_state.bfs_tid = tid; 3097 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 3098 bf->bf_state.bfs_pri = pri; 3099 3100 /* 3101 * If the hardware queue isn't busy, queue it directly. 3102 * If the hardware queue is busy, queue it. 3103 * If the TID is paused or the traffic it outside BAW, software 3104 * queue it. 3105 * 3106 * If the node is in power-save and we're leaking a frame, 3107 * leak a single frame. 3108 */ 3109 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { 3110 /* TID is paused, queue */ 3111 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3112 /* 3113 * If the caller requested that it be sent at a high 3114 * priority, queue it at the head of the list. 
3115 */ 3116 if (queue_to_head) 3117 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3118 else 3119 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3120 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 3121 /* AMPDU pending; queue */ 3122 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 3123 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3124 /* XXX sched? */ 3125 } else if (ath_tx_ampdu_running(sc, an, tid)) { 3126 /* AMPDU running, attempt direct dispatch if possible */ 3127 3128 /* 3129 * Always queue the frame to the tail of the list. 3130 */ 3131 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3132 3133 /* 3134 * If the hardware queue isn't busy, direct dispatch 3135 * the head frame in the list. Don't schedule the 3136 * TID - let it build some more frames first? 3137 * 3138 * When running A-MPDU, always just check the hardware 3139 * queue depth against the aggregate frame limit. 3140 * We don't want to burst a large number of single frames 3141 * out to the hardware; we want to aggressively hold back. 3142 * 3143 * Otherwise, schedule the TID. 3144 */ 3145 /* XXX TXQ locking */ 3146 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) { 3147 bf = ATH_TID_FIRST(atid); 3148 ATH_TID_REMOVE(atid, bf, bf_list); 3149 3150 /* 3151 * Ensure it's definitely treated as a non-AMPDU 3152 * frame - this information may have been left 3153 * over from a previous attempt. 3154 */ 3155 bf->bf_state.bfs_aggr = 0; 3156 bf->bf_state.bfs_nframes = 1; 3157 3158 /* Queue to the hardware */ 3159 ath_tx_xmit_aggr(sc, an, txq, bf); 3160 DPRINTF(sc, ATH_DEBUG_SW_TX, 3161 "%s: xmit_aggr\n", 3162 __func__); 3163 } else { 3164 DPRINTF(sc, ATH_DEBUG_SW_TX, 3165 "%s: ampdu; swq'ing\n", 3166 __func__); 3167 3168 ath_tx_tid_sched(sc, atid); 3169 } 3170 /* 3171 * If we're not doing A-MPDU, be prepared to direct dispatch 3172 * up to both limits if possible. This particular corner 3173 * case may end up with packet starvation between aggregate 3174 * traffic and non-aggregate traffic: we wnat to ensure 3175 * that non-aggregate stations get a few frames queued to the 3176 * hardware before the aggregate station(s) get their chance. 3177 * 3178 * So if you only ever see a couple of frames direct dispatched 3179 * to the hardware from a non-AMPDU client, check both here 3180 * and in the software queue dispatcher to ensure that those 3181 * non-AMPDU stations get a fair chance to transmit. 3182 */ 3183 /* XXX TXQ locking */ 3184 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && 3185 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { 3186 /* AMPDU not running, attempt direct dispatch */ 3187 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 3188 /* See if clrdmask needs to be set */ 3189 ath_tx_update_clrdmask(sc, atid, bf); 3190 3191 /* 3192 * Update the current leak count if 3193 * we're leaking frames; and set the 3194 * MORE flag as appropriate. 3195 */ 3196 ath_tx_leak_count_update(sc, atid, bf); 3197 3198 /* 3199 * Dispatch the frame. 3200 */ 3201 ath_tx_xmit_normal(sc, txq, bf); 3202 } else { 3203 /* Busy; queue */ 3204 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 3205 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3206 ath_tx_tid_sched(sc, atid); 3207 } 3208 } 3209 3210 /* 3211 * Only set the clrdmask bit if none of the nodes are currently 3212 * filtered. 3213 * 3214 * XXX TODO: go through all the callers and check to see 3215 * which are being called in the context of looping over all 3216 * TIDs (eg, if all tids are being paused, resumed, etc.) 
3217 * That'll avoid O(n^2) complexity here. 3218 */ 3219 static void 3220 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3221 { 3222 int i; 3223 3224 ATH_TX_LOCK_ASSERT(sc); 3225 3226 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3227 if (an->an_tid[i].isfiltered == 1) 3228 return; 3229 } 3230 an->clrdmask = 1; 3231 } 3232 3233 /* 3234 * Configure the per-TID node state. 3235 * 3236 * This likely belongs in if_ath_node.c but I can't think of anywhere 3237 * else to put it just yet. 3238 * 3239 * This sets up the SLISTs and the mutex as appropriate. 3240 */ 3241 void 3242 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3243 { 3244 int i, j; 3245 struct ath_tid *atid; 3246 3247 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3248 atid = &an->an_tid[i]; 3249 3250 /* XXX now with this bzer(), is the field 0'ing needed? */ 3251 bzero(atid, sizeof(*atid)); 3252 3253 TAILQ_INIT(&atid->tid_q); 3254 TAILQ_INIT(&atid->filtq.tid_q); 3255 atid->tid = i; 3256 atid->an = an; 3257 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3258 atid->tx_buf[j] = NULL; 3259 atid->baw_head = atid->baw_tail = 0; 3260 atid->paused = 0; 3261 atid->sched = 0; 3262 atid->hwq_depth = 0; 3263 atid->cleanup_inprogress = 0; 3264 if (i == IEEE80211_NONQOS_TID) 3265 atid->ac = ATH_NONQOS_TID_AC; 3266 else 3267 atid->ac = TID_TO_WME_AC(i); 3268 } 3269 an->clrdmask = 1; /* Always start by setting this bit */ 3270 } 3271 3272 /* 3273 * Pause the current TID. This stops packets from being transmitted 3274 * on it. 3275 * 3276 * Since this is also called from upper layers as well as the driver, 3277 * it will get the TID lock. 3278 */ 3279 static void 3280 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3281 { 3282 3283 ATH_TX_LOCK_ASSERT(sc); 3284 tid->paused++; 3285 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", 3286 __func__, 3287 tid->an->an_node.ni_macaddr, ":", 3288 tid->tid, 3289 tid->paused); 3290 } 3291 3292 /* 3293 * Unpause the current TID, and schedule it if needed. 3294 */ 3295 static void 3296 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3297 { 3298 ATH_TX_LOCK_ASSERT(sc); 3299 3300 /* 3301 * There's some odd places where ath_tx_tid_resume() is called 3302 * when it shouldn't be; this works around that particular issue 3303 * until it's actually resolved. 3304 */ 3305 if (tid->paused == 0) { 3306 athdev_printf(sc->sc_dev, 3307 "%s: [%6D]: tid=%d, paused=0?\n", 3308 __func__, 3309 tid->an->an_node.ni_macaddr, ":", 3310 tid->tid); 3311 } else { 3312 tid->paused--; 3313 } 3314 3315 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3316 "%s: [%6D]: tid=%d, unpaused = %d\n", 3317 __func__, 3318 tid->an->an_node.ni_macaddr, ":", 3319 tid->tid, 3320 tid->paused); 3321 3322 if (tid->paused) 3323 return; 3324 3325 /* 3326 * Override the clrdmask configuration for the next frame 3327 * from this TID, just to get the ball rolling. 3328 */ 3329 ath_tx_set_clrdmask(sc, tid->an); 3330 3331 if (tid->axq_depth == 0) 3332 return; 3333 3334 /* XXX isfiltered shouldn't ever be 0 at this point */ 3335 if (tid->isfiltered == 1) { 3336 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3337 __func__); 3338 return; 3339 } 3340 3341 ath_tx_tid_sched(sc, tid); 3342 3343 /* 3344 * Queue the software TX scheduler. 3345 */ 3346 ath_tx_swq_kick(sc); 3347 } 3348 3349 /* 3350 * Add the given ath_buf to the TID filtered frame list. 3351 * This requires the TID be filtered. 
3352 */ 3353 static void 3354 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3355 struct ath_buf *bf) 3356 { 3357 3358 ATH_TX_LOCK_ASSERT(sc); 3359 3360 if (!tid->isfiltered) 3361 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3362 __func__); 3363 3364 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3365 3366 /* Set the retry bit and bump the retry counter */ 3367 ath_tx_set_retry(sc, bf); 3368 sc->sc_stats.ast_tx_swfiltered++; 3369 3370 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3371 } 3372 3373 /* 3374 * Handle a completed filtered frame from the given TID. 3375 * This just enables/pauses the filtered frame state if required 3376 * and appends the filtered frame to the filtered queue. 3377 */ 3378 static void 3379 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3380 struct ath_buf *bf) 3381 { 3382 3383 ATH_TX_LOCK_ASSERT(sc); 3384 3385 if (! tid->isfiltered) { 3386 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", 3387 __func__, tid->tid); 3388 tid->isfiltered = 1; 3389 ath_tx_tid_pause(sc, tid); 3390 } 3391 3392 /* Add the frame to the filter queue */ 3393 ath_tx_tid_filt_addbuf(sc, tid, bf); 3394 } 3395 3396 /* 3397 * Complete the filtered frame TX completion. 3398 * 3399 * If there are no more frames in the hardware queue, unpause/unfilter 3400 * the TID if applicable. Otherwise we will wait for a node PS transition 3401 * to unfilter. 3402 */ 3403 static void 3404 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3405 { 3406 struct ath_buf *bf; 3407 int do_resume = 0; 3408 3409 ATH_TX_LOCK_ASSERT(sc); 3410 3411 if (tid->hwq_depth != 0) 3412 return; 3413 3414 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", 3415 __func__, tid->tid); 3416 if (tid->isfiltered == 1) { 3417 tid->isfiltered = 0; 3418 do_resume = 1; 3419 } 3420 3421 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3422 ath_tx_set_clrdmask(sc, tid->an); 3423 3424 /* XXX this is really quite inefficient */ 3425 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3426 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3427 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3428 } 3429 3430 /* And only resume if we had paused before */ 3431 if (do_resume) 3432 ath_tx_tid_resume(sc, tid); 3433 } 3434 3435 /* 3436 * Called when a single (aggregate or otherwise) frame is completed. 3437 * 3438 * Returns 0 if the buffer could be added to the filtered list 3439 * (cloned or otherwise), 1 if the buffer couldn't be added to the 3440 * filtered list (failed clone; expired retry) and the caller should 3441 * free it and handle it like a failure (eg by sending a BAR.) 3442 * 3443 * since the buffer may be cloned, bf must be not touched after this 3444 * if the return value is 0. 3445 */ 3446 static int 3447 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3448 struct ath_buf *bf) 3449 { 3450 struct ath_buf *nbf; 3451 int retval; 3452 3453 ATH_TX_LOCK_ASSERT(sc); 3454 3455 /* 3456 * Don't allow a filtered frame to live forever. 3457 */ 3458 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3459 sc->sc_stats.ast_tx_swretrymax++; 3460 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3461 "%s: bf=%p, seqno=%d, exceeded retries\n", 3462 __func__, 3463 bf, 3464 SEQNO(bf->bf_state.bfs_seqno)); 3465 retval = 1; /* error */ 3466 goto finish; 3467 } 3468 3469 /* 3470 * A busy buffer can't be added to the retry list. 3471 * It needs to be cloned. 
3472 */ 3473 if (bf->bf_flags & ATH_BUF_BUSY) { 3474 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3475 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3476 "%s: busy buffer clone: %p -> %p\n", 3477 __func__, bf, nbf); 3478 } else { 3479 nbf = bf; 3480 } 3481 3482 if (nbf == NULL) { 3483 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3484 "%s: busy buffer couldn't be cloned (%p)!\n", 3485 __func__, bf); 3486 retval = 1; /* error */ 3487 } else { 3488 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3489 retval = 0; /* ok */ 3490 } 3491 finish: 3492 ath_tx_tid_filt_comp_complete(sc, tid); 3493 3494 return (retval); 3495 } 3496 3497 static void 3498 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3499 struct ath_buf *bf_first, ath_bufhead *bf_q) 3500 { 3501 struct ath_buf *bf, *bf_next, *nbf; 3502 3503 ATH_TX_LOCK_ASSERT(sc); 3504 3505 bf = bf_first; 3506 while (bf) { 3507 bf_next = bf->bf_next; 3508 bf->bf_next = NULL; /* Remove it from the aggr list */ 3509 3510 /* 3511 * Don't allow a filtered frame to live forever. 3512 */ 3513 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3514 sc->sc_stats.ast_tx_swretrymax++; 3515 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3516 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", 3517 __func__, 3518 tid->tid, 3519 bf, 3520 SEQNO(bf->bf_state.bfs_seqno)); 3521 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3522 goto next; 3523 } 3524 3525 if (bf->bf_flags & ATH_BUF_BUSY) { 3526 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3527 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3528 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", 3529 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); 3530 } else { 3531 nbf = bf; 3532 } 3533 3534 /* 3535 * If the buffer couldn't be cloned, add it to bf_q; 3536 * the caller will free the buffer(s) as required. 3537 */ 3538 if (nbf == NULL) { 3539 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3540 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", 3541 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); 3542 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3543 } else { 3544 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3545 } 3546 next: 3547 bf = bf_next; 3548 } 3549 3550 ath_tx_tid_filt_comp_complete(sc, tid); 3551 } 3552 3553 /* 3554 * Suspend the queue because we need to TX a BAR. 3555 */ 3556 static void 3557 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3558 { 3559 3560 ATH_TX_LOCK_ASSERT(sc); 3561 3562 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3563 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3564 __func__, 3565 tid->tid, 3566 tid->bar_wait, 3567 tid->bar_tx); 3568 3569 /* We shouldn't be called when bar_tx is 1 */ 3570 if (tid->bar_tx) { 3571 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3572 "%s: bar_tx is 1?!\n", __func__); 3573 } 3574 3575 /* If we've already been called, just be patient. */ 3576 if (tid->bar_wait) 3577 return; 3578 3579 /* Wait! */ 3580 tid->bar_wait = 1; 3581 3582 /* Only one pause, no matter how many frames fail */ 3583 ath_tx_tid_pause(sc, tid); 3584 } 3585 3586 /* 3587 * We've finished with BAR handling - either we succeeded or 3588 * failed. Either way, unsuspend TX. 
3589 */ 3590 static void 3591 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3592 { 3593 3594 ATH_TX_LOCK_ASSERT(sc); 3595 3596 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3597 "%s: %6D: TID=%d, called\n", 3598 __func__, 3599 tid->an->an_node.ni_macaddr, 3600 ":", 3601 tid->tid); 3602 3603 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3604 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3605 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3606 __func__, tid->an->an_node.ni_macaddr, ":", 3607 tid->tid, tid->bar_tx, tid->bar_wait); 3608 } 3609 3610 tid->bar_tx = tid->bar_wait = 0; 3611 ath_tx_tid_resume(sc, tid); 3612 } 3613 3614 /* 3615 * Return whether we're ready to TX a BAR frame. 3616 * 3617 * Requires the TID lock be held. 3618 */ 3619 static int 3620 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3621 { 3622 3623 ATH_TX_LOCK_ASSERT(sc); 3624 3625 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3626 return (0); 3627 3628 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3629 "%s: %6D: TID=%d, bar ready\n", 3630 __func__, 3631 tid->an->an_node.ni_macaddr, 3632 ":", 3633 tid->tid); 3634 3635 return (1); 3636 } 3637 3638 /* 3639 * Check whether the current TID is ready to have a BAR 3640 * TXed and if so, do the TX. 3641 * 3642 * Since the TID/TXQ lock can't be held during a call to 3643 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3644 * sending the BAR and locking it again. 3645 * 3646 * Eventually, the code to send the BAR should be broken out 3647 * from this routine so the lock doesn't have to be reacquired 3648 * just to be immediately dropped by the caller. 3649 */ 3650 static void 3651 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3652 { 3653 struct ieee80211_tx_ampdu *tap; 3654 3655 ATH_TX_LOCK_ASSERT(sc); 3656 3657 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3658 "%s: %6D: TID=%d, called\n", 3659 __func__, 3660 tid->an->an_node.ni_macaddr, 3661 ":", 3662 tid->tid); 3663 3664 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3665 3666 /* 3667 * This is an error condition! 3668 */ 3669 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3670 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3671 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3672 __func__, tid->an->an_node.ni_macaddr, ":", 3673 tid->tid, tid->bar_tx, tid->bar_wait); 3674 return; 3675 } 3676 3677 /* Don't do anything if we still have pending frames */ 3678 if (tid->hwq_depth > 0) { 3679 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3680 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3681 __func__, 3682 tid->an->an_node.ni_macaddr, 3683 ":", 3684 tid->tid, 3685 tid->hwq_depth); 3686 return; 3687 } 3688 3689 /* We're now about to TX */ 3690 tid->bar_tx = 1; 3691 3692 /* 3693 * Override the clrdmask configuration for the next frame, 3694 * just to get the ball rolling. 3695 */ 3696 ath_tx_set_clrdmask(sc, tid->an); 3697 3698 /* 3699 * Calculate new BAW left edge, now that all frames have either 3700 * succeeded or failed. 3701 * 3702 * XXX verify this is _actually_ the valid value to begin at! 3703 */ 3704 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3705 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3706 __func__, 3707 tid->an->an_node.ni_macaddr, 3708 ":", 3709 tid->tid, 3710 tap->txa_start); 3711 3712 /* Try sending the BAR frame */ 3713 /* We can't hold the lock here! */ 3714 3715 ATH_TX_UNLOCK(sc); 3716 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3717 /* Success? Now we wait for notification that it's done */ 3718 ATH_TX_LOCK(sc); 3719 return; 3720 } 3721 3722 /* Failure? 
           For now, warn loudly and continue */
        ATH_TX_LOCK(sc);
        DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
            "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
            __func__, tid->an->an_node.ni_macaddr, ":",
            tid->tid);
        ath_tx_tid_bar_unsuspend(sc, tid);
}

static void
ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
{

        ATH_TX_LOCK_ASSERT(sc);

        /*
         * If the current TID is running AMPDU, update
         * the BAW.
         */
        if (ath_tx_ampdu_running(sc, an, tid->tid) &&
            bf->bf_state.bfs_dobaw) {
                /*
                 * Only remove the frame from the BAW if it's
                 * been transmitted at least once; this means
                 * the frame was in the BAW to begin with.
                 */
                if (bf->bf_state.bfs_retries > 0) {
                        ath_tx_update_baw(sc, an, tid, bf);
                        bf->bf_state.bfs_dobaw = 0;
                }
#if 0
                /*
                 * This has become a non-fatal error now
                 */
                if (! bf->bf_state.bfs_addedbaw)
                        DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
                            "%s: wasn't added: seqno %d\n",
                            __func__, SEQNO(bf->bf_state.bfs_seqno));
#endif
        }

        /* Strip it out of an aggregate list if it was in one */
        bf->bf_next = NULL;

        /* Insert on the free queue to be freed by the caller */
        TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
}

static void
ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
    const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
{
        struct ieee80211_node *ni = &an->an_node;
        struct ath_txq *txq;
        struct ieee80211_tx_ampdu *tap;

        txq = sc->sc_ac2q[tid->ac];
        tap = ath_tx_get_tx_tid(an, tid->tid);

        DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
            "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
            "seqno=%d, retry=%d\n",
            __func__,
            pfx,
            ni->ni_macaddr,
            ":",
            bf,
            bf->bf_state.bfs_addedbaw,
            bf->bf_state.bfs_dobaw,
            SEQNO(bf->bf_state.bfs_seqno),
            bf->bf_state.bfs_retries);
        DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
            "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
            __func__,
            pfx,
            ni->ni_macaddr,
            ":",
            bf,
            txq->axq_qnum,
            txq->axq_depth,
            txq->axq_aggr_depth);
        DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
            "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
            "isfiltered=%d\n",
            __func__,
            pfx,
            ni->ni_macaddr,
            ":",
            bf,
            tid->axq_depth,
            tid->hwq_depth,
            tid->bar_wait,
            tid->isfiltered);
        DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
            "%s: %s: %6D: tid %d: "
            "sched=%d, paused=%d, "
            "incomp=%d, baw_head=%d, "
            "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
            __func__,
            pfx,
            ni->ni_macaddr,
            ":",
            tid->tid,
            tid->sched, tid->paused,
            tid->incomp, tid->baw_head,
            tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
            ni->ni_txseqs[tid->tid]);

        /* XXX Dump the frame, see what it is? */
        if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
                ieee80211_dump_pkt(ni->ni_ic,
                    mtod(bf->bf_m, const uint8_t *),
                    bf->bf_m->m_len, 0, -1);
}

/*
 * Free any packets currently pending in the software TX queue.
 *
 * This will be called when a node is being deleted.
 *
 * It can also be called on an active node during an interface
 * reset or state transition.
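 *
 * (For example, ath_tx_txq_drain() below calls this for every TID
 * still scheduled on a hardware queue whilst a reset is draining the
 * queues.)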
3845  *
3846  * (From Linux/reference):
3847  *
3848  * TODO: For frame(s) that are in the retry state, we will reuse the
3849  * sequence number(s) without setting the retry bit. The
3850  * alternative is to give up on these and BAR the receiver's window
3851  * forward.
3852  */
3853 static void
3854 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3855     struct ath_tid *tid, ath_bufhead *bf_cq)
3856 {
3857     struct ath_buf *bf;
3858     struct ieee80211_tx_ampdu *tap;
3859     struct ieee80211_node *ni = &an->an_node;
3860     int t;
3861
3862     tap = ath_tx_get_tx_tid(an, tid->tid);
3863
3864     ATH_TX_LOCK_ASSERT(sc);
3865
3866     /* Walk the queue, free frames */
3867     t = 0;
3868     for (;;) {
3869         bf = ATH_TID_FIRST(tid);
3870         if (bf == NULL) {
3871             break;
3872         }
3873
3874         if (t == 0) {
3875             ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3876             // t = 1;
3877         }
3878
3879         ATH_TID_REMOVE(tid, bf, bf_list);
3880         ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3881     }
3882
3883     /* And now, drain the filtered frame queue */
3884     t = 0;
3885     for (;;) {
3886         bf = ATH_TID_FILT_FIRST(tid);
3887         if (bf == NULL)
3888             break;
3889
3890         if (t == 0) {
3891             ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3892             // t = 1;
3893         }
3894
3895         ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3896         ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3897     }
3898
3899     /*
3900      * Override the clrdmask configuration for the next frame
3901      * in case there is some future transmission, just to get
3902      * the ball rolling.
3903      *
3904      * This won't hurt things if the TID is about to be freed.
3905      */
3906     ath_tx_set_clrdmask(sc, tid->an);
3907
3908     /*
3909      * Now that it's completed, grab the TID lock and update
3910      * the sequence number and BAW window.
3911      * Because sequence numbers have been assigned to frames
3912      * that haven't been sent yet, it's entirely possible
3913      * we'll be called with some pending frames that have not
3914      * been transmitted.
3915      *
3916      * The cleaner solution is to do the sequence number allocation
3917      * when the packet is first transmitted - and thus the "retries"
3918      * check above would be enough to update the BAW/seqno.
3919      */
3920
3921     /* But don't do it for non-QoS TIDs */
3922     if (tap) {
3923 #if 1
3924         DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3925             "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3926             __func__,
3927             ni->ni_macaddr,
3928             ":",
3929             an,
3930             tid->tid,
3931             tap->txa_start);
3932 #endif
3933         ni->ni_txseqs[tid->tid] = tap->txa_start;
3934         tid->baw_tail = tid->baw_head;
3935     }
3936 }
3937
3938 /*
3939  * Reset the TID state. This must only be called once the node has
3940  * had its frames flushed from this TID, to ensure that no other
3941  * pause / unpause logic can kick in.
3942  */
3943 static void
3944 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3945 {
3946
3947 #if 0
3948     tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3949     tid->paused = tid->sched = tid->addba_tx_pending = 0;
3950     tid->incomp = tid->cleanup_inprogress = 0;
3951 #endif
3952
3953     /*
3954      * If we have a bar_wait set, we need to unpause the TID
3955      * here. Otherwise once cleanup has finished, the TID won't
3956      * have the right paused counter.
3957      *
3958      * XXX I'm not going through resume here - I don't want the
3959      * node to be rescheduled just yet. This however should be
3960      * methodized!
3961      */
3962     if (tid->bar_wait) {
3963         if (tid->paused > 0) {
3964             tid->paused --;
3965         }
3966     }
3967
3968     /*
3969      * XXX same with a currently filtered TID.
3970      *
3971      * Since this is being called during a flush, we assume that
3972      * the filtered frame list is actually empty.
3973      *
3974      * XXX TODO: add in a check to ensure that the filtered queue
3975      * depth is actually 0!
3976      */
3977     if (tid->isfiltered) {
3978         if (tid->paused > 0) {
3979             tid->paused --;
3980         }
3981     }
3982
3983     /*
3984      * Clear BAR, filtered frames, scheduled and ADDBA pending.
3985      * The TID may be going through cleanup from the last association
3986      * where things in the BAW are still in the hardware queue.
3987      */
3988     tid->bar_wait = 0;
3989     tid->bar_tx = 0;
3990     tid->isfiltered = 0;
3991     tid->sched = 0;
3992     tid->addba_tx_pending = 0;
3993
3994     /*
3995      * XXX TODO: it may just be enough to walk the HWQs and mark
3996      * frames for that node as non-aggregate; or mark the ath_node
3997      * with something that indicates that aggregation is no longer
3998      * occurring. Then we can just toss the BAW complaints and
3999      * do a complete hard reset of state here - no pause, no
4000      * complete counter, etc.
4001      */
4002
4003 }
4004
4005 /*
4006  * Flush all software queued packets for the given node.
4007  *
4008  * This occurs when a completion handler frees the last buffer
4009  * for a node, and the node is thus freed. This causes the node
4010  * to be cleaned up, which ends up calling ath_tx_node_flush.
4011  */
4012 void
4013 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4014 {
4015     int tid;
4016     ath_bufhead bf_cq;
4017     struct ath_buf *bf;
4018
4019     TAILQ_INIT(&bf_cq);
4020
4021     ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4022         &an->an_node);
4023
4024     ATH_TX_LOCK(sc);
4025     DPRINTF(sc, ATH_DEBUG_NODE,
4026         "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4027         "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4028         __func__,
4029         an->an_node.ni_macaddr,
4030         ":",
4031         an->an_is_powersave,
4032         an->an_stack_psq,
4033         an->an_tim_set,
4034         an->an_swq_depth,
4035         an->clrdmask,
4036         an->an_leak_count);
4037
4038     for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4039         struct ath_tid *atid = &an->an_tid[tid];
4040
4041         /* Free packets */
4042         ath_tx_tid_drain(sc, an, atid, &bf_cq);
4043
4044         /* Remove this tid from the list of active tids */
4045         ath_tx_tid_unsched(sc, atid);
4046
4047         /* Reset the per-TID pause, BAR, etc state */
4048         ath_tx_tid_reset(sc, atid);
4049     }
4050
4051     /*
4052      * Clear global leak count
4053      */
4054     an->an_leak_count = 0;
4055     ATH_TX_UNLOCK(sc);
4056
4057     /* Handle completed frames */
4058     while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4059         TAILQ_REMOVE(&bf_cq, bf, bf_list);
4060         ath_tx_default_comp(sc, bf, 0);
4061     }
4062 }
4063
4064 /*
4065  * Drain all the software TXQs currently with traffic queued.
4066  */
4067 void
4068 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4069 {
4070     struct ath_tid *tid;
4071     ath_bufhead bf_cq;
4072     struct ath_buf *bf;
4073
4074     TAILQ_INIT(&bf_cq);
4075     ATH_TX_LOCK(sc);
4076
4077     /*
4078      * Iterate over all active tids for the given txq,
4079      * flushing and unsched'ing them
4080      */
4081     while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4082         tid = TAILQ_FIRST(&txq->axq_tidq);
4083         ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4084         ath_tx_tid_unsched(sc, tid);
4085     }
4086
4087     ATH_TX_UNLOCK(sc);
4088
4089     while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4090         TAILQ_REMOVE(&bf_cq, bf, bf_list);
4091         ath_tx_default_comp(sc, bf, 0);
4092     }
4093 }
4094
4095 /*
4096  * Handle completion of non-aggregate session frames.
4097 * 4098 * This (currently) doesn't implement software retransmission of 4099 * non-aggregate frames! 4100 * 4101 * Software retransmission of non-aggregate frames needs to obey 4102 * the strict sequence number ordering, and drop any frames that 4103 * will fail this. 4104 * 4105 * For now, filtered frames and frame transmission will cause 4106 * all kinds of issues. So we don't support them. 4107 * 4108 * So anyone queuing frames via ath_tx_normal_xmit() or 4109 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4110 */ 4111 void 4112 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4113 { 4114 struct ieee80211_node *ni = bf->bf_node; 4115 struct ath_node *an = ATH_NODE(ni); 4116 int tid = bf->bf_state.bfs_tid; 4117 struct ath_tid *atid = &an->an_tid[tid]; 4118 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4119 4120 /* The TID state is protected behind the TXQ lock */ 4121 ATH_TX_LOCK(sc); 4122 4123 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4124 __func__, bf, fail, atid->hwq_depth - 1); 4125 4126 atid->hwq_depth--; 4127 4128 #if 0 4129 /* 4130 * If the frame was filtered, stick it on the filter frame 4131 * queue and complain about it. It shouldn't happen! 4132 */ 4133 if ((ts->ts_status & HAL_TXERR_FILT) || 4134 (ts->ts_status != 0 && atid->isfiltered)) { 4135 DPRINTF(sc, ATH_DEBUG_SW_TX, 4136 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4137 __func__, 4138 atid->isfiltered, 4139 ts->ts_status); 4140 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4141 } 4142 #endif 4143 if (atid->isfiltered) 4144 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4145 if (atid->hwq_depth < 0) 4146 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4147 __func__, atid->hwq_depth); 4148 4149 /* If the TID is being cleaned up, track things */ 4150 /* XXX refactor! */ 4151 if (atid->cleanup_inprogress) { 4152 atid->incomp--; 4153 if (atid->incomp == 0) { 4154 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4155 "%s: TID %d: cleaned up! resume!\n", 4156 __func__, tid); 4157 atid->cleanup_inprogress = 0; 4158 ath_tx_tid_resume(sc, atid); 4159 } 4160 } 4161 4162 /* 4163 * If the queue is filtered, potentially mark it as complete 4164 * and reschedule it as needed. 4165 * 4166 * This is required as there may be a subsequent TX descriptor 4167 * for this end-node that has CLRDMASK set, so it's quite possible 4168 * that a filtered frame will be followed by a non-filtered 4169 * (complete or otherwise) frame. 4170 * 4171 * XXX should we do this before we complete the frame? 4172 */ 4173 if (atid->isfiltered) 4174 ath_tx_tid_filt_comp_complete(sc, atid); 4175 ATH_TX_UNLOCK(sc); 4176 4177 /* 4178 * punt to rate control if we're not being cleaned up 4179 * during a hw queue drain and the frame wanted an ACK. 4180 */ 4181 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4182 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4183 ts, bf->bf_state.bfs_pktlen, 4184 1, (ts->ts_status == 0) ? 0 : 1); 4185 4186 ath_tx_default_comp(sc, bf, fail); 4187 } 4188 4189 /* 4190 * Handle cleanup of aggregate session packets that aren't 4191 * an A-MPDU. 4192 * 4193 * There's no need to update the BAW here - the session is being 4194 * torn down. 
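 *
 * (Editor's note, not in the original comment: the loop below does
 * still roll the BAW forward for any frame that had been added to
 * it, so the window accounting stays consistent while the teardown
 * drains the remaining hardware-queued frames.)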
4195 */ 4196 static void 4197 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4198 { 4199 struct ieee80211_node *ni = bf->bf_node; 4200 struct ath_node *an = ATH_NODE(ni); 4201 int tid = bf->bf_state.bfs_tid; 4202 struct ath_tid *atid = &an->an_tid[tid]; 4203 4204 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4205 __func__, tid, atid->incomp); 4206 4207 ATH_TX_LOCK(sc); 4208 atid->incomp--; 4209 4210 /* XXX refactor! */ 4211 if (bf->bf_state.bfs_dobaw) { 4212 ath_tx_update_baw(sc, an, atid, bf); 4213 if (!bf->bf_state.bfs_addedbaw) 4214 DPRINTF(sc, ATH_DEBUG_SW_TX, 4215 "%s: wasn't added: seqno %d\n", 4216 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4217 } 4218 4219 if (atid->incomp == 0) { 4220 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4221 "%s: TID %d: cleaned up! resume!\n", 4222 __func__, tid); 4223 atid->cleanup_inprogress = 0; 4224 ath_tx_tid_resume(sc, atid); 4225 } 4226 ATH_TX_UNLOCK(sc); 4227 4228 ath_tx_default_comp(sc, bf, 0); 4229 } 4230 4231 4232 /* 4233 * This as it currently stands is a bit dumb. Ideally we'd just 4234 * fail the frame the normal way and have it permanently fail 4235 * via the normal aggregate completion path. 4236 */ 4237 static void 4238 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4239 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4240 { 4241 struct ath_tid *atid = &an->an_tid[tid]; 4242 struct ath_buf *bf, *bf_next; 4243 4244 ATH_TX_LOCK_ASSERT(sc); 4245 4246 /* 4247 * Remove this frame from the queue. 4248 */ 4249 ATH_TID_REMOVE(atid, bf_head, bf_list); 4250 4251 /* 4252 * Loop over all the frames in the aggregate. 4253 */ 4254 bf = bf_head; 4255 while (bf != NULL) { 4256 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4257 4258 /* 4259 * If it's been added to the BAW we need to kick 4260 * it out of the BAW before we continue. 4261 * 4262 * XXX if it's an aggregate, assert that it's in the 4263 * BAW - we shouldn't have it be in an aggregate 4264 * otherwise! 4265 */ 4266 if (bf->bf_state.bfs_addedbaw) { 4267 ath_tx_update_baw(sc, an, atid, bf); 4268 bf->bf_state.bfs_dobaw = 0; 4269 } 4270 4271 /* 4272 * Give it the default completion handler. 4273 */ 4274 bf->bf_comp = ath_tx_normal_comp; 4275 bf->bf_next = NULL; 4276 4277 /* 4278 * Add it to the list to free. 4279 */ 4280 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4281 4282 /* 4283 * Now advance to the next frame in the aggregate. 4284 */ 4285 bf = bf_next; 4286 } 4287 } 4288 4289 /* 4290 * Performs transmit side cleanup when TID changes from aggregated to 4291 * unaggregated and during reassociation. 4292 * 4293 * For now, this just tosses everything from the TID software queue 4294 * whether or not it has been retried and marks the TID as 4295 * pending completion if there's anything for this TID queued to 4296 * the hardware. 4297 * 4298 * The caller is responsible for pausing the TID and unpausing the 4299 * TID if no cleanup was required. Otherwise the cleanup path will 4300 * unpause the TID once the last hardware queued frame is completed. 4301 */ 4302 static void 4303 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4304 ath_bufhead *bf_cq) 4305 { 4306 struct ath_tid *atid = &an->an_tid[tid]; 4307 struct ath_buf *bf, *bf_next; 4308 4309 ATH_TX_LOCK_ASSERT(sc); 4310 4311 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4312 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4313 atid->cleanup_inprogress); 4314 4315 /* 4316 * Move the filtered frames to the TX queue, before 4317 * we run off and discard/process things. 
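 *
 * Popping from the tail of the filtered list and pushing each frame
 * onto the head of the software queue preserves relative order; e.g.
 * with filtq = [f1 f2 f3] and swq = [s1 s2] (editor's illustration
 * of the loop below):
 *
 *    pop f3, push head -> swq = [f3 s1 s2]
 *    pop f2, push head -> swq = [f2 f3 s1 s2]
 *    pop f1, push head -> swq = [f1 f2 f3 s1 s2]
 *
 * so the filtered frames end up in front, in their original order.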
4318 */ 4319 4320 /* XXX this is really quite inefficient */ 4321 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4322 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4323 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4324 } 4325 4326 /* 4327 * Update the frames in the software TX queue: 4328 * 4329 * + Discard retry frames in the queue 4330 * + Fix the completion function to be non-aggregate 4331 */ 4332 bf = ATH_TID_FIRST(atid); 4333 while (bf) { 4334 /* 4335 * Grab the next frame in the list, we may 4336 * be fiddling with the list. 4337 */ 4338 bf_next = TAILQ_NEXT(bf, bf_list); 4339 4340 /* 4341 * Free the frame and all subframes. 4342 */ 4343 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); 4344 4345 /* 4346 * Next frame! 4347 */ 4348 bf = bf_next; 4349 } 4350 4351 /* 4352 * If there's anything in the hardware queue we wait 4353 * for the TID HWQ to empty. 4354 */ 4355 if (atid->hwq_depth > 0) { 4356 /* 4357 * XXX how about we kill atid->incomp, and instead 4358 * replace it with a macro that checks that atid->hwq_depth 4359 * is 0? 4360 */ 4361 atid->incomp = atid->hwq_depth; 4362 atid->cleanup_inprogress = 1; 4363 } 4364 4365 if (atid->cleanup_inprogress) 4366 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4367 "%s: TID %d: cleanup needed: %d packets\n", 4368 __func__, tid, atid->incomp); 4369 4370 /* Owner now must free completed frames */ 4371 } 4372 4373 static struct ath_buf * 4374 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4375 struct ath_tid *tid, struct ath_buf *bf) 4376 { 4377 struct ath_buf *nbf; 4378 int error; 4379 4380 /* 4381 * Clone the buffer. This will handle the dma unmap and 4382 * copy the node reference to the new buffer. If this 4383 * works out, 'bf' will have no DMA mapping, no mbuf 4384 * pointer and no node reference. 4385 */ 4386 nbf = ath_buf_clone(sc, bf); 4387 4388 #if 0 4389 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4390 __func__); 4391 #endif 4392 4393 if (nbf == NULL) { 4394 /* Failed to clone */ 4395 DPRINTF(sc, ATH_DEBUG_XMIT, 4396 "%s: failed to clone a busy buffer\n", 4397 __func__); 4398 return NULL; 4399 } 4400 4401 /* Setup the dma for the new buffer */ 4402 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4403 if (error != 0) { 4404 DPRINTF(sc, ATH_DEBUG_XMIT, 4405 "%s: failed to setup dma for clone\n", 4406 __func__); 4407 /* 4408 * Put this at the head of the list, not tail; 4409 * that way it doesn't interfere with the 4410 * busy buffer logic (which uses the tail of 4411 * the list.) 4412 */ 4413 ATH_TXBUF_LOCK(sc); 4414 ath_returnbuf_head(sc, nbf); 4415 ATH_TXBUF_UNLOCK(sc); 4416 return NULL; 4417 } 4418 4419 /* Update BAW if required, before we free the original buf */ 4420 if (bf->bf_state.bfs_dobaw) 4421 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4422 4423 /* Free original buffer; return new buffer */ 4424 ath_freebuf(sc, bf); 4425 4426 return nbf; 4427 } 4428 4429 /* 4430 * Handle retrying an unaggregate frame in an aggregate 4431 * session. 4432 * 4433 * If too many retries occur, pause the TID, wait for 4434 * any further retransmits (as there's no reason why 4435 * non-aggregate frames in an aggregate session are 4436 * transmitted in-order; they just have to be in-BAW) 4437 * and then queue a BAR. 
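 *
 * Decision flow of the routine below (editor's summary of the code,
 * not new behaviour):
 *
 *    buffer busy?             -> clone it; on clone failure, force
 *                                bfs_retries past SWMAX_RETRIES
 *    retries >= SWMAX_RETRIES -> remove from the BAW, suspend the
 *                                TID, send a BAR once the HWQ drains
 *    otherwise                -> mark as a retry and requeue at the
 *                                head of the software queue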
4438 */ 4439 static void 4440 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4441 { 4442 struct ieee80211_node *ni = bf->bf_node; 4443 struct ath_node *an = ATH_NODE(ni); 4444 int tid = bf->bf_state.bfs_tid; 4445 struct ath_tid *atid = &an->an_tid[tid]; 4446 struct ieee80211_tx_ampdu *tap; 4447 4448 ATH_TX_LOCK(sc); 4449 4450 tap = ath_tx_get_tx_tid(an, tid); 4451 4452 /* 4453 * If the buffer is marked as busy, we can't directly 4454 * reuse it. Instead, try to clone the buffer. 4455 * If the clone is successful, recycle the old buffer. 4456 * If the clone is unsuccessful, set bfs_retries to max 4457 * to force the next bit of code to free the buffer 4458 * for us. 4459 */ 4460 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4461 (bf->bf_flags & ATH_BUF_BUSY)) { 4462 struct ath_buf *nbf; 4463 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4464 if (nbf) 4465 /* bf has been freed at this point */ 4466 bf = nbf; 4467 else 4468 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4469 } 4470 4471 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4472 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4473 "%s: exceeded retries; seqno %d\n", 4474 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4475 sc->sc_stats.ast_tx_swretrymax++; 4476 4477 /* Update BAW anyway */ 4478 if (bf->bf_state.bfs_dobaw) { 4479 ath_tx_update_baw(sc, an, atid, bf); 4480 if (! bf->bf_state.bfs_addedbaw) 4481 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4482 "%s: wasn't added: seqno %d\n", 4483 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4484 } 4485 bf->bf_state.bfs_dobaw = 0; 4486 4487 /* Suspend the TX queue and get ready to send the BAR */ 4488 ath_tx_tid_bar_suspend(sc, atid); 4489 4490 /* Send the BAR if there are no other frames waiting */ 4491 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4492 ath_tx_tid_bar_tx(sc, atid); 4493 4494 ATH_TX_UNLOCK(sc); 4495 4496 /* Free buffer, bf is free after this call */ 4497 ath_tx_default_comp(sc, bf, 0); 4498 return; 4499 } 4500 4501 /* 4502 * This increments the retry counter as well as 4503 * sets the retry flag in the ath_buf and packet 4504 * body. 4505 */ 4506 ath_tx_set_retry(sc, bf); 4507 sc->sc_stats.ast_tx_swretries++; 4508 4509 /* 4510 * Insert this at the head of the queue, so it's 4511 * retried before any current/subsequent frames. 4512 */ 4513 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4514 ath_tx_tid_sched(sc, atid); 4515 /* Send the BAR if there are no other frames waiting */ 4516 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4517 ath_tx_tid_bar_tx(sc, atid); 4518 4519 ATH_TX_UNLOCK(sc); 4520 } 4521 4522 /* 4523 * Common code for aggregate excessive retry/subframe retry. 4524 * If retrying, queues buffers to bf_q. If not, frees the 4525 * buffers. 4526 * 4527 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4528 */ 4529 static int 4530 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4531 ath_bufhead *bf_q) 4532 { 4533 struct ieee80211_node *ni = bf->bf_node; 4534 struct ath_node *an = ATH_NODE(ni); 4535 int tid = bf->bf_state.bfs_tid; 4536 struct ath_tid *atid = &an->an_tid[tid]; 4537 4538 ATH_TX_LOCK_ASSERT(sc); 4539 4540 /* XXX clr11naggr should be done for all subframes */ 4541 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4542 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4543 4544 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4545 4546 /* 4547 * If the buffer is marked as busy, we can't directly 4548 * reuse it. Instead, try to clone the buffer. 4549 * If the clone is successful, recycle the old buffer. 
4550 * If the clone is unsuccessful, set bfs_retries to max 4551 * to force the next bit of code to free the buffer 4552 * for us. 4553 */ 4554 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4555 (bf->bf_flags & ATH_BUF_BUSY)) { 4556 struct ath_buf *nbf; 4557 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4558 if (nbf) 4559 /* bf has been freed at this point */ 4560 bf = nbf; 4561 else 4562 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4563 } 4564 4565 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4566 sc->sc_stats.ast_tx_swretrymax++; 4567 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4568 "%s: max retries: seqno %d\n", 4569 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4570 ath_tx_update_baw(sc, an, atid, bf); 4571 if (!bf->bf_state.bfs_addedbaw) 4572 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4573 "%s: wasn't added: seqno %d\n", 4574 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4575 bf->bf_state.bfs_dobaw = 0; 4576 return 1; 4577 } 4578 4579 ath_tx_set_retry(sc, bf); 4580 sc->sc_stats.ast_tx_swretries++; 4581 bf->bf_next = NULL; /* Just to make sure */ 4582 4583 /* Clear the aggregate state */ 4584 bf->bf_state.bfs_aggr = 0; 4585 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4586 bf->bf_state.bfs_nframes = 1; 4587 4588 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4589 return 0; 4590 } 4591 4592 /* 4593 * error pkt completion for an aggregate destination 4594 */ 4595 static void 4596 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4597 struct ath_tid *tid) 4598 { 4599 struct ieee80211_node *ni = bf_first->bf_node; 4600 struct ath_node *an = ATH_NODE(ni); 4601 struct ath_buf *bf_next, *bf; 4602 ath_bufhead bf_q; 4603 int drops = 0; 4604 struct ieee80211_tx_ampdu *tap; 4605 ath_bufhead bf_cq; 4606 4607 TAILQ_INIT(&bf_q); 4608 TAILQ_INIT(&bf_cq); 4609 4610 /* 4611 * Update rate control - all frames have failed. 4612 * 4613 * XXX use the length in the first frame in the series; 4614 * XXX just so things are consistent for now. 4615 */ 4616 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4617 &bf_first->bf_status.ds_txstat, 4618 bf_first->bf_state.bfs_pktlen, 4619 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4620 4621 ATH_TX_LOCK(sc); 4622 tap = ath_tx_get_tx_tid(an, tid->tid); 4623 sc->sc_stats.ast_tx_aggr_failall++; 4624 4625 /* Retry all subframes */ 4626 bf = bf_first; 4627 while (bf) { 4628 bf_next = bf->bf_next; 4629 bf->bf_next = NULL; /* Remove it from the aggr list */ 4630 sc->sc_stats.ast_tx_aggr_fail++; 4631 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4632 drops++; 4633 bf->bf_next = NULL; 4634 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4635 } 4636 bf = bf_next; 4637 } 4638 4639 /* Prepend all frames to the beginning of the queue */ 4640 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4641 TAILQ_REMOVE(&bf_q, bf, bf_list); 4642 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4643 } 4644 4645 /* 4646 * Schedule the TID to be re-tried. 4647 */ 4648 ath_tx_tid_sched(sc, tid); 4649 4650 /* 4651 * send bar if we dropped any frames 4652 * 4653 * Keep the txq lock held for now, as we need to ensure 4654 * that ni_txseqs[] is consistent (as it's being updated 4655 * in the ifnet TX context or raw TX context.) 
4656      */
4657     if (drops) {
4658         /* Suspend the TX queue and get ready to send the BAR */
4659         ath_tx_tid_bar_suspend(sc, tid);
4660     }
4661
4662     /*
4663      * Send BAR if required
4664      */
4665     if (ath_tx_tid_bar_tx_ready(sc, tid))
4666         ath_tx_tid_bar_tx(sc, tid);
4667
4668     ATH_TX_UNLOCK(sc);
4669
4670     /* Complete frames which errored out */
4671     while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4672         TAILQ_REMOVE(&bf_cq, bf, bf_list);
4673         ath_tx_default_comp(sc, bf, 0);
4674     }
4675 }
4676
4677 /*
4678  * Handle clean-up of packets from an aggregate list.
4679  *
4680  * There's no need to update the BAW here - the session is being
4681  * torn down.
4682  */
4683 static void
4684 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4685 {
4686     struct ath_buf *bf, *bf_next;
4687     struct ieee80211_node *ni = bf_first->bf_node;
4688     struct ath_node *an = ATH_NODE(ni);
4689     int tid = bf_first->bf_state.bfs_tid;
4690     struct ath_tid *atid = &an->an_tid[tid];
4691
4692     ATH_TX_LOCK(sc);
4693
4694     /* update incomp */
4695     atid->incomp--;
4696
4697     /* Update the BAW */
4698     bf = bf_first;
4699     while (bf) {
4700         /* XXX refactor! */
4701         if (bf->bf_state.bfs_dobaw) {
4702             ath_tx_update_baw(sc, an, atid, bf);
4703             if (!bf->bf_state.bfs_addedbaw)
4704                 DPRINTF(sc, ATH_DEBUG_SW_TX,
4705                     "%s: wasn't added: seqno %d\n",
4706                     __func__, SEQNO(bf->bf_state.bfs_seqno));
4707         }
4708         bf = bf->bf_next;
4709     }
4710
4711     if (atid->incomp == 0) {
4712         DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4713             "%s: TID %d: cleaned up! resume!\n",
4714             __func__, tid);
4715         atid->cleanup_inprogress = 0;
4716         ath_tx_tid_resume(sc, atid);
4717     }
4718
4719     /* Send BAR if required */
4720     /* XXX why would we send a BAR when transitioning to non-aggregation? */
4721     /*
4722      * XXX TODO: we should likely just tear down the BAR state here,
4723      * rather than sending a BAR.
4724      */
4725     if (ath_tx_tid_bar_tx_ready(sc, atid))
4726         ath_tx_tid_bar_tx(sc, atid);
4727
4728     ATH_TX_UNLOCK(sc);
4729
4730     /* Handle frame completion as individual frames */
4731     bf = bf_first;
4732     while (bf) {
4733         bf_next = bf->bf_next;
4734         bf->bf_next = NULL;
4735         ath_tx_default_comp(sc, bf, 1);
4736         bf = bf_next;
4737     }
4738 }
4739
4740 /*
4741  * Handle completion of a set of aggregate frames.
4742  *
4743  * Note: the completion handler is the last descriptor in the aggregate,
4744  * not the last descriptor in the first frame.
4745  */
4746 static void
4747 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4748     int fail)
4749 {
4750     //struct ath_desc *ds = bf->bf_lastds;
4751     struct ieee80211_node *ni = bf_first->bf_node;
4752     struct ath_node *an = ATH_NODE(ni);
4753     int tid = bf_first->bf_state.bfs_tid;
4754     struct ath_tid *atid = &an->an_tid[tid];
4755     struct ath_tx_status ts;
4756     struct ieee80211_tx_ampdu *tap;
4757     ath_bufhead bf_q;
4758     ath_bufhead bf_cq;
4759     int seq_st, tx_ok;
4760     int hasba, isaggr;
4761     uint32_t ba[2];
4762     struct ath_buf *bf, *bf_next;
4763     int ba_index;
4764     int drops = 0;
4765     int nframes = 0, nbad = 0, nf;
4766     int pktlen;
4767     /* XXX there's too much on the stack? */
4768     struct ath_rc_series rc[ATH_RC_NUM];
4769     int txseq;
4770
4771     DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4772         __func__, atid->hwq_depth);
4773
4774     /*
4775      * Take a copy; this may be needed -after- bf_first
4776      * has been completed and freed.
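 *
 * (The assignment below copies struct ath_tx_status by value, so
 * the local 'ts' stays usable even after the underlying ath_buf
 * has been completed and recycled.)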
4777      */
4778     ts = bf_first->bf_status.ds_txstat;
4779
4780     TAILQ_INIT(&bf_q);
4781     TAILQ_INIT(&bf_cq);
4782
4783     /* The TID state is kept behind the TXQ lock */
4784     ATH_TX_LOCK(sc);
4785
4786     atid->hwq_depth--;
4787     if (atid->hwq_depth < 0)
4788         DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4789             __func__, atid->hwq_depth);
4790
4791     /*
4792      * If the TID is filtered, handle completing the filter
4793      * transition before potentially kicking it to the cleanup
4794      * function.
4795      *
4796      * XXX this is duplicate work, ew.
4797      */
4798     if (atid->isfiltered)
4799         ath_tx_tid_filt_comp_complete(sc, atid);
4800
4801     /*
4802      * Punt cleanup to the relevant function, not our problem now
4803      */
4804     if (atid->cleanup_inprogress) {
4805         if (atid->isfiltered)
4806             DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4807                 "%s: isfiltered=1, normal_comp?\n",
4808                 __func__);
4809         ATH_TX_UNLOCK(sc);
4810         ath_tx_comp_cleanup_aggr(sc, bf_first);
4811         return;
4812     }
4813
4814     /*
4815      * If the frame is filtered, transition to filtered frame
4816      * mode and add this to the filtered frame list.
4817      *
4818      * XXX TODO: figure out how this interoperates with
4819      * BAR, pause and cleanup states.
4820      */
4821     if ((ts.ts_status & HAL_TXERR_FILT) ||
4822         (ts.ts_status != 0 && atid->isfiltered)) {
4823         if (fail != 0)
4824             DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4825                 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4826         ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4827
4828         /* Remove from BAW */
4829         TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4830             if (bf->bf_state.bfs_addedbaw)
4831                 drops++;
4832             if (bf->bf_state.bfs_dobaw) {
4833                 ath_tx_update_baw(sc, an, atid, bf);
4834                 if (!bf->bf_state.bfs_addedbaw)
4835                     DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4836                         "%s: wasn't added: seqno %d\n",
4837                         __func__,
4838                         SEQNO(bf->bf_state.bfs_seqno));
4839             }
4840             bf->bf_state.bfs_dobaw = 0;
4841         }
4842         /*
4843          * If any intermediate frames in the BAW were dropped when
4844          * handling filtering things, send a BAR.
4845          */
4846         if (drops)
4847             ath_tx_tid_bar_suspend(sc, atid);
4848
4849         /*
4850          * Finish up by sending a BAR if required and freeing
4851          * the frames outside of the TX lock.
4852          */
4853         goto finish_send_bar;
4854     }
4855
4856     /*
4857      * XXX for now, use the first frame in the aggregate for
4858      * XXX rate control completion; it's at least consistent.
4859      */
4860     pktlen = bf_first->bf_state.bfs_pktlen;
4861
4862     /*
4863      * Handle errors first!
4864      *
4865      * Here, handle _any_ error as an "exceeded retries" error.
4866      * Later on (when filtered frames are to be specially handled)
4867      * it'll have to be expanded.
4868      */
4869 #if 0
4870     if (ts.ts_status & HAL_TXERR_XRETRY) {
4871 #endif
4872     if (ts.ts_status != 0) {
4873         ATH_TX_UNLOCK(sc);
4874         ath_tx_comp_aggr_error(sc, bf_first, atid);
4875         return;
4876     }
4877
4878     tap = ath_tx_get_tx_tid(an, tid);
4879
4880     /*
4881      * extract starting sequence and block-ack bitmap
4882      */
4883     /* XXX endian-ness of seq_st, ba? */
4884     seq_st = ts.ts_seqnum;
4885     hasba = !! (ts.ts_flags & HAL_TX_BA);
4886     tx_ok = (ts.ts_status == 0);
4887     isaggr = bf_first->bf_state.bfs_aggr;
4888     ba[0] = ts.ts_ba_low;
4889     ba[1] = ts.ts_ba_high;
4890
4891     /*
4892      * Copy the TX completion status and the rate control
4893      * series from the first descriptor, as it may be freed
4894      * before the rate control code can get its grubby fingers
4895      * into things.
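 *
 * In the walk further below, each subframe's fate comes from the
 * block-ack bitmap: ATH_BA_INDEX() yields the bit offset of a
 * sequence number relative to seq_st and ATH_BA_ISSET() tests that
 * bit in ba[]. E.g. (editor's illustration, assuming the usual
 * modulo-4096 802.11 sequence space):
 *
 *    seq_st = 4094, seqno = 1 -> index = (1 - 4094) mod 4096 = 3
 *
 * so ATH_BA_ISSET(ba, 3) tests bit 3 of ba[0]; indices 32..63 land
 * in ba[1].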
4896 */ 4897 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4898 4899 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4900 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4901 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4902 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4903 isaggr, seq_st, hasba, ba[0], ba[1]); 4904 4905 /* 4906 * The reference driver doesn't do this; it simply ignores 4907 * this check in its entirety. 4908 * 4909 * I've seen this occur when using iperf to send traffic 4910 * out tid 1 - the aggregate frames are all marked as TID 1, 4911 * but the TXSTATUS has TID=0. So, let's just ignore this 4912 * check. 4913 */ 4914 #if 0 4915 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4916 if (tid != ts.ts_tid) { 4917 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 4918 __func__, tid, ts.ts_tid); 4919 tx_ok = 0; 4920 } 4921 #endif 4922 4923 /* AR5416 BA bug; this requires an interface reset */ 4924 if (isaggr && tx_ok && (! hasba)) { 4925 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4926 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 4927 "seq_st=%d\n", 4928 __func__, hasba, tx_ok, isaggr, seq_st); 4929 /* XXX TODO: schedule an interface reset */ 4930 #ifdef ATH_DEBUG 4931 ath_printtxbuf(sc, bf_first, 4932 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 4933 #endif 4934 } 4935 4936 /* 4937 * Walk the list of frames, figure out which ones were correctly 4938 * sent and which weren't. 4939 */ 4940 bf = bf_first; 4941 nf = bf_first->bf_state.bfs_nframes; 4942 4943 /* bf_first is going to be invalid once this list is walked */ 4944 bf_first = NULL; 4945 4946 /* 4947 * Walk the list of completed frames and determine 4948 * which need to be completed and which need to be 4949 * retransmitted. 4950 * 4951 * For completed frames, the completion functions need 4952 * to be called at the end of this function as the last 4953 * node reference may free the node. 4954 * 4955 * Finally, since the TXQ lock can't be held during the 4956 * completion callback (to avoid lock recursion), 4957 * the completion calls have to be done outside of the 4958 * lock. 4959 */ 4960 while (bf) { 4961 nframes++; 4962 ba_index = ATH_BA_INDEX(seq_st, 4963 SEQNO(bf->bf_state.bfs_seqno)); 4964 bf_next = bf->bf_next; 4965 bf->bf_next = NULL; /* Remove it from the aggr list */ 4966 4967 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4968 "%s: checking bf=%p seqno=%d; ack=%d\n", 4969 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 4970 ATH_BA_ISSET(ba, ba_index)); 4971 4972 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 4973 sc->sc_stats.ast_tx_aggr_ok++; 4974 ath_tx_update_baw(sc, an, atid, bf); 4975 bf->bf_state.bfs_dobaw = 0; 4976 if (!bf->bf_state.bfs_addedbaw) 4977 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4978 "%s: wasn't added: seqno %d\n", 4979 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4980 bf->bf_next = NULL; 4981 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4982 } else { 4983 sc->sc_stats.ast_tx_aggr_fail++; 4984 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4985 drops++; 4986 bf->bf_next = NULL; 4987 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4988 } 4989 nbad++; 4990 } 4991 bf = bf_next; 4992 } 4993 4994 /* 4995 * Now that the BAW updates have been done, unlock 4996 * 4997 * txseq is grabbed before the lock is released so we 4998 * have a consistent view of what -was- in the BAW. 4999 * Anything after this point will not yet have been 5000 * TXed. 
5001 */ 5002 txseq = tap->txa_start; 5003 ATH_TX_UNLOCK(sc); 5004 5005 if (nframes != nf) 5006 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5007 "%s: num frames seen=%d; bf nframes=%d\n", 5008 __func__, nframes, nf); 5009 5010 /* 5011 * Now we know how many frames were bad, call the rate 5012 * control code. 5013 */ 5014 if (fail == 0) 5015 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 5016 nbad); 5017 5018 /* 5019 * send bar if we dropped any frames 5020 */ 5021 if (drops) { 5022 /* Suspend the TX queue and get ready to send the BAR */ 5023 ATH_TX_LOCK(sc); 5024 ath_tx_tid_bar_suspend(sc, atid); 5025 ATH_TX_UNLOCK(sc); 5026 } 5027 5028 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5029 "%s: txa_start now %d\n", __func__, tap->txa_start); 5030 5031 ATH_TX_LOCK(sc); 5032 5033 /* Prepend all frames to the beginning of the queue */ 5034 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5035 TAILQ_REMOVE(&bf_q, bf, bf_list); 5036 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5037 } 5038 5039 /* 5040 * Reschedule to grab some further frames. 5041 */ 5042 ath_tx_tid_sched(sc, atid); 5043 5044 /* 5045 * If the queue is filtered, re-schedule as required. 5046 * 5047 * This is required as there may be a subsequent TX descriptor 5048 * for this end-node that has CLRDMASK set, so it's quite possible 5049 * that a filtered frame will be followed by a non-filtered 5050 * (complete or otherwise) frame. 5051 * 5052 * XXX should we do this before we complete the frame? 5053 */ 5054 if (atid->isfiltered) 5055 ath_tx_tid_filt_comp_complete(sc, atid); 5056 5057 finish_send_bar: 5058 5059 /* 5060 * Send BAR if required 5061 */ 5062 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5063 ath_tx_tid_bar_tx(sc, atid); 5064 5065 ATH_TX_UNLOCK(sc); 5066 5067 /* Do deferred completion */ 5068 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5069 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5070 ath_tx_default_comp(sc, bf, 0); 5071 } 5072 } 5073 5074 /* 5075 * Handle completion of unaggregated frames in an ADDBA 5076 * session. 5077 * 5078 * Fail is set to 1 if the entry is being freed via a call to 5079 * ath_tx_draintxq(). 5080 */ 5081 static void 5082 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5083 { 5084 struct ieee80211_node *ni = bf->bf_node; 5085 struct ath_node *an = ATH_NODE(ni); 5086 int tid = bf->bf_state.bfs_tid; 5087 struct ath_tid *atid = &an->an_tid[tid]; 5088 struct ath_tx_status ts; 5089 int drops = 0; 5090 5091 /* 5092 * Take a copy of this; filtering/cloning the frame may free the 5093 * bf pointer. 5094 */ 5095 ts = bf->bf_status.ds_txstat; 5096 5097 /* 5098 * Update rate control status here, before we possibly 5099 * punt to retry or cleanup. 5100 * 5101 * Do it outside of the TXQ lock. 5102 */ 5103 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5104 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5105 &bf->bf_status.ds_txstat, 5106 bf->bf_state.bfs_pktlen, 5107 1, (ts.ts_status == 0) ? 0 : 1); 5108 5109 /* 5110 * This is called early so atid->hwq_depth can be tracked. 5111 * This unfortunately means that it's released and regrabbed 5112 * during retry and cleanup. That's rather inefficient. 
5113 */ 5114 ATH_TX_LOCK(sc); 5115 5116 if (tid == IEEE80211_NONQOS_TID) 5117 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 5118 5119 DPRINTF(sc, ATH_DEBUG_SW_TX, 5120 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 5121 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 5122 SEQNO(bf->bf_state.bfs_seqno)); 5123 5124 atid->hwq_depth--; 5125 if (atid->hwq_depth < 0) 5126 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 5127 __func__, atid->hwq_depth); 5128 5129 /* 5130 * If the TID is filtered, handle completing the filter 5131 * transition before potentially kicking it to the cleanup 5132 * function. 5133 */ 5134 if (atid->isfiltered) 5135 ath_tx_tid_filt_comp_complete(sc, atid); 5136 5137 /* 5138 * If a cleanup is in progress, punt to comp_cleanup; 5139 * rather than handling it here. It's thus their 5140 * responsibility to clean up, call the completion 5141 * function in net80211, etc. 5142 */ 5143 if (atid->cleanup_inprogress) { 5144 if (atid->isfiltered) 5145 DPRINTF(sc, ATH_DEBUG_SW_TX, 5146 "%s: isfiltered=1, normal_comp?\n", 5147 __func__); 5148 ATH_TX_UNLOCK(sc); 5149 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5150 __func__); 5151 ath_tx_comp_cleanup_unaggr(sc, bf); 5152 return; 5153 } 5154 5155 /* 5156 * XXX TODO: how does cleanup, BAR and filtered frame handling 5157 * overlap? 5158 * 5159 * If the frame is filtered OR if it's any failure but 5160 * the TID is filtered, the frame must be added to the 5161 * filtered frame list. 5162 * 5163 * However - a busy buffer can't be added to the filtered 5164 * list as it will end up being recycled without having 5165 * been made available for the hardware. 5166 */ 5167 if ((ts.ts_status & HAL_TXERR_FILT) || 5168 (ts.ts_status != 0 && atid->isfiltered)) { 5169 int freeframe; 5170 5171 if (fail != 0) 5172 DPRINTF(sc, ATH_DEBUG_SW_TX, 5173 "%s: isfiltered=1, fail=%d\n", 5174 __func__, fail); 5175 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5176 /* 5177 * If freeframe=0 then bf is no longer ours; don't 5178 * touch it. 5179 */ 5180 if (freeframe) { 5181 /* Remove from BAW */ 5182 if (bf->bf_state.bfs_addedbaw) 5183 drops++; 5184 if (bf->bf_state.bfs_dobaw) { 5185 ath_tx_update_baw(sc, an, atid, bf); 5186 if (!bf->bf_state.bfs_addedbaw) 5187 DPRINTF(sc, ATH_DEBUG_SW_TX, 5188 "%s: wasn't added: seqno %d\n", 5189 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5190 } 5191 bf->bf_state.bfs_dobaw = 0; 5192 } 5193 5194 /* 5195 * If the frame couldn't be filtered, treat it as a drop and 5196 * prepare to send a BAR. 5197 */ 5198 if (freeframe && drops) 5199 ath_tx_tid_bar_suspend(sc, atid); 5200 5201 /* 5202 * Send BAR if required 5203 */ 5204 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5205 ath_tx_tid_bar_tx(sc, atid); 5206 5207 ATH_TX_UNLOCK(sc); 5208 /* 5209 * If freeframe is set, then the frame couldn't be 5210 * cloned and bf is still valid. Just complete/free it. 5211 */ 5212 if (freeframe) 5213 ath_tx_default_comp(sc, bf, fail); 5214 5215 return; 5216 } 5217 /* 5218 * Don't bother with the retry check if all frames 5219 * are being failed (eg during queue deletion.) 5220 */ 5221 #if 0 5222 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5223 #endif 5224 if (fail == 0 && ts.ts_status != 0) { 5225 ATH_TX_UNLOCK(sc); 5226 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5227 __func__); 5228 ath_tx_aggr_retry_unaggr(sc, bf); 5229 return; 5230 } 5231 5232 /* Success? 
Complete */
5233     DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5234         __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5235     if (bf->bf_state.bfs_dobaw) {
5236         ath_tx_update_baw(sc, an, atid, bf);
5237         bf->bf_state.bfs_dobaw = 0;
5238         if (!bf->bf_state.bfs_addedbaw)
5239             DPRINTF(sc, ATH_DEBUG_SW_TX,
5240                 "%s: wasn't added: seqno %d\n",
5241                 __func__, SEQNO(bf->bf_state.bfs_seqno));
5242     }
5243
5244     /*
5245      * If the queue is filtered, re-schedule as required.
5246      *
5247      * This is required as there may be a subsequent TX descriptor
5248      * for this end-node that has CLRDMASK set, so it's quite possible
5249      * that a filtered frame will be followed by a non-filtered
5250      * (complete or otherwise) frame.
5251      *
5252      * XXX should we do this before we complete the frame?
5253      */
5254     if (atid->isfiltered)
5255         ath_tx_tid_filt_comp_complete(sc, atid);
5256
5257     /*
5258      * Send BAR if required
5259      */
5260     if (ath_tx_tid_bar_tx_ready(sc, atid))
5261         ath_tx_tid_bar_tx(sc, atid);
5262
5263     ATH_TX_UNLOCK(sc);
5264
5265     ath_tx_default_comp(sc, bf, fail);
5266     /* bf is freed at this point */
5267 }
5268
5269 void
5270 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5271 {
5272     if (bf->bf_state.bfs_aggr)
5273         ath_tx_aggr_comp_aggr(sc, bf, fail);
5274     else
5275         ath_tx_aggr_comp_unaggr(sc, bf, fail);
5276 }
5277
5278 /*
5279  * Schedule some packets from the given node/TID to the hardware.
5280  *
5281  * This is the aggregate version.
5282  */
5283 void
5284 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5285     struct ath_tid *tid)
5286 {
5287     struct ath_buf *bf;
5288     struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5289     struct ieee80211_tx_ampdu *tap;
5290     ATH_AGGR_STATUS status;
5291     ath_bufhead bf_q;
5292
5293     DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5294     ATH_TX_LOCK_ASSERT(sc);
5295
5296     /*
5297      * XXX TODO: If we're called for a queue that we're leaking frames to,
5298      * ensure we only leak one.
5299      */
5300
5301     tap = ath_tx_get_tx_tid(an, tid->tid);
5302
5303     if (tid->tid == IEEE80211_NONQOS_TID)
5304         DPRINTF(sc, ATH_DEBUG_SW_TX,
5305             "%s: called for TID=NONQOS_TID?\n", __func__);
5306
5307     for (;;) {
5308         status = ATH_AGGR_DONE;
5309
5310         /*
5311          * If the upper layer has paused the TID, don't
5312          * queue any further packets.
5313          *
5314          * This can also occur from the completion task because
5315          * of packet loss; but as it's serialised with this code,
5316          * it won't "appear" half way through queuing packets.
5317          */
5318         if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5319             break;
5320
5321         bf = ATH_TID_FIRST(tid);
5322         if (bf == NULL) {
5323             break;
5324         }
5325
5326         /*
5327          * If the packet doesn't fall within the BAW (eg a NULL
5328          * data frame), schedule it directly; continue.
5329          */
5330         if (! bf->bf_state.bfs_dobaw) {
5331             DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5332                 "%s: non-baw packet\n",
5333                 __func__);
5334             ATH_TID_REMOVE(tid, bf, bf_list);
5335
5336             if (bf->bf_state.bfs_nframes > 1)
5337                 DPRINTF(sc, ATH_DEBUG_SW_TX,
5338                     "%s: aggr=%d, nframes=%d\n",
5339                     __func__,
5340                     bf->bf_state.bfs_aggr,
5341                     bf->bf_state.bfs_nframes);
5342
5343             /*
5344              * This shouldn't happen - such frames shouldn't
5345              * ever have been queued as an aggregate in the
5346              * first place. However, make sure the fields
5347              * are correctly set up just to be totally sure.
5348              */
5349             bf->bf_state.bfs_aggr = 0;
5350             bf->bf_state.bfs_nframes = 1;
5351
5352             /* Update CLRDMASK just before this frame is queued */
5353             ath_tx_update_clrdmask(sc, tid, bf);
5354
5355             ath_tx_do_ratelookup(sc, bf);
5356             ath_tx_calc_duration(sc, bf);
5357             ath_tx_calc_protection(sc, bf);
5358             ath_tx_set_rtscts(sc, bf);
5359             ath_tx_rate_fill_rcflags(sc, bf);
5360             ath_tx_setds(sc, bf);
5361             ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5362
5363             sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5364
5365             /* Queue the packet; continue */
5366             goto queuepkt;
5367         }
5368
5369         TAILQ_INIT(&bf_q);
5370
5371         /*
5372          * Do a rate control lookup on the first frame in the
5373          * list. The rate control code needs that to occur
5374          * before it can determine whether to TX.
5375          * It's inaccurate because the rate control code doesn't
5376          * really "do" aggregate lookups, so it only considers
5377          * the size of the first frame.
5378          */
5379         ath_tx_do_ratelookup(sc, bf);
5380         bf->bf_state.bfs_rc[3].rix = 0;
5381         bf->bf_state.bfs_rc[3].tries = 0;
5382
5383         ath_tx_calc_duration(sc, bf);
5384         ath_tx_calc_protection(sc, bf);
5385
5386         ath_tx_set_rtscts(sc, bf);
5387         ath_tx_rate_fill_rcflags(sc, bf);
5388
5389         status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5390
5391         DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5392             "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5393
5394         /*
5395          * No frames to be picked up - out of BAW
5396          */
5397         if (TAILQ_EMPTY(&bf_q))
5398             break;
5399
5400         /*
5401          * This assumes that the descriptors in the ath_bufhead
5402          * are already linked together via bf_next pointers.
5403          */
5404         bf = TAILQ_FIRST(&bf_q);
5405
5406         if (status == ATH_AGGR_8K_LIMITED)
5407             sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5408
5409         /*
5410          * If it's the only frame, send it as a non-aggregate and
5411          * assume that ath_tx_form_aggr() has checked
5412          * whether it's in the BAW and added it appropriately.
5413          */
5414         if (bf->bf_state.bfs_nframes == 1) {
5415             DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5416                 "%s: single-frame aggregate\n", __func__);
5417
5418             /* Update CLRDMASK just before this frame is queued */
5419             ath_tx_update_clrdmask(sc, tid, bf);
5420
5421             bf->bf_state.bfs_aggr = 0;
5422             bf->bf_state.bfs_ndelim = 0;
5423             ath_tx_setds(sc, bf);
5424             ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5425             if (status == ATH_AGGR_BAW_CLOSED)
5426                 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5427             else
5428                 sc->sc_aggr_stats.aggr_single_pkt++;
5429         } else {
5430             DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5431                 "%s: multi-frame aggregate: %d frames, "
5432                 "length %d\n",
5433                 __func__, bf->bf_state.bfs_nframes,
5434                 bf->bf_state.bfs_al);
5435             bf->bf_state.bfs_aggr = 1;
5436             sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5437             sc->sc_aggr_stats.aggr_aggr_pkt++;
5438
5439             /* Update CLRDMASK just before this frame is queued */
5440             ath_tx_update_clrdmask(sc, tid, bf);
5441
5442             /*
5443              * Calculate the duration/protection as required.
5444              */
5445             ath_tx_calc_duration(sc, bf);
5446             ath_tx_calc_protection(sc, bf);
5447
5448             /*
5449              * Update the rate and rtscts information based on the
5450              * rate decision made by the rate control code;
5451              * the first frame in the aggregate needs it.
5452              */
5453             ath_tx_set_rtscts(sc, bf);
5454
5455             /*
5456              * Set up the relevant descriptor fields
5457              * for aggregation. The first descriptor
5458              * already points to the rest in the chain.
5459              */
5460             ath_tx_setds_11n(sc, bf);
5461
5462         }
5463     queuepkt:
5464         /* Set completion handler, multi-frame aggregate or not */
5465         bf->bf_comp = ath_tx_aggr_comp;
5466
5467         if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5468             DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5469
5470         /*
5471          * Update leak count and frame config if we're leaking frames.
5472          *
5473          * XXX TODO: it should update all frames in an aggregate
5474          * correctly!
5475          */
5476         ath_tx_leak_count_update(sc, tid, bf);
5477
5478         /* Punt to txq */
5479         ath_tx_handoff(sc, txq, bf);
5480
5481         /* Track outstanding buffer count to hardware */
5482         /* aggregates are "one" buffer */
5483         tid->hwq_depth++;
5484
5485         /*
5486          * Break out if ath_tx_form_aggr() indicated
5487          * there can't be any further progress (eg BAW is full.)
5488          * Checking for an empty txq is done above.
5489          *
5490          * XXX locking on txq here?
5491          */
5492         /* XXX TXQ locking */
5493         if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5494             (status == ATH_AGGR_BAW_CLOSED ||
5495             status == ATH_AGGR_LEAK_CLOSED))
5496             break;
5497     }
5498 }
5499
5500 /*
5501  * Schedule some packets from the given node/TID to the hardware.
5502  *
5503  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5504  * It just dumps frames into the TXQ. We should limit how deep
5505  * the transmit queue can grow for frames dispatched to the given
5506  * TXQ.
5507  *
5508  * To avoid locking issues, either we need to own the TXQ lock
5509  * at this point, or we need to pass in the maximum frame count
5510  * from the caller.
5511  */
5512 void
5513 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5514     struct ath_tid *tid)
5515 {
5516     struct ath_buf *bf;
5517     struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5518
5519     DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5520         __func__, an, tid->tid);
5521
5522     ATH_TX_LOCK_ASSERT(sc);
5523
5524     /* Check - is AMPDU pending or running? If so, print out something */
5525     if (ath_tx_ampdu_pending(sc, an, tid->tid))
5526         DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5527             __func__, tid->tid);
5528     if (ath_tx_ampdu_running(sc, an, tid->tid))
5529         DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5530             __func__, tid->tid);
5531
5532     for (;;) {
5533
5534         /*
5535          * If the upper layers have paused the TID, don't
5536          * queue any further packets.
5537          *
5538          * XXX if we are leaking frames, make sure we decrement
5539          * that counter _and_ we continue here.
5540          */
5541         if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5542             break;
5543
5544         bf = ATH_TID_FIRST(tid);
5545         if (bf == NULL) {
5546             break;
5547         }
5548
5549         ATH_TID_REMOVE(tid, bf, bf_list);
5550
5551         /* Sanity check! */
5552         if (tid->tid != bf->bf_state.bfs_tid) {
5553             DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5554                 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5555                 tid->tid);
5556         }
5557         /* Normal completion handler */
5558         bf->bf_comp = ath_tx_normal_comp;
5559
5560         /*
5561          * Override this for now, until the non-aggregate
5562          * completion handler correctly handles software retransmits.
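 *
 * (Editor's note: forcing CLRDMASK here should mean a failed frame
 * can't leave the destination mask blocking subsequent traffic to
 * this node, at the cost of the filtered-frame retry behaviour for
 * these non-aggregate frames.)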
5563 */ 5564 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5565 5566 /* Update CLRDMASK just before this frame is queued */ 5567 ath_tx_update_clrdmask(sc, tid, bf); 5568 5569 /* Program descriptors + rate control */ 5570 ath_tx_do_ratelookup(sc, bf); 5571 ath_tx_calc_duration(sc, bf); 5572 ath_tx_calc_protection(sc, bf); 5573 ath_tx_set_rtscts(sc, bf); 5574 ath_tx_rate_fill_rcflags(sc, bf); 5575 ath_tx_setds(sc, bf); 5576 5577 /* 5578 * Update the current leak count if 5579 * we're leaking frames; and set the 5580 * MORE flag as appropriate. 5581 */ 5582 ath_tx_leak_count_update(sc, tid, bf); 5583 5584 /* Track outstanding buffer count to hardware */ 5585 /* aggregates are "one" buffer */ 5586 tid->hwq_depth++; 5587 5588 /* Punt to hardware or software txq */ 5589 ath_tx_handoff(sc, txq, bf); 5590 } 5591 } 5592 5593 /* 5594 * Schedule some packets to the given hardware queue. 5595 * 5596 * This function walks the list of TIDs (ie, ath_node TIDs 5597 * with queued traffic) and attempts to schedule traffic 5598 * from them. 5599 * 5600 * TID scheduling is implemented as a FIFO, with TIDs being 5601 * added to the end of the queue after some frames have been 5602 * scheduled. 5603 */ 5604 void 5605 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5606 { 5607 struct ath_tid *tid, *next, *last; 5608 5609 ATH_TX_LOCK_ASSERT(sc); 5610 5611 /* 5612 * Don't schedule if the hardware queue is busy. 5613 * This (hopefully) gives some more time to aggregate 5614 * some packets in the aggregation queue. 5615 * 5616 * XXX It doesn't stop a parallel sender from sneaking 5617 * in transmitting a frame! 5618 */ 5619 /* XXX TXQ locking */ 5620 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5621 sc->sc_aggr_stats.aggr_sched_nopkt++; 5622 return; 5623 } 5624 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5625 sc->sc_aggr_stats.aggr_sched_nopkt++; 5626 return; 5627 } 5628 5629 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5630 5631 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5632 /* 5633 * Suspend paused queues here; they'll be resumed 5634 * once the addba completes or times out. 5635 */ 5636 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5637 __func__, tid->tid, tid->paused); 5638 ath_tx_tid_unsched(sc, tid); 5639 /* 5640 * This node may be in power-save and we're leaking 5641 * a frame; be careful. 5642 */ 5643 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 5644 goto loop_done; 5645 } 5646 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5647 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5648 else 5649 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5650 5651 /* Not empty? Re-schedule */ 5652 if (tid->axq_depth != 0) 5653 ath_tx_tid_sched(sc, tid); 5654 5655 /* 5656 * Give the software queue time to aggregate more 5657 * packets. If we aren't running aggregation then 5658 * we should still limit the hardware queue depth. 5659 */ 5660 /* XXX TXQ locking */ 5661 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5662 break; 5663 } 5664 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5665 break; 5666 } 5667 loop_done: 5668 /* 5669 * If this was the last entry on the original list, stop. 5670 * Otherwise nodes that have been rescheduled onto the end 5671 * of the TID FIFO list will just keep being rescheduled. 5672 * 5673 * XXX What should we do about nodes that were paused 5674 * but are pending a leaking frame in response to a ps-poll? 
5675 * They'll be put at the front of the list; so they'll 5676 * prematurely trigger this condition! Ew. 5677 */ 5678 if (tid == last) 5679 break; 5680 } 5681 } 5682 5683 /* 5684 * TX addba handling 5685 */ 5686 5687 /* 5688 * Return net80211 TID struct pointer, or NULL for none 5689 */ 5690 struct ieee80211_tx_ampdu * 5691 ath_tx_get_tx_tid(struct ath_node *an, int tid) 5692 { 5693 struct ieee80211_node *ni = &an->an_node; 5694 struct ieee80211_tx_ampdu *tap; 5695 5696 if (tid == IEEE80211_NONQOS_TID) 5697 return NULL; 5698 5699 tap = &ni->ni_tx_ampdu[tid]; 5700 return tap; 5701 } 5702 5703 /* 5704 * Is AMPDU-TX running? 5705 */ 5706 static int 5707 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5708 { 5709 struct ieee80211_tx_ampdu *tap; 5710 5711 if (tid == IEEE80211_NONQOS_TID) 5712 return 0; 5713 5714 tap = ath_tx_get_tx_tid(an, tid); 5715 if (tap == NULL) 5716 return 0; /* Not valid; default to not running */ 5717 5718 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5719 } 5720 5721 /* 5722 * Is AMPDU-TX negotiation pending? 5723 */ 5724 static int 5725 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5726 { 5727 struct ieee80211_tx_ampdu *tap; 5728 5729 if (tid == IEEE80211_NONQOS_TID) 5730 return 0; 5731 5732 tap = ath_tx_get_tx_tid(an, tid); 5733 if (tap == NULL) 5734 return 0; /* Not valid; default to not pending */ 5735 5736 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5737 } 5738 5739 /* 5740 * Is AMPDU-TX pending for the given TID? 5741 */ 5742 5743 5744 /* 5745 * Method to handle sending an ADDBA request. 5746 * 5747 * We tap this so the relevant flags can be set to pause the TID 5748 * whilst waiting for the response. 5749 * 5750 * XXX there's no timeout handler we can override? 5751 */ 5752 int 5753 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5754 int dialogtoken, int baparamset, int batimeout) 5755 { 5756 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5757 int tid = tap->txa_tid; 5758 struct ath_node *an = ATH_NODE(ni); 5759 struct ath_tid *atid = &an->an_tid[tid]; 5760 5761 /* 5762 * XXX danger Will Robinson! 5763 * 5764 * Although the taskqueue may be running and scheduling some more 5765 * packets, these should all be _before_ the addba sequence number. 5766 * However, net80211 will keep self-assigning sequence numbers 5767 * until addba has been negotiated. 5768 * 5769 * In the past, these packets would be "paused" (which still works 5770 * fine, as they're being scheduled to the driver in the same 5771 * serialised method which is calling the addba request routine) 5772 * and when the aggregation session begins, they'll be dequeued 5773 * as aggregate packets and added to the BAW. However, now there's 5774 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5775 * packets. Thus they never get included in the BAW tracking and 5776 * this can cause the initial burst of packets after the addba 5777 * negotiation to "hang", as they quickly fall outside the BAW. 5778 * 5779 * The "eventual" solution should be to tag these packets with 5780 * dobaw. Although net80211 has given us a sequence number, 5781 * it'll be "after" the left edge of the BAW and thus it'll 5782 * fall within it. 5783 */ 5784 ATH_TX_LOCK(sc); 5785 /* 5786 * This is a bit annoying. Until net80211 HT code inherits some 5787 * (any) locking, we may have this called in parallel BUT only 5788 * one response/timeout will be called. Grr. 
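 *
 * Pause/unpause bracketing across the ADDBA handshake, as
 * implemented in this file (editor's summary):
 *
 *    ath_addba_request()  -> ath_tx_tid_pause() the TID
 *    ath_addba_response() -> slide txa_start to ni_txseqs[tid],
 *                            then ath_tx_tid_resume()
 *    ath_addba_stop()     -> pause, ath_tx_tid_cleanup(), resume
 *                            once any cleanup completes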
/*
 * Method to handle sending an ADDBA request.
 *
 * We tap this so the relevant flags can be set to pause the TID
 * whilst waiting for the response.
 *
 * XXX there's no timeout handler we can override?
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	/*
	 * XXX danger Will Robinson!
	 *
	 * Although the taskqueue may be running and scheduling some more
	 * packets, these should all be _before_ the addba sequence number.
	 * However, net80211 will keep self-assigning sequence numbers
	 * until addba has been negotiated.
	 *
	 * In the past, these packets would be "paused" (which still works
	 * fine, as they're being scheduled to the driver in the same
	 * serialised method which is calling the addba request routine)
	 * and when the aggregation session begins, they'll be dequeued
	 * as aggregate packets and added to the BAW.  However, now there's
	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
	 * packets.  Thus they never get included in the BAW tracking and
	 * this can cause the initial burst of packets after the addba
	 * negotiation to "hang", as they quickly fall outside the BAW.
	 *
	 * The "eventual" solution should be to tag these packets with
	 * dobaw.  Although net80211 has given us a sequence number,
	 * it'll be "after" the left edge of the BAW and thus it'll
	 * fall within it.
	 */
	ATH_TX_LOCK(sc);
	/*
	 * This is a bit annoying.  Until net80211 HT code inherits some
	 * (any) locking, we may have this called in parallel BUT only
	 * one response/timeout will be called.  Grr.
	 */
	if (atid->addba_tx_pending == 0) {
		ath_tx_tid_pause(sc, atid);
		atid->addba_tx_pending = 1;
	}
	ATH_TX_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    dialogtoken, baparamset, batimeout);
	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point on should be treated as "aggregate"
 * frames (whether or not they end up in an A-MPDU) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated.  This means the initially-negotiated BAW left
 * edge won't match ni->ni_txseqs[tid].
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseqs[tid].
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames, thus updating the BAW.  For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int r;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
	    ni->ni_macaddr,
	    ":",
	    status, code, batimeout);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	/*
	 * Call this first, so the interface flags get updated
	 * before the TID is unpaused.  Otherwise a race condition
	 * exists where the unpaused TID still doesn't yet have
	 * IEEE80211_AGGR_RUNNING set.
	 */
	r = sc->sc_addba_response(ni, tap, status, code, batimeout);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	/*
	 * XXX dirty!
	 * Slide the BAW left edge to wherever net80211 left it for us.
	 * Read above for more information.
	 */
	tap->txa_start = ni->ni_txseqs[tid];
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
	return r;
}
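
#if 0
/*
 * N.B.: illustrative only, never compiled.  A made-up counter-example
 * showing why ath_addba_response() above calls the net80211 method
 * *before* resuming the TID; "ath_addba_response_racy" does it in the
 * wrong order.
 */
static int
ath_addba_response_racy(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap, int status, int code, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tap->txa_tid];

	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);	/* TID unpaused here ... */
	ATH_TX_UNLOCK(sc);

	/*
	 * ... but IEEE80211_AGGR_RUNNING isn't set until the net80211
	 * method runs below, so any frame scheduled in the window in
	 * between would go out via the non-aggregate path and never
	 * be added to the BAW.
	 */
	return sc->sc_addba_response(ni, tap, status, code, batimeout);
}
#endif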
/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");

	/*
	 * Pause TID traffic early, so there aren't any races.
	 * Unblock the pending BAR-held traffic if it's currently paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path.  This quietens
		 * the warning.  It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);

	/*
	 * In case there's a followup call to this, only call it
	 * if we don't have a cleanup in progress.
	 *
	 * Since we've paused the queue above, we need to make
	 * sure we unpause if there's already a cleanup in
	 * progress - it means something else is also doing
	 * this stuff, so we don't need to also keep it paused.
	 */
	if (atid->cleanup_inprogress) {
		ath_tx_tid_resume(sc, atid);
	} else {
		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
		/*
		 * Unpause the TID if no cleanup is required.
		 */
		if (! atid->cleanup_inprogress)
			ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %6D: TID %d: cleaning up TID\n",
		    __func__,
		    an->an_node.ni_macaddr,
		    ":",
		    i);
		/*
		 * In case there's a followup call to this, only call it
		 * if we don't have a cleanup in progress.
		 */
		if (! tid->cleanup_inprogress) {
			ath_tx_tid_pause(sc, tid);
			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
			/*
			 * Unpause the TID if no cleanup is required.
			 */
			if (! tid->cleanup_inprogress)
				ath_tx_tid_resume(sc, tid);
		}
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
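
#if 0
/*
 * N.B.: illustrative only, never compiled.  The bf_cq pattern used by
 * ath_addba_stop() and ath_tx_node_reassoc() above, reduced to its
 * skeleton ("ath_tx_fail_deferred_example" is a made-up name).
 * Completions can recurse back into net80211 and the TX path, so
 * buffers are gathered onto a local list whilst the TX lock is held
 * and only completed (failed, status=1) after the lock is dropped.
 */
static void
ath_tx_fail_deferred_example(struct ath_softc *sc, struct ath_node *an,
    int tid)
{
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_LOCK(sc);
	/* Gather the frames to fail whilst the lock is held */
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	ATH_TX_UNLOCK(sc);

	/* Now it's safe to call the completion handlers */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
#endif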
/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It will, however, call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;
	int old_txa_start;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);

	/* Note: This may update the BAW details */
	/*
	 * XXX What if this does slide the BAW along?  We need to somehow
	 * XXX either fix things up when it does happen, or prevent the
	 * XXX seqpending value from being anything other than exactly
	 * XXX what we want!
	 *
	 * XXX So for now: do this inside the TX lock and just correct
	 * XXX it afterwards.  The condition below should never happen;
	 * XXX if it does, all kinds of things need fixing.
	 */
	ATH_TX_LOCK(sc);
	old_txa_start = tap->txa_start;
	sc->sc_bar_response(ni, tap, status);
	if (tap->txa_start != old_txa_start) {
		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
		    __func__,
		    tid,
		    tap->txa_start,
		    old_txa_start);
	}
	tap->txa_start = old_txa_start;
	ATH_TX_UNLOCK(sc);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt 50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch?  (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: TID=%d, called; resuming\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tid);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID; which reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}
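
#if 0
/*
 * N.B.: illustrative only, never compiled.  ath_tx_node_is_asleep()
 * asserts the TX lock, so a caller is expected to look like this
 * ("ath_tx_example_check" is a made-up name).  The value is only
 * stable whilst the lock is held; act on it before unlocking if
 * consistency with the TID pause state matters.
 */
static int
ath_tx_example_check(struct ath_softc *sc, struct ath_node *an)
{
	int asleep;

	ATH_TX_LOCK(sc);
	asleep = ath_tx_node_is_asleep(sc, an);
	ATH_TX_UNLOCK(sc);

	return (asleep);
}
#endif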
/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works.  Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held.  This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations.  There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: node was already asleep!\n",
		    __func__, an->an_node.ni_macaddr, ":");
		ATH_TX_UNLOCK(sc);
		return;
	}

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* !? */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
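
#if 0
/*
 * N.B.: illustrative only, never compiled.  A rough sketch of how the
 * xmit method table filled in above might be consumed by attach-time
 * code; "ath_xmit_attach_example" is a made-up name and the real call
 * sites live in the attach path.  Indirecting through sc->sc_tx lets
 * chips with a different DMA scheme install their own methods without
 * the callers changing.
 */
static int
ath_xmit_attach_example(struct ath_softc *sc)
{
	int error;

	/* Install the legacy (non-EDMA) descriptor handling methods */
	ath_xmit_setup_legacy(sc);

	/* Call through the table, not the legacy function directly */
	error = sc->sc_tx.xmit_setup(sc);
	if (error != 0)
		return (error);

	return (0);
}
#endif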