1 /*- 2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 /* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 48 */ 49 #include "opt_ah.h" 50 #include "opt_wlan.h" 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/sysctl.h> 55 #include <sys/mbuf.h> 56 #include <sys/malloc.h> 57 #include <sys/lock.h> 58 #include <sys/kernel.h> 59 #include <sys/socket.h> 60 #include <sys/sockio.h> 61 #include <sys/errno.h> 62 #include <sys/callout.h> 63 #include <sys/bus.h> 64 #include <sys/endian.h> 65 #include <sys/kthread.h> 66 #include <sys/taskqueue.h> 67 #include <sys/priv.h> 68 #include <sys/module.h> 69 #include <sys/ktr.h> 70 71 #if defined(__DragonFly__) 72 /* empty */ 73 #else 74 #include <sys/smp.h> 75 #include <machine/bus.h> 76 #endif 77 78 #include <net/if.h> 79 #include <net/if_var.h> 80 #include <net/if_dl.h> 81 #include <net/if_media.h> 82 #include <net/if_types.h> 83 #include <net/if_arp.h> 84 #include <net/ethernet.h> 85 #include <net/if_llc.h> 86 #if defined(__DragonFly__) 87 #include <net/ifq_var.h> 88 #endif 89 90 #include <netproto/802_11/ieee80211_var.h> 91 #include <netproto/802_11/ieee80211_regdomain.h> 92 #ifdef IEEE80211_SUPPORT_SUPERG 93 #include <netproto/802_11/ieee80211_superg.h> 94 #endif 95 #ifdef IEEE80211_SUPPORT_TDMA 96 #include <netproto/802_11/ieee80211_tdma.h> 97 #endif 98 99 #include <net/bpf.h> 100 101 #ifdef INET 102 #include <netinet/in.h> 103 #include <netinet/if_ether.h> 104 #endif 105 106 #include <dev/netif/ath/ath/if_athvar.h> 107 #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */ 108 #include <dev/netif/ath/ath_hal/ah_diagcodes.h> 109 110 #include <dev/netif/ath/ath/if_ath_debug.h> 111 #include <dev/netif/ath/ath/if_ath_misc.h> 112 #include <dev/netif/ath/ath/if_ath_tsf.h> 113 #include <dev/netif/ath/ath/if_ath_tx.h> 114 #include <dev/netif/ath/ath/if_ath_sysctl.h> 115 #include <dev/netif/ath/ath/if_ath_led.h> 116 #include <dev/netif/ath/ath/if_ath_keycache.h> 117 #include <dev/netif/ath/ath/if_ath_rx.h> 118 
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>
#include <dev/netif/ath/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_tx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 *
 * These wrap a ring index around a power-of-two sized ring (_sz must
 * be a power of two for the mask to be correct).
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

/*
 * XXX doesn't belong here, and should be tunable
 */
#define	ATH_TXSTATUS_RING_SIZE	512

MALLOC_DECLARE(M_ATHDEV);

static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);

/*
 * Push some frames into the TX FIFO if we have space.
 *
 * Moves frames from the pending software queue (txq->axq_q) onto the
 * FIFO staging list (txq->fifo) and hands their descriptor addresses
 * to the hardware, up to HAL_TXFIFO_DEPTH outstanding FIFO entries.
 * Must be called with the TXQ lock held.
 */
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;
	int i = 0;

	ATH_TXQ_LOCK_ASSERT(txq);

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	/*
	 * NOTE(review): this loop removes bf from axq_q and re-inserts it
	 * on the fifo list while iterating with plain TAILQ_FOREACH, so
	 * the next-pointer followed after the body belongs to the fifo
	 * list, not axq_q.  Since bf is inserted at the fifo tail, the
	 * next pointer is NULL and the loop effectively pushes a single
	 * frame per call.  TAILQ_FOREACH_SAFE (or a while-FIRST loop)
	 * would be needed to actually batch-fill the FIFO — TODO confirm
	 * against upstream before changing behaviour.
	 */
	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
			break;

		/*
		 * We have space in the FIFO - so let's push a frame
		 * into it.
		 */

		/*
		 * Remove it from the normal list
		 */
		ATH_TXQ_REMOVE(txq, bf, bf_list);

		/*
		 * XXX for now, we only dequeue a frame at a time, so
		 * that's only one buffer.  Later on when we just
		 * push this staging _list_ into the queue, we'll
		 * set bf_last to the end pointer in the list.
		 */
		bf_last = bf;
		DPRINTF(sc, ATH_DEBUG_TX_PROC,
		    "%s: Q%d: depth=%d; pushing %p->%p\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    bf_last);

		/*
		 * Append it to the FIFO staging list
		 */
		ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);

		/*
		 * Set fifo start / fifo end flags appropriately.
		 * With single-frame pushes bf == bf_last, so the one
		 * buffer carries both FIFOPTR (head) and FIFOEND (tail).
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;
		bf_last->bf_flags |= ATH_BUF_FIFOEND;

		/*
		 * Push _into_ the FIFO.
		 */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
		txq->axq_fifo_depth++;
		i++;
	}
	/* Only poke the hardware if something was actually pushed */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Re-initialise the DMA FIFO with the current contents of
 * said TXQ.
 *
 * This should only be called as part of the chip reset path, as it
 * assumes the FIFO is currently empty.
 *
 * Walks the FIFO staging list and re-pushes the head buffer of each
 * FIFO sub-list into the hardware, re-counting axq_fifo_depth from
 * scratch and warning if the recount disagrees with the old value.
 */
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf;
	int i = 0;
	int fifostart = 1;
	int old_fifo_depth;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Let's log if the tracked FIFO depth doesn't match
	 * what we actually push in.
	 */
	old_fifo_depth = txq->axq_fifo_depth;
	txq->axq_fifo_depth = 0;

	/*
	 * Walk the FIFO staging list, looking for "head" entries.
	 * Since we may have a partially completed list of frames,
	 * we push the first frame we see into the FIFO and re-mark
	 * it as the head entry.  We then skip entries until we see
	 * FIFO end, at which point we get ready to push another
	 * entry into the FIFO.
	 */
	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
		/*
		 * If we're looking for FIFOEND and we haven't found
		 * it, skip.
		 *
		 * If we're looking for FIFOEND and we've found it,
		 * reset for another descriptor.
		 */
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */

		/* Still inside a previous FIFO sub-list; skip until its end */
		if (fifostart == 0) {
			if (bf->bf_flags & ATH_BUF_FIFOEND)
				fifostart = 1;
			continue;
		}

		/* Make sure we're not overflowing the FIFO! */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
			    __func__,
			    txq->axq_qnum,
			    txq->axq_fifo_depth);
		}

#if 0
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif

		/*
		 * Set this to be the first buffer in the FIFO
		 * list - even if it's also the last buffer in
		 * a FIFO list!
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;

		/* Push it into the FIFO and bump the FIFO count */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
		txq->axq_fifo_depth++;

		/*
		 * If this isn't the last entry either, let's
		 * clear fifostart so we continue looking for
		 * said last entry.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			fifostart = 0;
		i++;
	}

	/* Only bother starting the queue if there's something in it */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
	    __func__,
	    txq->axq_qnum,
	    old_fifo_depth,
	    txq->axq_fifo_depth);

	/* And now, let's check! */
	if (txq->axq_fifo_depth != old_fifo_depth) {
		device_printf(sc->sc_dev,
		    "%s: Q%d: FIFO depth should be %d, is %d\n",
		    __func__,
		    txq->axq_qnum,
		    old_fifo_depth,
		    txq->axq_fifo_depth);
	}
}

/*
 * Hand off this frame to a hardware queue.
 *
 * Things are a bit hairy in the EDMA world.  The TX FIFO is only
 * 8 entries deep, so we need to keep track of exactly what we've
 * pushed into the FIFO and what's just sitting in the TX queue,
 * waiting to go out.
 *
 * So this is split into two halves - frames get appended to the
 * TXQ; then a scheduler is called to push some frames into the
 * actual TX FIFO.
 */
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TXQ_LOCK(txq);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * XXX TODO: write a hard-coded check to ensure that
	 * the queue id in the TX descriptor matches txq->axq_qnum.
	 */

	/* Update aggr stats */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/* Push and update frame stats */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

	/* For now, set the link pointer in the last descriptor
	 * to be NULL.
	 *
	 * Later on, when it comes time to handling multiple descriptors
	 * in one FIFO push, we can link descriptors together this way.
	 */

	/*
	 * Finally, call the FIFO schedule routine to schedule some
	 * frames to the FIFO.
	 */
	ath_edma_tx_fifo_fill(sc, txq);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to a multicast software queue.
 *
 * The EDMA TX CABQ will get a list of chained frames, chained
 * together using the next pointer.  The single head of that
 * particular queue is pushed to the hardware CABQ.
 */
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	ATH_TXQ_LOCK(txq);
	/*
	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
	 */
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;

		/* re-sync buffer to memory */
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif	/* ATH_DEBUG_ALQ */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Handoff this frame to the hardware.
 *
 * For the multicast queue, this will treat it as a software queue
 * and append it to the list, after updating the MORE_DATA flag
 * in the previous frame.  The cabq processing code will ensure
 * that the queue contents gets transferred over.
 *
 * For the hardware queues, this will queue a frame to the queue
 * like before, then populate the FIFO from that.
Since the 447 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that 448 * frames such as management frames don't get prematurely dropped. 449 * 450 * This does imply that a similar flush-hwq-to-fifoq method will 451 * need to be called from the processq function, before the 452 * per-node software scheduler is called. 453 */ 454 static void 455 ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 456 struct ath_buf *bf) 457 { 458 459 DPRINTF(sc, ATH_DEBUG_XMIT_DESC, 460 "%s: called; bf=%p, txq=%p, qnum=%d\n", 461 __func__, 462 bf, 463 txq, 464 txq->axq_qnum); 465 466 if (txq->axq_qnum == ATH_TXQ_SWQ) 467 ath_edma_xmit_handoff_mcast(sc, txq, bf); 468 else 469 ath_edma_xmit_handoff_hw(sc, txq, bf); 470 } 471 472 static int 473 ath_edma_setup_txfifo(struct ath_softc *sc, int qnum) 474 { 475 struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum]; 476 477 te->m_fifo = kmalloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH, 478 M_ATHDEV, M_INTWAIT | M_ZERO); 479 if (te->m_fifo == NULL) { 480 device_printf(sc->sc_dev, "%s: malloc failed\n", 481 __func__); 482 return (-ENOMEM); 483 } 484 485 /* 486 * Set initial "empty" state. 487 */ 488 te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0; 489 490 return (0); 491 } 492 493 static int 494 ath_edma_free_txfifo(struct ath_softc *sc, int qnum) 495 { 496 struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum]; 497 498 /* XXX TODO: actually deref the ath_buf entries? 
*/ 499 kfree(te->m_fifo, M_ATHDEV); 500 return (0); 501 } 502 503 static int 504 ath_edma_dma_txsetup(struct ath_softc *sc) 505 { 506 int error; 507 int i; 508 509 error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma, 510 NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE); 511 if (error != 0) 512 return (error); 513 514 ath_hal_setuptxstatusring(sc->sc_ah, 515 (void *) sc->sc_txsdma.dd_desc, 516 sc->sc_txsdma.dd_desc_paddr, 517 ATH_TXSTATUS_RING_SIZE); 518 519 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 520 ath_edma_setup_txfifo(sc, i); 521 } 522 523 return (0); 524 } 525 526 static int 527 ath_edma_dma_txteardown(struct ath_softc *sc) 528 { 529 int i; 530 531 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 532 ath_edma_free_txfifo(sc, i); 533 } 534 535 ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL); 536 return (0); 537 } 538 539 /* 540 * Drain all TXQs, potentially after completing the existing completed 541 * frames. 542 */ 543 static void 544 ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 545 { 546 int i; 547 548 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 549 550 (void) ath_stoptxdma(sc); 551 552 /* 553 * If reset type is noloss, the TX FIFO needs to be serviced 554 * and those frames need to be handled. 555 * 556 * Otherwise, just toss everything in each TX queue. 557 */ 558 if (reset_type == ATH_RESET_NOLOSS) { 559 ath_edma_tx_processq(sc, 0); 560 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 561 if (ATH_TXQ_SETUP(sc, i)) { 562 ATH_TXQ_LOCK(&sc->sc_txq[i]); 563 /* 564 * Free the holding buffer; DMA is now 565 * stopped. 566 */ 567 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); 568 /* 569 * Reset the link pointer to NULL; there's 570 * no frames to chain DMA to. 
571 */ 572 sc->sc_txq[i].axq_link = NULL; 573 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 574 } 575 } 576 } else { 577 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 578 if (ATH_TXQ_SETUP(sc, i)) 579 ath_tx_draintxq(sc, &sc->sc_txq[i]); 580 } 581 } 582 583 /* XXX dump out the TX completion FIFO contents */ 584 585 /* XXX dump out the frames */ 586 587 sc->sc_wd_timer = 0; 588 } 589 590 /* 591 * TX completion tasklet. 592 */ 593 594 static void 595 ath_edma_tx_proc(void *arg, int npending) 596 { 597 struct ath_softc *sc = (struct ath_softc *) arg; 598 599 #if 0 600 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n", 601 __func__, npending); 602 #endif 603 ath_edma_tx_processq(sc, 1); 604 } 605 606 /* 607 * Process the TX status queue. 608 */ 609 static void 610 ath_edma_tx_processq(struct ath_softc *sc, int dosched) 611 { 612 struct ath_hal *ah = sc->sc_ah; 613 HAL_STATUS status; 614 struct ath_tx_status ts; 615 struct ath_txq *txq; 616 struct ath_buf *bf; 617 struct ieee80211_node *ni; 618 int nacked = 0; 619 int idx; 620 621 #ifdef ATH_DEBUG 622 /* XXX */ 623 uint32_t txstatus[32]; 624 #endif 625 626 for (idx = 0; ; idx++) { 627 bzero(&ts, sizeof(ts)); 628 629 ATH_TXSTATUS_LOCK(sc); 630 #ifdef ATH_DEBUG 631 ath_hal_gettxrawtxdesc(ah, txstatus); 632 #endif 633 status = ath_hal_txprocdesc(ah, NULL, (void *) &ts); 634 ATH_TXSTATUS_UNLOCK(sc); 635 636 if (status == HAL_EINPROGRESS) 637 break; 638 639 #ifdef ATH_DEBUG 640 if (sc->sc_debug & ATH_DEBUG_TX_PROC) 641 if (ts.ts_queue_id != sc->sc_bhalq) 642 ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id, 643 idx, (status == HAL_OK)); 644 #endif 645 646 /* 647 * If there is an error with this descriptor, continue 648 * processing. 649 * 650 * XXX TBD: log some statistics? 
651 */ 652 if (status == HAL_EIO) { 653 device_printf(sc->sc_dev, "%s: invalid TX status?\n", 654 __func__); 655 break; 656 } 657 658 #if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG) 659 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS)) 660 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, 661 sc->sc_tx_statuslen, 662 (char *) txstatus); 663 #endif /* ATH_DEBUG_ALQ */ 664 665 /* 666 * At this point we have a valid status descriptor. 667 * The QID and descriptor ID (which currently isn't set) 668 * is part of the status. 669 * 670 * We then assume that the descriptor in question is the 671 * -head- of the given QID. Eventually we should verify 672 * this by using the descriptor ID. 673 */ 674 675 /* 676 * The beacon queue is not currently a "real" queue. 677 * Frames aren't pushed onto it and the lock isn't setup. 678 * So skip it for now; the beacon handling code will 679 * free and alloc more beacon buffers as appropriate. 680 */ 681 if (ts.ts_queue_id == sc->sc_bhalq) 682 continue; 683 684 txq = &sc->sc_txq[ts.ts_queue_id]; 685 686 ATH_TXQ_LOCK(txq); 687 bf = ATH_TXQ_FIRST(&txq->fifo); 688 689 /* 690 * Work around the situation where I'm seeing notifications 691 * for Q1 when no frames are available. That needs to be 692 * debugged but not by crashing _here_. 693 */ 694 if (bf == NULL) { 695 device_printf(sc->sc_dev, "%s: Q%d: empty?\n", 696 __func__, 697 ts.ts_queue_id); 698 ATH_TXQ_UNLOCK(txq); 699 continue; 700 } 701 702 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n", 703 __func__, 704 ts.ts_queue_id, bf, 705 !! (bf->bf_flags & ATH_BUF_FIFOPTR), 706 !! 
(bf->bf_flags & ATH_BUF_FIFOEND)); 707 708 /* XXX TODO: actually output debugging info about this */ 709 710 #if 0 711 /* XXX assert the buffer/descriptor matches the status descid */ 712 if (ts.ts_desc_id != bf->bf_descid) { 713 device_printf(sc->sc_dev, 714 "%s: mismatched descid (qid=%d, tsdescid=%d, " 715 "bfdescid=%d\n", 716 __func__, 717 ts.ts_queue_id, 718 ts.ts_desc_id, 719 bf->bf_descid); 720 } 721 #endif 722 723 /* This removes the buffer and decrements the queue depth */ 724 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); 725 if (bf->bf_state.bfs_aggr) 726 txq->axq_aggr_depth--; 727 728 /* 729 * If this was the end of a FIFO set, decrement FIFO depth 730 */ 731 if (bf->bf_flags & ATH_BUF_FIFOEND) 732 txq->axq_fifo_depth--; 733 734 /* 735 * If this isn't the final buffer in a FIFO set, mark 736 * the buffer as busy so it goes onto the holding queue. 737 */ 738 if (! (bf->bf_flags & ATH_BUF_FIFOEND)) 739 bf->bf_flags |= ATH_BUF_BUSY; 740 741 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n", 742 __func__, 743 txq->axq_qnum, 744 txq->axq_fifo_depth, 745 txq->fifo.axq_depth); 746 747 /* XXX assert FIFO depth >= 0 */ 748 ATH_TXQ_UNLOCK(txq); 749 750 /* 751 * Outside of the TX lock - if the buffer is end 752 * end buffer in this FIFO, we don't need a holding 753 * buffer any longer. 754 */ 755 if (bf->bf_flags & ATH_BUF_FIFOEND) { 756 ATH_TXQ_LOCK(txq); 757 ath_txq_freeholdingbuf(sc, txq); 758 ATH_TXQ_UNLOCK(txq); 759 } 760 761 /* 762 * First we need to make sure ts_rate is valid. 763 * 764 * Pre-EDMA chips pass the whole TX descriptor to 765 * the proctxdesc function which will then fill out 766 * ts_rate based on the ts_finaltsi (final TX index) 767 * in the TX descriptor. However the TX completion 768 * FIFO doesn't have this information. So here we 769 * do a separate HAL call to populate that information. 770 * 771 * The same problem exists with ts_longretry. 
772 * The FreeBSD HAL corrects ts_longretry in the HAL layer; 773 * the AR9380 HAL currently doesn't. So until the HAL 774 * is imported and this can be added, we correct for it 775 * here. 776 */ 777 /* XXX TODO */ 778 /* XXX faked for now. Ew. */ 779 if (ts.ts_finaltsi < 4) { 780 ts.ts_rate = 781 bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode; 782 switch (ts.ts_finaltsi) { 783 case 3: ts.ts_longretry += 784 bf->bf_state.bfs_rc[2].tries; 785 case 2: ts.ts_longretry += 786 bf->bf_state.bfs_rc[1].tries; 787 case 1: ts.ts_longretry += 788 bf->bf_state.bfs_rc[0].tries; 789 } 790 } else { 791 device_printf(sc->sc_dev, "%s: finaltsi=%d\n", 792 __func__, 793 ts.ts_finaltsi); 794 ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode; 795 } 796 797 /* 798 * XXX This is terrible. 799 * 800 * Right now, some code uses the TX status that is 801 * passed in here, but the completion handlers in the 802 * software TX path also use bf_status.ds_txstat. 803 * Ew. That should all go away. 804 * 805 * XXX It's also possible the rate control completion 806 * routine is called twice. 807 */ 808 memcpy(&bf->bf_status, &ts, sizeof(ts)); 809 810 ni = bf->bf_node; 811 812 /* Update RSSI */ 813 /* XXX duplicate from ath_tx_processq */ 814 if (ni != NULL && ts.ts_status == 0 && 815 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 816 nacked++; 817 sc->sc_stats.ast_tx_rssi = ts.ts_rssi; 818 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 819 ts.ts_rssi); 820 } 821 822 /* Handle frame completion and rate control update */ 823 ath_tx_process_buf_completion(sc, txq, &ts, bf); 824 825 /* bf is invalid at this point */ 826 827 /* 828 * Now that there's space in the FIFO, let's push some 829 * more frames into it. 
830 */ 831 ATH_TXQ_LOCK(txq); 832 if (dosched) 833 ath_edma_tx_fifo_fill(sc, txq); 834 ATH_TXQ_UNLOCK(txq); 835 } 836 837 sc->sc_wd_timer = 0; 838 839 /* Kick software scheduler */ 840 /* 841 * XXX It's inefficient to do this if the FIFO queue is full, 842 * but there's no easy way right now to only populate 843 * the txq task for _one_ TXQ. This should be fixed. 844 */ 845 if (dosched) 846 ath_tx_swq_kick(sc); 847 } 848 849 static void 850 ath_edma_attach_comp_func(struct ath_softc *sc) 851 { 852 853 TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc); 854 } 855 856 void 857 ath_xmit_setup_edma(struct ath_softc *sc) 858 { 859 860 /* Fetch EDMA field and buffer sizes */ 861 (void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen); 862 (void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen); 863 (void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps); 864 865 if (bootverbose) { 866 device_printf(sc->sc_dev, "TX descriptor length: %d\n", 867 sc->sc_tx_desclen); 868 device_printf(sc->sc_dev, "TX status length: %d\n", 869 sc->sc_tx_statuslen); 870 device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n", 871 sc->sc_tx_nmaps); 872 } 873 874 sc->sc_tx.xmit_setup = ath_edma_dma_txsetup; 875 sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown; 876 sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func; 877 878 sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart; 879 sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff; 880 sc->sc_tx.xmit_drain = ath_edma_tx_drain; 881 } 882