1 /*- 2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 /* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_tx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * Some general macros.
 *
 * INCR/DECR advance/retreat an index with power-of-two wraparound;
 * _sz must be a power of two for the mask to work.
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

/*
 * Number of entries in the TX status descriptor ring.
 *
 * XXX doesn't belong here, and should be tunable
 */
#define	ATH_TXSTATUS_RING_SIZE	512

MALLOC_DECLARE(M_ATHDEV);

static void	ath_edma_tx_processq(struct ath_softc *sc, int dosched);

/*
 * Push some frames into the TX FIFO if we have space.
 *
 * Walks the TXQ's pending list and hands frames to the hardware
 * TX FIFO one at a time until the FIFO is full (HAL_TXFIFO_DEPTH
 * entries) or the pending list is empty.  Each pushed frame is
 * moved onto the TXQ's FIFO staging list so completion processing
 * can account for it later.
 *
 * The caller must hold the TXQ lock (asserted below).
 */
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;
	int i = 0;

	ATH_TXQ_LOCK_ASSERT(txq);

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
		/* Stop once the hardware FIFO is full */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
			break;

		/*
		 * We have space in the FIFO - so let's push a frame
		 * into it.
		 */

		/*
		 * Remove it from the normal (pending) list.
		 */
		ATH_TXQ_REMOVE(txq, bf, bf_list);

		/*
		 * XXX for now, we only dequeue a frame at a time, so
		 * that's only one buffer.  Later on when we just
		 * push this staging _list_ into the queue, we'll
		 * set bf_last to the end pointer in the list.
		 */
		bf_last = bf;
		DPRINTF(sc, ATH_DEBUG_TX_PROC,
		    "%s: Q%d: depth=%d; pushing %p->%p\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    bf_last);

		/*
		 * Append it to the FIFO staging list; the completion
		 * path pops entries off txq->fifo as status frames
		 * arrive.
		 */
		ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);

		/*
		 * Set fifo start / fifo end flags appropriately.
		 * With single-frame pushes, bf == bf_last, so every
		 * frame is both the start and end of its FIFO set.
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;
		bf_last->bf_flags |= ATH_BUF_FIFOEND;

		/*
		 * Push _into_ the FIFO.
		 */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif	/* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif	/* ATH_DEBUG_ALQ */
		txq->axq_fifo_depth++;
		i++;
	}
	/* Only poke the hardware if we actually pushed something */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Re-initialise the DMA FIFO with the current contents of
 * said TXQ.
 *
 * This should only be called as part of the chip reset path, as it
 * assumes the FIFO is currently empty.
 *
 * It re-pushes the head buffer of each FIFO set on the staging list
 * back into the hardware FIFO, skipping the non-head members of each
 * set, and cross-checks the rebuilt FIFO depth against the depth that
 * was tracked before the reset.
 */
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf;
	int i = 0;
	int fifostart = 1;
	int old_fifo_depth;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Let's log if the tracked FIFO depth doesn't match
	 * what we actually push in.
	 */
	old_fifo_depth = txq->axq_fifo_depth;
	txq->axq_fifo_depth = 0;

	/*
	 * Walk the FIFO staging list, looking for "head" entries.
	 * Since we may have a partially completed list of frames,
	 * we push the first frame we see into the FIFO and re-mark
	 * it as the head entry.  We then skip entries until we see
	 * FIFO end, at which point we get ready to push another
	 * entry into the FIFO.
	 */
	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
		/*
		 * If we're looking for FIFOEND and we haven't found
		 * it, skip.
		 *
		 * If we're looking for FIFOEND and we've found it,
		 * reset for another descriptor.
		 */
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif	/* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif	/* ATH_DEBUG_ALQ */

		if (fifostart == 0) {
			if (bf->bf_flags & ATH_BUF_FIFOEND)
				fifostart = 1;
			continue;
		}

		/* Make sure we're not overflowing the FIFO! */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
			    __func__,
			    txq->axq_qnum,
			    txq->axq_fifo_depth);
		}

#if 0
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif

		/*
		 * Set this to be the first buffer in the FIFO
		 * list - even if it's also the last buffer in
		 * a FIFO list!
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;

		/* Push it into the FIFO and bump the FIFO count */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
		txq->axq_fifo_depth++;

		/*
		 * If this isn't the last entry either, let's
		 * clear fifostart so we continue looking for
		 * said last entry.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			fifostart = 0;
		i++;
	}

	/* Only bother starting the queue if there's something in it */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
	    __func__,
	    txq->axq_qnum,
	    old_fifo_depth,
	    txq->axq_fifo_depth);

	/* And now, let's check that the rebuilt depth matches! */
	if (txq->axq_fifo_depth != old_fifo_depth) {
		device_printf(sc->sc_dev,
		    "%s: Q%d: FIFO depth should be %d, is %d\n",
		    __func__,
		    txq->axq_qnum,
		    old_fifo_depth,
		    txq->axq_fifo_depth);
	}
}

/*
 * Hand off this frame to a hardware queue.
 *
 * Things are a bit hairy in the EDMA world.  The TX FIFO is only
 * 8 entries deep, so we need to keep track of exactly what we've
 * pushed into the FIFO and what's just sitting in the TX queue,
 * waiting to go out.
 *
 * So this is split into two halves - frames get appended to the
 * TXQ; then a scheduler is called to push some frames into the
 * actual TX FIFO.
 */
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TXQ_LOCK(txq);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * XXX TODO: write a hard-coded check to ensure that
	 * the queue id in the TX descriptor matches txq->axq_qnum.
	 */

	/* Update aggr stats */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/* Push and update frame stats */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

	/* For now, set the link pointer in the last descriptor
	 * to be NULL.
	 *
	 * Later on, when it comes time to handling multiple descriptors
	 * in one FIFO push, we can link descriptors together this way.
	 */

	/*
	 * Finally, call the FIFO schedule routine to schedule some
	 * frames to the FIFO.
	 */
	ath_edma_tx_fifo_fill(sc, txq);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to a multicast software queue.
 *
 * The EDMA TX CABQ will get a list of chained frames, chained
 * together using the next pointer.  The single head of that
 * particular queue is pushed to the hardware CABQ.
 */
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	ATH_TXQ_LOCK(txq);
	/*
	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
	 */
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/*
		 * Mark the previous frame: MORE_DATA tells the station
		 * another CABQ frame follows this one.
		 */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;

		/*
		 * Re-sync the buffer to memory, since we just rewrote
		 * the 802.11 header in host memory above.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* Chain the previous frame's last descriptor to this one */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif	/* ATH_DEBUG_ALQ */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Handoff this frame to the hardware.
 *
 * For the multicast queue, this will treat it as a software queue
 * and append it to the list, after updating the MORE_DATA flag
 * in the previous frame.  The cabq processing code will ensure
 * that the queue contents gets transferred over.
 *
 * For the hardware queues, this will queue a frame to the queue
 * like before, then populate the FIFO from that.  Since the
 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
 * frames such as management frames don't get prematurely dropped.
 *
 * This does imply that a similar flush-hwq-to-fifoq method will
 * need to be called from the processq function, before the
 * per-node software scheduler is called.
 */
static void
ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
	    __func__,
	    bf,
	    txq,
	    txq->axq_qnum);

	/* Multicast/CAB frames go via the software queue path */
	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_edma_xmit_handoff_mcast(sc, txq, bf);
	else
		ath_edma_xmit_handoff_hw(sc, txq, bf);
}

/*
 * Allocate and initialise the per-TXQ FIFO tracking state.
 *
 * Returns 0 on success, or a negative errno on allocation failure.
 */
static int
ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	te->m_fifo = kmalloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
	    M_ATHDEV, M_INTWAIT | M_ZERO);
	if (te->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		/*
		 * XXX negative errno (Linux convention) rather than the
		 * usual positive errno; callers only test for non-zero,
		 * so this is currently harmless.
		 */
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the per-TXQ FIFO tracking state allocated by
 * ath_edma_setup_txfifo().  Always returns 0.
 */
static int
ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	/* XXX TODO: actually deref the ath_buf entries? */
	kfree(te->m_fifo, M_ATHDEV);
	return (0);
}

/*
 * Set up the EDMA TX DMA state: allocate the TX status (completion)
 * descriptor ring, hand it to the HAL, and initialise each TXQ's
 * FIFO tracking state.
 *
 * Returns 0 on success or an errno from the descriptor allocation.
 * Note: per-queue ath_edma_setup_txfifo() failures are not checked
 * here.
 */
static int
ath_edma_dma_txsetup(struct ath_softc *sc)
{
	int error;
	int i;

	error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
	    NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
	if (error != 0)
		return (error);

	ath_hal_setuptxstatusring(sc->sc_ah,
	    (void *) sc->sc_txsdma.dd_desc,
	    sc->sc_txsdma.dd_desc_paddr,
	    ATH_TXSTATUS_RING_SIZE);

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		ath_edma_setup_txfifo(sc, i);
	}

	return (0);
}

/*
 * Tear down the EDMA TX DMA state set up by ath_edma_dma_txsetup().
 * Always returns 0.
 */
static int
ath_edma_dma_txteardown(struct ath_softc *sc)
{
	int i;

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		ath_edma_free_txfifo(sc, i);
	}

	ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
	return (0);
}

/*
 * Drain all TXQs, potentially after completing the existing completed
 * frames.
 */
static void
ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	(void) ath_stoptxdma(sc);

	/*
	 * If reset type is noloss, the TX FIFO needs to be serviced
	 * and those frames need to be handled.
	 *
	 * Otherwise, just toss everything in each TX queue.
	 */
	if (reset_type == ATH_RESET_NOLOSS) {
		ath_edma_tx_processq(sc, 0);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				/*
				 * Free the holding buffer; DMA is now
				 * stopped.
				 */
				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
				/*
				 * Reset the link pointer to NULL; there's
				 * no frames to chain DMA to.
				 */
				sc->sc_txq[i].axq_link = NULL;
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}
	} else {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}

	/* XXX dump out the TX completion FIFO contents */

	/* XXX dump out the frames */

	/* Clear OACTIVE so the stack resumes handing us frames */
	IF_LOCK(&ifp->if_snd);
#if defined(__DragonFly__)
	ifq_clr_oactive(&ifp->if_snd);
#else
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#endif
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;
}

/*
 * TX completion tasklet.
 *
 * Taskqueue entry point; simply runs the status-queue processing
 * with scheduling enabled.
 */
static void
ath_edma_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;

#if 0
	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
	    __func__, npending);
#endif
	ath_edma_tx_processq(sc, 1);
}

/*
 * Process the TX status queue.
 *
 * Pops completion descriptors off the TX status ring one at a time,
 * matches each against the head of the relevant TXQ's FIFO staging
 * list, completes the frame (stats, rate control, free) and, when
 * dosched is set, refills the FIFO and kicks the software scheduler.
 */
static void
ath_edma_tx_processq(struct ath_softc *sc, int dosched)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	struct ath_tx_status ts;
	struct ath_txq *txq;
	struct ath_buf *bf;
	struct ieee80211_node *ni;
	int nacked = 0;		/* XXX incremented but currently unused here */
	int idx;

#ifdef	ATH_DEBUG
	/* XXX */
	uint32_t txstatus[32];
#endif

	for (idx = 0; ; idx++) {
		bzero(&ts, sizeof(ts));

		ATH_TXSTATUS_LOCK(sc);
#ifdef	ATH_DEBUG
		ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
		ATH_TXSTATUS_UNLOCK(sc);

		/* No more completed entries in the status ring */
		if (status == HAL_EINPROGRESS)
			break;

#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
			if (ts.ts_queue_id != sc->sc_bhalq)
			ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id,
			    idx, (status == HAL_OK));
#endif

		/*
		 * If the HAL reports a corrupt/invalid status descriptor,
		 * stop processing the ring here rather than risk acting
		 * on garbage.
		 *
		 * XXX TBD: log some statistics?
		 */
		if (status == HAL_EIO) {
			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
			    __func__);
			break;
		}

#if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
			sc->sc_tx_statuslen,
			(char *) txstatus);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * At this point we have a valid status descriptor.
		 * The QID and descriptor ID (which currently isn't set)
		 * is part of the status.
		 *
		 * We then assume that the descriptor in question is the
		 * -head- of the given QID.  Eventually we should verify
		 * this by using the descriptor ID.
		 */

		/*
		 * The beacon queue is not currently a "real" queue.
		 * Frames aren't pushed onto it and the lock isn't setup.
		 * So skip it for now; the beacon handling code will
		 * free and alloc more beacon buffers as appropriate.
		 */
		if (ts.ts_queue_id == sc->sc_bhalq)
			continue;

		txq = &sc->sc_txq[ts.ts_queue_id];

		ATH_TXQ_LOCK(txq);
		bf = ATH_TXQ_FIRST(&txq->fifo);

		/*
		 * Work around the situation where I'm seeing notifications
		 * for Q1 when no frames are available.  That needs to be
		 * debugged but not by crashing _here_.
		 */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
			    __func__,
			    ts.ts_queue_id);
			ATH_TXQ_UNLOCK(txq);
			continue;
		}

		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
		    __func__,
		    ts.ts_queue_id, bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));

		/* XXX TODO: actually output debugging info about this */

#if 0
		/* XXX assert the buffer/descriptor matches the status descid */
		if (ts.ts_desc_id != bf->bf_descid) {
			device_printf(sc->sc_dev,
			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
			    "bfdescid=%d\n",
			    __func__,
			    ts.ts_queue_id,
			    ts.ts_desc_id,
			    bf->bf_descid);
		}
#endif

		/* This removes the buffer and decrements the queue depth */
		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;

		/*
		 * If this was the end of a FIFO set, decrement FIFO depth
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND)
			txq->axq_fifo_depth--;

		/*
		 * If this isn't the final buffer in a FIFO set, mark
		 * the buffer as busy so it goes onto the holding queue.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			bf->bf_flags |= ATH_BUF_BUSY;

		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    txq->fifo.axq_depth);

		/* XXX assert FIFO depth >= 0 */
		ATH_TXQ_UNLOCK(txq);

		/*
		 * Outside of the TX lock - if the buffer is the end
		 * buffer in this FIFO, we don't need a holding
		 * buffer any longer.
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND) {
			ATH_TXQ_LOCK(txq);
			ath_txq_freeholdingbuf(sc, txq);
			ATH_TXQ_UNLOCK(txq);
		}

		/*
		 * First we need to make sure ts_rate is valid.
		 *
		 * Pre-EDMA chips pass the whole TX descriptor to
		 * the proctxdesc function which will then fill out
		 * ts_rate based on the ts_finaltsi (final TX index)
		 * in the TX descriptor.  However the TX completion
		 * FIFO doesn't have this information.  So here we
		 * do a separate HAL call to populate that information.
		 *
		 * The same problem exists with ts_longretry.
		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
		 * the AR9380 HAL currently doesn't.  So until the HAL
		 * is imported and this can be added, we correct for it
		 * here.
		 */
		/* XXX TODO */
		/* XXX faked for now.  Ew. */
		if (ts.ts_finaltsi < 4) {
			ts.ts_rate =
			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
			/*
			 * Deliberate fallthrough: accumulate the retry
			 * counts of every rate series tried before the
			 * final one.
			 */
			switch (ts.ts_finaltsi) {
			case 3: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[2].tries;
				/* FALLTHROUGH */
			case 2: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[1].tries;
				/* FALLTHROUGH */
			case 1: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[0].tries;
			}
		} else {
			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
			    __func__,
			    ts.ts_finaltsi);
			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
		}

		/*
		 * XXX This is terrible.
		 *
		 * Right now, some code uses the TX status that is
		 * passed in here, but the completion handlers in the
		 * software TX path also use bf_status.ds_txstat.
		 * Ew.  That should all go away.
		 *
		 * XXX It's also possible the rate control completion
		 * routine is called twice.
		 */
		memcpy(&bf->bf_status, &ts, sizeof(ts));

		ni = bf->bf_node;

		/* Update RSSI */
		/* XXX duplicate from ath_tx_processq */
		if (ni != NULL && ts.ts_status == 0 &&
		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
			nacked++;
			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
			    ts.ts_rssi);
		}

		/* Handle frame completion and rate control update */
		ath_tx_process_buf_completion(sc, txq, &ts, bf);

		/* bf is invalid at this point */

		/*
		 * Now that there's space in the FIFO, let's push some
		 * more frames into it.
		 */
		ATH_TXQ_LOCK(txq);
		if (dosched)
			ath_edma_tx_fifo_fill(sc, txq);
		ATH_TXQ_UNLOCK(txq);
	}

	sc->sc_wd_timer = 0;

	/* If we completed anything, let the stack send us more frames */
	if (idx > 0) {
		IF_LOCK(&sc->sc_ifp->if_snd);
#if defined(__DragonFly__)
		ifq_clr_oactive(&sc->sc_ifp->if_snd);
#else
		sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#endif
		IF_UNLOCK(&sc->sc_ifp->if_snd);
	}

	/* Kick software scheduler */
	/*
	 * XXX It's inefficient to do this if the FIFO queue is full,
	 * but there's no easy way right now to only populate
	 * the txq task for _one_ TXQ.  This should be fixed.
	 */
	if (dosched)
		ath_tx_swq_kick(sc);
}

/*
 * Register the EDMA TX completion handler with the TX taskqueue.
 */
static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{

	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}

/*
 * Attach-time setup: query the HAL for EDMA descriptor/status sizes
 * and install the EDMA TX method pointers into the softc.
 */
void
ath_xmit_setup_edma(struct ath_softc *sc)
{

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);

	if (bootverbose) {
		device_printf(sc->sc_dev, "TX descriptor length: %d\n",
		    sc->sc_tx_desclen);
		device_printf(sc->sc_dev, "TX status length: %d\n",
		    sc->sc_tx_statuslen);
		device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
		    sc->sc_tx_nmaps);
	}

	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
}