/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * Some general macros.
 *
 * NB: the mask-based wrap assumes _sz is a power of two.
 */
#define	INCR(_l, _sz)		(_l)++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l)--; (_l) &= ((_sz) - 1)

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Stop RX DMA and record whether the engine actually stopped.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip?\n",
		    __func__);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary?  We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
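	/*
	 * NB: ath_edma_rxfifo_alloc() only pushes entries while
	 * m_fifo_depth < m_fifolen, so after the re-init above this
	 * just tops up any empty FIFO slots rather than double-pushing
	 * buffers that are already in the FIFO.
	 */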
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);

	return (0);
}

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	int16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
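		/*
		 * NB: ath_hal_rxprocdesc() decodes the status words the
		 * hardware wrote at the head of this buffer; it returns
		 * HAL_EINPROGRESS until the entry has completed, and the
		 * walk below relies on entries completing in FIFO order,
		 * stopping at the first incomplete one.
		 */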
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
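	/*
	 * NB: this walks a private copy of the list with the RX lock
	 * dropped; the buffers are only returned to sc_rxbuf (under
	 * the lock) once ath_rx_pkt() has consumed the mbufs.
	 */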
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX inside IF_LOCK ? */
#if defined(__DragonFly__)
	if (!ifq_is_oactive(&ifp->if_snd)) {
#else
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#endif
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
#if defined(__DragonFly__)
		if (! ifq_is_empty(&ifp->if_snd))
			ath_tx_kick(sc);
#else
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
#endif
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

#if defined(__DragonFly__)
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->sc_edma_bufsize);
#else
	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
#endif
	if (! m)
		return (ENOBUFS);		/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
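	/*
	 * NB: with EDMA RX there is no separate descriptor ring entry;
	 * the hardware writes the RX status (sc_rx_statuslen bytes) at
	 * the head of the buffer itself, so bf_desc simply points at
	 * the start of the mbuf data.  The status area is stripped
	 * with m_adj() once the frame completes.
	 */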
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(
	    sc->sc_dmat, bf->bf_dmamap, m,
	    bf->bf_segs, 1, &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
#endif

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = kmalloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV, M_INTWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	kfree(re->m_fifo, M_ATHDEV);

	return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4KB */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}