/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#if defined(__DragonFly__)
/* empty */
#else
#include <sys/smp.h>	/* for mp_ncpus */
#include <machine/bus.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#if defined(__DragonFly__)
#include <net/ifq_var.h>
#endif

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>
#include <dev/netif/ath/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Only flag RX as stopped if the DMA engine actually
	 * stopped.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip?\n",
		    __func__);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary? We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);

	return (0);
}

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

#if defined(__DragonFly__)
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->sc_edma_bufsize);
#else
	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
#endif
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(
	    sc->sc_dmat, bf->bf_dmamap, m,
	    bf->bf_segs, 1, &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
#endif

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL?
		 */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	/* DragonFly: note use of M_INTWAIT */
	re->m_fifo = kmalloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV, M_INTWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	kfree(re->m_fifo, M_ATHDEV);

	return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}