1 /*- 2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 /* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 48 */ 49 #include "opt_ah.h" 50 #include "opt_wlan.h" 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/sysctl.h> 55 #include <sys/mbuf.h> 56 #include <sys/malloc.h> 57 #include <sys/lock.h> 58 #include <sys/mutex.h> 59 #include <sys/kernel.h> 60 #include <sys/socket.h> 61 #include <sys/sockio.h> 62 #include <sys/errno.h> 63 #include <sys/callout.h> 64 #include <sys/bus.h> 65 #include <sys/endian.h> 66 #include <sys/kthread.h> 67 #include <sys/taskqueue.h> 68 #include <sys/priv.h> 69 #include <sys/module.h> 70 #include <sys/ktr.h> 71 #include <sys/smp.h> /* for mp_ncpus */ 72 73 #include <machine/bus.h> 74 75 #include <net/if.h> 76 #include <net/if_dl.h> 77 #include <net/if_media.h> 78 #include <net/if_types.h> 79 #include <net/if_arp.h> 80 #include <net/ethernet.h> 81 #include <net/if_llc.h> 82 83 #include <net80211/ieee80211_var.h> 84 #include <net80211/ieee80211_regdomain.h> 85 #ifdef IEEE80211_SUPPORT_SUPERG 86 #include <net80211/ieee80211_superg.h> 87 #endif 88 #ifdef IEEE80211_SUPPORT_TDMA 89 #include <net80211/ieee80211_tdma.h> 90 #endif 91 92 #include <net/bpf.h> 93 94 #ifdef INET 95 #include <netinet/in.h> 96 #include <netinet/if_ether.h> 97 #endif 98 99 #include <dev/ath/if_athvar.h> 100 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 101 #include <dev/ath/ath_hal/ah_diagcodes.h> 102 103 #include <dev/ath/if_ath_debug.h> 104 #include <dev/ath/if_ath_misc.h> 105 #include <dev/ath/if_ath_tsf.h> 106 #include <dev/ath/if_ath_tx.h> 107 #include <dev/ath/if_ath_sysctl.h> 108 #include <dev/ath/if_ath_led.h> 109 #include <dev/ath/if_ath_keycache.h> 110 #include <dev/ath/if_ath_rx.h> 111 #include <dev/ath/if_ath_beacon.h> 112 #include <dev/ath/if_athdfs.h> 113 114 #ifdef ATH_TX99_DIAG 115 #include <dev/ath/ath_tx99/ath_tx99.h> 116 #endif 117 118 #include <dev/ath/if_ath_rx_edma.h> 119 120 #ifdef ATH_DEBUG_ALQ 121 #include 
<dev/ath/if_ath_alq.h> 122 #endif 123 124 /* 125 * some general macros 126 */ 127 #define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1) 128 #define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1) 129 130 MALLOC_DECLARE(M_ATHDEV); 131 132 /* 133 * XXX TODO: 134 * 135 * + Make sure the FIFO is correctly flushed and reinitialised 136 * through a reset; 137 * + Verify multi-descriptor frames work! 138 * + There's a "memory use after free" which needs to be tracked down 139 * and fixed ASAP. I've seen this in the legacy path too, so it 140 * may be a generic RX path issue. 141 */ 142 143 /* 144 * XXX shuffle the function orders so these pre-declarations aren't 145 * required! 146 */ 147 static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, 148 int nbufs); 149 static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype); 150 static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf); 151 static void ath_edma_recv_proc_queue(struct ath_softc *sc, 152 HAL_RX_QUEUE qtype, int dosched); 153 static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, 154 HAL_RX_QUEUE qtype, int dosched); 155 156 static void 157 ath_edma_stoprecv(struct ath_softc *sc, int dodelay) 158 { 159 struct ath_hal *ah = sc->sc_ah; 160 161 ATH_RX_LOCK(sc); 162 ath_hal_stoppcurecv(ah); 163 ath_hal_setrxfilter(ah, 0); 164 ath_hal_stopdmarecv(ah); 165 166 DELAY(3000); 167 168 /* Flush RX pending for each queue */ 169 /* XXX should generic-ify this */ 170 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) { 171 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending); 172 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL; 173 } 174 175 if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) { 176 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending); 177 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL; 178 } 179 ATH_RX_UNLOCK(sc); 180 } 181 182 /* 183 * Re-initialise the FIFO given the current buffer contents. 
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 *
 * Caller must hold the RX lock; the shadow array (m_fifo) is the
 * authoritative record of what the hardware was given.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	/* Re-push each tracked buffer, oldest (head) first, so the
	 * hardware sees them in the same order we track them. */
	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right: walking depth entries from the
	 * head must land exactly on the tail. */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Drain the given hardware queue into the deferred list and then
 * schedule the RX taskqueue to complete the deferred frames.
 */
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ath_edma_recv_proc_queue(sc, qtype, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

/*
 * As above, but for both the high- and low-priority queues.
 */
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

/*
 * Synchronously flush both RX queues: move completed frames from the
 * FIFOs to the deferred lists and process them, without refilling
 * the FIFOs (dosched=0).
 */
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	/* Mark ourselves as an active RX processor (vs the reset path) */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 *
 * Walks the FIFO from the head, stopping at the first descriptor the
 * hardware hasn't finished (HAL_EINPROGRESS); completed buffers are
 * moved to sc_rx_rxlist[qtype] for later processing.  If 'dosched'
 * is set, the FIFO is topped up with fresh buffers afterwards.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		/* Hardware hasn't completed this descriptor yet; stop here */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		device_printf(sc->sc_dev,
		    "%s: handled npkts %d\n",
		    __func__, npkts);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf, *next;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	TAILQ_FOREACH_SAFE(bf,
	    &sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	TAILQ_FOREACH_SAFE(bf,
	    &sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

/*
 * Complete the deferred frames for the given queue: hand each frame's
 * mbuf to ath_rx_pkt() and then return the ath_bufs to the free list.
 *
 * Returns the number of "good" (successfully received) frames, which
 * the caller uses to update sc_lastrx.
 */
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over; the stack is called without the RX lock held */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		/* Ownership of the mbuf passes to ath_rx_pkt(); clear
		 * bf_m so ath_edma_rxbuf_free() won't double-free it. */
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

/*
 * RX taskqueue handler: drain both RX FIFOs into the deferred lists,
 * complete the deferred frames, then kick TX and the DFS task as
 * required.
 */
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	/* Don't run concurrently with a chip reset */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/* XXX inside IF_LOCK ? */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 *
 * Returns 0 on success, ENOBUFS or a bus_dma error code on failure.
 * On failure the mbuf is freed and the ath_buf left untouched.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 *
	 * For EDMA the RX status descriptor lives at the start of
	 * the mbuf data itself, so bf_desc points into the mbuf.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 *
 * Takes an ath_buf from the sc_rxbuf free list and attaches a
 * freshly mapped mbuf to it.  Returns NULL if the free list is
 * empty or the mbuf setup fails; in the latter case the ath_buf
 * is returned to the free list.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

/*
 * Return an ath_buf to the free list, unloading and freeing its
 * mbuf first if one is still attached.
 */
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		/* Track it in the shadow array before/as it's handed
		 * to the hardware so ordering matches. */
		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

/*
 * Free every buffer in the given queue's FIFO shadow array, plus any
 * pending reassembly mbuf, and reset the FIFO bookkeeping to empty.
 */
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 *
 * Queries the HAL for the hardware FIFO depth and allocates the
 * shadow array of ath_buf pointers used to track what has been
 * pushed to the hardware.  Returns 0 on success, negative errno
 * on failure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the FIFO shadow array for the given queue.
 */
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

/*
 * Allocate the RX descriptor DMA state and set up both queue FIFOs.
 */
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	/* NOTE(review): setup_rxfifo errors are deliberately ignored
	 * here (void-cast); confirm whether failure should propagate. */
	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Tear down all RX state: deferred frames, FIFO contents, the FIFO
 * shadow arrays, and finally the descriptor DMA memory.
 */
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

/*
 * Hook the EDMA RX methods into the softc and configure the RX
 * buffer / status descriptor sizes.  Called at attach time for
 * EDMA-capable chips.
 */
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}