1 /*- 2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 32 /* 33 * Driver for the Atheros Wireless LAN controller. 34 * 35 * This software is derived from work of Atsushi Onoe; his contribution 36 * is greatly appreciated. 37 */ 38 39 #include "opt_inet.h" 40 #include "opt_ath.h" 41 /* 42 * This is needed for register operations which are performed 43 * by the driver - eg, calls to ath_hal_gettsf32(). 
44 * 45 * It's also required for any AH_DEBUG checks in here, eg the 46 * module dependencies. 47 */ 48 #include "opt_ah.h" 49 #include "opt_wlan.h" 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/sysctl.h> 54 #include <sys/mbuf.h> 55 #include <sys/malloc.h> 56 #include <sys/lock.h> 57 #include <sys/mutex.h> 58 #include <sys/kernel.h> 59 #include <sys/socket.h> 60 #include <sys/sockio.h> 61 #include <sys/errno.h> 62 #include <sys/callout.h> 63 #include <sys/bus.h> 64 #include <sys/endian.h> 65 #include <sys/kthread.h> 66 #include <sys/taskqueue.h> 67 #include <sys/priv.h> 68 #include <sys/module.h> 69 #include <sys/ktr.h> 70 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_dl.h> 74 #include <net/if_media.h> 75 #include <net/if_types.h> 76 #include <net/if_arp.h> 77 #include <net/ethernet.h> 78 #include <net/if_llc.h> 79 #include <net/ifq_var.h> 80 81 #include <netproto/802_11/ieee80211_var.h> 82 #include <netproto/802_11/ieee80211_regdomain.h> 83 #ifdef IEEE80211_SUPPORT_SUPERG 84 #include <netproto/802_11/ieee80211_superg.h> 85 #endif 86 #ifdef IEEE80211_SUPPORT_TDMA 87 #include <netproto/802_11/ieee80211_tdma.h> 88 #endif 89 90 #include <net/bpf.h> 91 92 #ifdef INET 93 #include <netinet/in.h> 94 #include <netinet/if_ether.h> 95 #endif 96 97 #include <dev/netif/ath/ath/if_athvar.h> 98 #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */ 99 #include <dev/netif/ath/ath_hal/ah_diagcodes.h> 100 101 #include <dev/netif/ath/ath/if_ath_debug.h> 102 #include <dev/netif/ath/ath/if_ath_misc.h> 103 #include <dev/netif/ath/ath/if_ath_tsf.h> 104 #include <dev/netif/ath/ath/if_ath_tx.h> 105 #include <dev/netif/ath/ath/if_ath_sysctl.h> 106 #include <dev/netif/ath/ath/if_ath_led.h> 107 #include <dev/netif/ath/ath/if_ath_keycache.h> 108 #include <dev/netif/ath/ath/if_ath_rx.h> 109 #include <dev/netif/ath/ath/if_ath_beacon.h> 110 #include <dev/netif/ath/ath/if_athdfs.h> 111 112 #ifdef ATH_TX99_DIAG 113 #include 
<dev/netif/ath/ath_tx99/ath_tx99.h> 114 #endif 115 116 #include <dev/netif/ath/ath/if_ath_rx_edma.h> 117 118 #ifdef ATH_DEBUG_ALQ 119 #include <dev/netif/ath/ath/if_ath_alq.h> 120 #endif 121 122 /* 123 * some general macros 124 */ 125 #define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1) 126 #define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1) 127 128 MALLOC_DECLARE(M_ATHDEV); 129 130 /* 131 * XXX TODO: 132 * 133 * + Make sure the FIFO is correctly flushed and reinitialised 134 * through a reset; 135 * + Verify multi-descriptor frames work! 136 * + There's a "memory use after free" which needs to be tracked down 137 * and fixed ASAP. I've seen this in the legacy path too, so it 138 * may be a generic RX path issue. 139 */ 140 141 /* 142 * XXX shuffle the function orders so these pre-declarations aren't 143 * required! 144 */ 145 static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, 146 int nbufs); 147 static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype); 148 static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf); 149 static void ath_edma_recv_proc_queue(struct ath_softc *sc, 150 HAL_RX_QUEUE qtype, int dosched); 151 static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, 152 HAL_RX_QUEUE qtype, int dosched); 153 154 static void 155 ath_edma_stoprecv(struct ath_softc *sc, int dodelay) 156 { 157 struct ath_hal *ah = sc->sc_ah; 158 159 ATH_RX_LOCK(sc); 160 ath_hal_stoppcurecv(ah); 161 ath_hal_setrxfilter(ah, 0); 162 ath_hal_stopdmarecv(ah); 163 164 DELAY(3000); 165 166 /* Flush RX pending for each queue */ 167 /* XXX should generic-ify this */ 168 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) { 169 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending); 170 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL; 171 } 172 173 if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) { 174 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending); 175 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL; 176 } 177 
ATH_RX_UNLOCK(sc); 178 } 179 180 /* 181 * Re-initialise the FIFO given the current buffer contents. 182 * Specifically, walk from head -> tail, pushing the FIFO contents 183 * back into the FIFO. 184 */ 185 static void 186 ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype) 187 { 188 struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; 189 struct ath_buf *bf; 190 int i, j; 191 192 ATH_RX_LOCK_ASSERT(sc); 193 194 i = re->m_fifo_head; 195 for (j = 0; j < re->m_fifo_depth; j++) { 196 bf = re->m_fifo[i]; 197 DPRINTF(sc, ATH_DEBUG_EDMA_RX, 198 "%s: Q%d: pos=%i, addr=0x%jx\n", 199 __func__, 200 qtype, 201 i, 202 (uintmax_t)bf->bf_daddr); 203 ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype); 204 INCR(i, re->m_fifolen); 205 } 206 207 /* Ensure this worked out right */ 208 if (i != re->m_fifo_tail) { 209 device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n", 210 __func__, 211 i, 212 re->m_fifo_tail); 213 } 214 } 215 216 /* 217 * Start receive. 218 * 219 * XXX TODO: this needs to reallocate the FIFO entries when a reset 220 * occurs, in case the FIFO is filled up and no new descriptors get 221 * thrown into the FIFO. 222 */ 223 static int 224 ath_edma_startrecv(struct ath_softc *sc) 225 { 226 struct ath_hal *ah = sc->sc_ah; 227 228 ATH_RX_LOCK(sc); 229 230 /* Enable RX FIFO */ 231 ath_hal_rxena(ah); 232 233 /* 234 * Entries should only be written out if the 235 * FIFO is empty. 236 * 237 * XXX This isn't correct. I should be looking 238 * at the value of AR_RXDP_SIZE (0x0070) to determine 239 * how many entries are in here. 240 * 241 * A warm reset will clear the registers but not the FIFO. 242 * 243 * And I believe this is actually the address of the last 244 * handled buffer rather than the current FIFO pointer. 245 * So if no frames have been (yet) seen, we'll reinit the 246 * FIFO. 247 * 248 * I'll chase that up at some point. 
249 */ 250 if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) { 251 DPRINTF(sc, ATH_DEBUG_EDMA_RX, 252 "%s: Re-initing HP FIFO\n", __func__); 253 ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP); 254 } 255 if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) { 256 DPRINTF(sc, ATH_DEBUG_EDMA_RX, 257 "%s: Re-initing LP FIFO\n", __func__); 258 ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP); 259 } 260 261 /* Add up to m_fifolen entries in each queue */ 262 /* 263 * These must occur after the above write so the FIFO buffers 264 * are pushed/tracked in the same order as the hardware will 265 * process them. 266 */ 267 ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP, 268 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen); 269 270 ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP, 271 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen); 272 273 ath_mode_init(sc); 274 ath_hal_startpcurecv(ah); 275 276 ATH_RX_UNLOCK(sc); 277 278 return (0); 279 } 280 281 static void 282 ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, 283 int dosched) 284 { 285 286 ath_edma_recv_proc_queue(sc, qtype, dosched); 287 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 288 } 289 290 static void 291 ath_edma_recv_sched(struct ath_softc *sc, int dosched) 292 { 293 294 ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched); 295 ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched); 296 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 297 } 298 299 static void 300 ath_edma_recv_flush(struct ath_softc *sc) 301 { 302 303 DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__); 304 305 ATH_PCU_LOCK(sc); 306 sc->sc_rxproc_cnt++; 307 ATH_PCU_UNLOCK(sc); 308 309 /* 310 * Flush any active frames from FIFO -> deferred list 311 */ 312 ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0); 313 ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0); 314 315 /* 316 * Process what's in the deferred queue 317 */ 318 ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0); 319 ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0); 320 321 
ATH_PCU_LOCK(sc); 322 sc->sc_rxproc_cnt--; 323 ATH_PCU_UNLOCK(sc); 324 } 325 326 /* 327 * Process frames from the current queue into the deferred queue. 328 */ 329 static void 330 ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, 331 int dosched) 332 { 333 struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; 334 struct ath_rx_status *rs; 335 struct ath_desc *ds; 336 struct ath_buf *bf; 337 struct mbuf *m; 338 struct ath_hal *ah = sc->sc_ah; 339 uint64_t tsf; 340 uint16_t nf; 341 int npkts = 0; 342 343 tsf = ath_hal_gettsf64(ah); 344 nf = ath_hal_getchannoise(ah, sc->sc_curchan); 345 sc->sc_stats.ast_rx_noise = nf; 346 347 ATH_RX_LOCK(sc); 348 349 do { 350 bf = re->m_fifo[re->m_fifo_head]; 351 /* This shouldn't occur! */ 352 if (bf == NULL) { 353 device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n", 354 __func__, 355 qtype); 356 break; 357 } 358 m = bf->bf_m; 359 ds = bf->bf_desc; 360 361 /* 362 * Sync descriptor memory - this also syncs the buffer for us. 363 * EDMA descriptors are in cached memory. 364 */ 365 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 366 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 367 rs = &bf->bf_status.ds_rxstat; 368 bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, 369 NULL, rs); 370 #ifdef ATH_DEBUG 371 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 372 ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK); 373 #endif /* ATH_DEBUG */ 374 #ifdef ATH_DEBUG_ALQ 375 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS)) 376 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS, 377 sc->sc_rx_statuslen, (char *) ds); 378 #endif /* ATH_DEBUG */ 379 if (bf->bf_rxstatus == HAL_EINPROGRESS) 380 break; 381 382 /* 383 * Completed descriptor. 384 */ 385 DPRINTF(sc, ATH_DEBUG_EDMA_RX, 386 "%s: Q%d: completed!\n", __func__, qtype); 387 npkts++; 388 389 /* 390 * We've been synced already, so unmap. 391 */ 392 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 393 394 /* 395 * Remove the FIFO entry and place it on the completion 396 * queue. 
397 */ 398 re->m_fifo[re->m_fifo_head] = NULL; 399 TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list); 400 401 /* Bump the descriptor FIFO stats */ 402 INCR(re->m_fifo_head, re->m_fifolen); 403 re->m_fifo_depth--; 404 /* XXX check it doesn't fall below 0 */ 405 } while (re->m_fifo_depth > 0); 406 407 /* Append some more fresh frames to the FIFO */ 408 if (dosched) 409 ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen); 410 411 ATH_RX_UNLOCK(sc); 412 413 /* rx signal state monitoring */ 414 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 415 416 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, 417 "ath edma rx proc: npkts=%d\n", 418 npkts); 419 420 /* Handle resched and kickpcu appropriately */ 421 ATH_PCU_LOCK(sc); 422 if (dosched && sc->sc_kickpcu) { 423 ATH_KTR(sc, ATH_KTR_ERROR, 0, 424 "ath_edma_recv_proc_queue(): kickpcu"); 425 if (npkts > 0) 426 device_printf(sc->sc_dev, 427 "%s: handled npkts %d\n", 428 __func__, npkts); 429 430 /* 431 * XXX TODO: what should occur here? Just re-poke and 432 * re-enable the RX FIFO? 433 */ 434 sc->sc_kickpcu = 0; 435 } 436 ATH_PCU_UNLOCK(sc); 437 438 return; 439 } 440 441 /* 442 * Flush the deferred queue. 443 * 444 * This destructively flushes the deferred queue - it doesn't 445 * call the wireless stack on each mbuf. 
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	/* Caller must hold the RX lock */
	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while ((bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) != NULL) {
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
	while ((bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) != NULL) {
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
}

/*
 * Process the deferred (already-reaped) completion list for the given
 * queue, handing each frame to the stack via ath_rx_pkt().
 *
 * The list is detached from sc_rx_rxlist[qtype] under the RX lock,
 * processed without the lock held, then the spent ath_bufs are freed
 * back under the lock.
 *
 * Returns the number of frames ath_rx_pkt() reported as good.
 *
 * NOTE(review): the dosched argument is unused here.
 */
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf;
	struct ath_buf *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	TAILQ_FOREACH_MUTABLE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		/* mbuf ownership passes to ath_rx_pkt(); clear bf_m so
		 * ath_edma_rxbuf_free() below doesn't double-free it */
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);

	while ((bf = TAILQ_FIRST(&rxlist)) != NULL) {
		/* Free the buffer/mbuf */
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

/*
 * RX taskqueue handler: reap both hardware FIFOs (with refill) and
 * process the resulting deferred lists, then restart TX if the send
 * queue has work and kick the DFS task if needed.
 *
 * Skips entirely if a reset is in flight (sc_inreset_cnt), otherwise
 * bumps sc_rxproc_cnt under the PCU lock to interlock against reset.
 */
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	wlan_serialize_enter();
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		wlan_serialize_exit();
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/* XXX inside IF_LOCK ? */
	if (!ifq_is_oactive(&ifp->if_snd)) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!ifq_is_empty(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
	wlan_serialize_exit();
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 *
 * The EDMA hardware writes the RX status block at the head of the
 * buffer itself, so the first sc_rx_statuslen bytes are zeroed and
 * the descriptor pointer aliases the mbuf data.
 *
 * Returns 0 on success, ENOBUFS/errno on failure (mbuf freed).
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, sc->sc_edma_bufsize);
/*	m = m_getcl(MB_WAIT, MT_DATA, M_PKTHDR);*/
/*	m = m_getm(NULL, sc->sc_edma_bufsize, MB_WAIT, MT_DATA);*/
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, 1, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 *
 * Takes an ath_buf from the free list (sc_rxbuf) and attaches a
 * freshly-mapped mbuf to it.  On failure the buf is returned to the
 * free list and NULL is returned.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

/*
 * Return an ath_buf to the free list, unloading/freeing its mbuf if
 * it hasn't already been consumed by ath_rx_pkt().
 */
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

/*
 * Free every buffer still tracked in the FIFO ring (and any pending
 * partial frame) and reset the ring to the empty state.
 *
 * Always returns 0.
 */
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 *
 * Queries the HAL for the hardware FIFO depth and allocates the
 * shadow ath_buf pointer ring for it.
 *
 * Returns 0 on success or a negative errno on failure.
 * NOTE(review): negative errno here is inconsistent with the positive
 * errno convention used elsewhere in this file — confirm callers.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = kmalloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_INTWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the shadow FIFO ring for the given queue.
 *
 * The ring must already have been flushed (ath_edma_rxfifo_flush).
 */
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	kfree(re->m_fifo, M_ATHDEV);

	return (0);
}

/*
 * Set up RX DMA state: descriptor memory/buffer list, then the
 * per-queue shadow FIFOs.
 *
 * NOTE(review): the per-queue ath_edma_setup_rxfifo() results are
 * deliberately ignored here — confirm whether failure should
 * propagate.
 */
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Tear down RX DMA state: drain the deferred lists, flush and free
 * both shadow FIFOs, then release the descriptor DMA memory.
 */
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

/*
 * Public entry point: configure the softc's RX method table for the
 * EDMA receive path and program the hardware RX buffer size.
 */
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}