/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - e.g., calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, e.g. the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_rx_edma.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)	do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
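/*
 * These implement power-of-two ring index arithmetic: bump the index,
 * then wrap it with a mask, so e.g. INCR(i, 8) takes i through
 * 6 -> 7 -> 0.  Note the mask-based wrap is only correct when _sz is
 * a power of two, which the hardware RX FIFO depths are expected to be.
 */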

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

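/*
 * Stop receive: disable the PCU receive engine, clear the RX filter
 * and halt RX DMA, then free any pending re-assembly mbufs on each
 * queue.
 */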
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);
	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);
	ath_hal_stopdmarecv(ah);

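	/*
	 * XXX presumably this is long enough for any in-flight RX DMA
	 * to complete; note the "dodelay" argument is currently ignored.
	 */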
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}

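/*
 * Process the given queue's FIFO into the deferred list, then kick
 * the RX taskqueue so the deferred frames get handed up.
 */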
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_edma_recv_proc_queue(sc, qtype, dosched);
	ath_power_restore_power_state(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

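/*
 * As above, but for both the high and low priority queues.
 */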
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
	ath_power_restore_power_state(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

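/*
 * Flush the receive path: drain both FIFOs into the deferred lists
 * and process the deferred frames, without scheduling any further
 * FIFO refills.
 */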
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ath_power_restore_power_state(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		if (npkts > 0)
			device_printf(sc->sc_dev,
			    "%s: handled npkts %d\n",
			    __func__, npkts);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while ((bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) != NULL) {
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
	while ((bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) != NULL) {
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
}

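/*
 * Process the deferred queue: hand each completed frame on the
 * given queue's deferred list up to the net80211 stack via
 * ath_rx_pkt(), then return the buffers to the free list.
 */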
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf;
	struct ath_buf *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	TAILQ_FOREACH_MUTABLE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);

	while ((bf = TAILQ_FIRST(&rxlist)) != NULL) {
		/* Free the buffer/mbuf */
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

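/*
 * The RX taskqueue handler: process both FIFOs into the deferred
 * lists, hand the deferred frames up to the stack, then kick TX
 * and DFS processing as needed.
 */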
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	wlan_serialize_enter();
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		wlan_serialize_exit();
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_power_restore_power_state(sc);

	/* XXX inside IF_LOCK ? */
	if (!ifq_is_oactive(&ifp->if_snd)) {
#ifdef	IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!ifq_is_empty(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
	wlan_serialize_exit();
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, sc->sc_edma_bufsize);
/*	m = m_getcl(MB_WAIT, MT_DATA, M_PKTHDR);*/
/*	m = m_getm(NULL, sc->sc_edma_bufsize, MB_WAIT, MT_DATA);*/
	if (! m)
		return (ENOBUFS);		/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, 1, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

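/*
 * Return an RX buffer to the free list, unloading the DMA map and
 * freeing the mbuf if it hasn't already been consumed by
 * ath_rx_pkt().
 */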
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

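/*
 * Free all buffers on the given queue's FIFO (and any pending
 * re-assembly mbuf), then reset the FIFO state back to empty.
 */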
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = kmalloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_INTWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

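/*
 * Free the FIFO tracking array for the given queue.  This assumes
 * the FIFO contents have already been freed via
 * ath_edma_rxfifo_flush().
 */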
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	kfree(re->m_fifo, M_ATHDEV);

	return (0);
}

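/*
 * Set up the RX EDMA state: the descriptor/buffer DMA memory and
 * the per-queue FIFO arrays.
 */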
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

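/*
 * Tear down the RX EDMA state: flush the deferred lists, flush and
 * free the per-queue FIFOs, then release the RX descriptor/buffer
 * DMA memory.
 */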
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

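/*
 * Hook up the EDMA RX methods and fetch the EDMA-specific status
 * and buffer sizes from the HAL.
 */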
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}
975