xref: /dragonfly/sys/dev/netif/ath/ath/if_ath_tx_edma.c (revision dadd6466)
1 /*-
2  * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 
32 /*
33  * Driver for the Atheros Wireless LAN controller.
34  *
35  * This software is derived from work of Atsushi Onoe; his contribution
36  * is greatly appreciated.
37  */
38 
39 #include "opt_inet.h"
40 #include "opt_ath.h"
41 /*
42  * This is needed for register operations which are performed
43  * by the driver - eg, calls to ath_hal_gettsf32().
44  *
45  * It's also required for any AH_DEBUG checks in here, eg the
46  * module dependencies.
47  */
48 #include "opt_ah.h"
49 #include "opt_wlan.h"
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/sysctl.h>
54 #include <sys/mbuf.h>
55 #include <sys/malloc.h>
56 #include <sys/lock.h>
57 #include <sys/mutex.h>
58 #include <sys/kernel.h>
59 #include <sys/socket.h>
60 #include <sys/sockio.h>
61 #include <sys/errno.h>
62 #include <sys/callout.h>
63 #include <sys/bus.h>
64 #include <sys/endian.h>
65 #include <sys/kthread.h>
66 #include <sys/taskqueue.h>
67 #include <sys/priv.h>
68 #include <sys/module.h>
69 #include <sys/ktr.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 #include <net/if_types.h>
76 #include <net/if_arp.h>
77 #include <net/ethernet.h>
78 #include <net/if_llc.h>
79 #include <net/ifq_var.h>
80 
81 #include <netproto/802_11/ieee80211_var.h>
82 #include <netproto/802_11/ieee80211_regdomain.h>
83 #ifdef IEEE80211_SUPPORT_SUPERG
84 #include <netproto/802_11/ieee80211_superg.h>
85 #endif
86 #ifdef IEEE80211_SUPPORT_TDMA
87 #include <netproto/802_11/ieee80211_tdma.h>
88 #endif
89 
90 #include <net/bpf.h>
91 
92 #ifdef INET
93 #include <netinet/in.h>
94 #include <netinet/if_ether.h>
95 #endif
96 
97 #include <dev/netif/ath/ath/if_athvar.h>
98 #include <dev/netif/ath/ath_hal/ah_devid.h>		/* XXX for softled */
99 #include <dev/netif/ath/ath_hal/ah_diagcodes.h>
100 
101 #include <dev/netif/ath/ath/if_ath_debug.h>
102 #include <dev/netif/ath/ath/if_ath_misc.h>
103 #include <dev/netif/ath/ath/if_ath_tsf.h>
104 #include <dev/netif/ath/ath/if_ath_tx.h>
105 #include <dev/netif/ath/ath/if_ath_sysctl.h>
106 #include <dev/netif/ath/ath/if_ath_led.h>
107 #include <dev/netif/ath/ath/if_ath_keycache.h>
108 #include <dev/netif/ath/ath/if_ath_rx.h>
109 #include <dev/netif/ath/ath/if_ath_beacon.h>
110 #include <dev/netif/ath/ath/if_athdfs.h>
111 
112 #ifdef ATH_TX99_DIAG
113 #include <dev/netif/ath/ath_tx99/ath_tx99.h>
114 #endif
115 
116 #include <dev/netif/ath/ath/if_ath_tx_edma.h>
117 
118 #ifdef	ATH_DEBUG_ALQ
119 #include <dev/netif/ath/ath/if_ath_alq.h>
120 #endif
121 
/*
 * Ring-index helpers: advance / retreat an index with power-of-two
 * wrap-around ((_sz) must be a power of two).
 *
 * Wrapped in do { } while (0) so each expands as a single statement;
 * the previous form expanded to TWO statements, which silently breaks
 * when used as the body of an unbraced if/else or loop.
 */
#define	INCR(_l, _sz)		do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)		do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
127 
128 /*
129  * XXX doesn't belong here, and should be tunable
130  */
131 #define	ATH_TXSTATUS_RING_SIZE	512
132 
133 MALLOC_DECLARE(M_ATHDEV);
134 
135 static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);
136 
137 /*
138  * Push some frames into the TX FIFO if we have space.
139  */
/*
 * The caller must hold the TXQ lock (asserted below).  Each frame is
 * migrated from the pending list (txq->axq_q) onto the FIFO staging
 * list (txq->fifo) and its descriptor handed to a hardware FIFO slot;
 * at most HAL_TXFIFO_DEPTH entries may be outstanding at once.
 */
140 static void
141 ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
142 {
143 	struct ath_buf *bf, *bf_last;
144 	int i = 0;
145 
146 	ATH_TXQ_LOCK_ASSERT(txq);
147 
148 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
149 	    __func__,
150 	    txq->axq_qnum);
151 
	/*
	 * NOTE(review): bf is removed from axq_q and re-inserted into
	 * txq->fifo inside the loop body, so TAILQ_FOREACH's next
	 * pointer then follows the *fifo* list and the loop terminates
	 * after pushing a single frame per call.  That matches the
	 * "one frame at a time" comment below, but confirm it is
	 * intentional rather than relying on TAILQ_FOREACH_MUTABLE.
	 */
152 	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
153 		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
154 			break;
155 
156 		/*
157 		 * We have space in the FIFO - so let's push a frame
158 		 * into it.
159 		 */
160 
161 		/*
162 		 * Remove it from the normal list
163 		 */
164 		ATH_TXQ_REMOVE(txq, bf, bf_list);
165 
166 		/*
167 		 * XXX for now, we only dequeue a frame at a time, so
168 		 * that's only one buffer.  Later on when we just
169 		 * push this staging _list_ into the queue, we'll
170 		 * set bf_last to the end pointer in the list.
171 		 */
172 		bf_last = bf;
173 		DPRINTF(sc, ATH_DEBUG_TX_PROC,
174 		    "%s: Q%d: depth=%d; pushing %p->%p\n",
175 		    __func__,
176 		    txq->axq_qnum,
177 		    txq->axq_fifo_depth,
178 		    bf,
179 		    bf_last);
180 
181 		/*
182 		 * Append it to the FIFO staging list
183 		 */
184 		ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);
185 
186 		/*
187 		 * Set fifo start / fifo end flags appropriately
188 		 *
189 		 */
190 		bf->bf_flags |= ATH_BUF_FIFOPTR;
191 		bf_last->bf_flags |= ATH_BUF_FIFOEND;
192 
193 		/*
194 		 * Push _into_ the FIFO.
195 		 */
196 		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
197 #ifdef	ATH_DEBUG
198 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
199 			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
200 #endif/* ATH_DEBUG */
201 #ifdef	ATH_DEBUG_ALQ
202 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
203 			ath_tx_alq_post(sc, bf);
204 #endif /* ATH_DEBUG_ALQ */
205 		txq->axq_fifo_depth++;
206 		i++;
207 	}
	/* Only poke the hardware if something was actually queued. */
208 	if (i > 0)
209 		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
210 }
211 
212 /*
213  * Re-initialise the DMA FIFO with the current contents of
214  * said TXQ.
215  *
216  * This should only be called as part of the chip reset path, as it
217  * assumes the FIFO is currently empty.
218  */
219 static void
220 ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
221 {
222 	struct ath_buf *bf;
223 	int i = 0;
224 	int fifostart = 1;
225 	int old_fifo_depth;
226 
227 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
228 	    __func__,
229 	    txq->axq_qnum);
230 
231 	ATH_TXQ_LOCK_ASSERT(txq);
232 
233 	/*
234 	 * Let's log if the tracked FIFO depth doesn't match
235 	 * what we actually push in.
236 	 */
237 	old_fifo_depth = txq->axq_fifo_depth;
238 	txq->axq_fifo_depth = 0;
239 
240 	/*
241 	 * Walk the FIFO staging list, looking for "head" entries.
242 	 * Since we may have a partially completed list of frames,
243 	 * we push the first frame we see into the FIFO and re-mark
244 	 * it as the head entry.  We then skip entries until we see
245 	 * FIFO end, at which point we get ready to push another
246 	 * entry into the FIFO.
247 	 */
248 	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
249 		/*
250 		 * If we're looking for FIFOEND and we haven't found
251 		 * it, skip.
252 		 *
253 		 * If we're looking for FIFOEND and we've found it,
254 		 * reset for another descriptor.
255 		 */
256 #ifdef	ATH_DEBUG
257 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
258 			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
259 #endif/* ATH_DEBUG */
260 #ifdef	ATH_DEBUG_ALQ
261 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
262 			ath_tx_alq_post(sc, bf);
263 #endif /* ATH_DEBUG_ALQ */
264 
265 		if (fifostart == 0) {
266 			if (bf->bf_flags & ATH_BUF_FIFOEND)
267 				fifostart = 1;
268 			continue;
269 		}
270 
271 		/* Make sure we're not overflowing the FIFO! */
272 		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
273 			device_printf(sc->sc_dev,
274 			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
275 			    __func__,
276 			    txq->axq_qnum,
277 			    txq->axq_fifo_depth);
278 		}
279 
280 #if 0
281 		DPRINTF(sc, ATH_DEBUG_RESET,
282 		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
283 		    __func__,
284 		    txq->axq_qnum,
285 		    txq->axq_fifo_depth,
286 		    bf,
287 		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
288 		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
289 #endif
290 
291 		/*
292 		 * Set this to be the first buffer in the FIFO
293 		 * list - even if it's also the last buffer in
294 		 * a FIFO list!
295 		 */
296 		bf->bf_flags |= ATH_BUF_FIFOPTR;
297 
298 		/* Push it into the FIFO and bump the FIFO count */
299 		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
300 		txq->axq_fifo_depth++;
301 
302 		/*
303 		 * If this isn't the last entry either, let's
304 		 * clear fifostart so we continue looking for
305 		 * said last entry.
306 		 */
307 		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
308 			fifostart = 0;
309 		i++;
310 	}
311 
312 	/* Only bother starting the queue if there's something in it */
313 	if (i > 0)
314 		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
315 
316 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
317 	    __func__,
318 	    txq->axq_qnum,
319 	    old_fifo_depth,
320 	    txq->axq_fifo_depth);
321 
322 	/* And now, let's check! */
323 	if (txq->axq_fifo_depth != old_fifo_depth) {
324 		device_printf(sc->sc_dev,
325 		    "%s: Q%d: FIFO depth should be %d, is %d\n",
326 		    __func__,
327 		    txq->axq_qnum,
328 		    old_fifo_depth,
329 		    txq->axq_fifo_depth);
330 	}
331 }
332 
333 /*
334  * Hand off this frame to a hardware queue.
335  *
336  * Things are a bit hairy in the EDMA world.  The TX FIFO is only
337  * 8 entries deep, so we need to keep track of exactly what we've
338  * pushed into the FIFO and what's just sitting in the TX queue,
339  * waiting to go out.
340  *
341  * So this is split into two halves - frames get appended to the
342  * TXQ; then a scheduler is called to push some frames into the
343  * actual TX FIFO.
344  */
345 static void
346 ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
347     struct ath_buf *bf)
348 {
349 
350 	ATH_TXQ_LOCK(txq);
351 
352 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
353 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
354 
355 	/*
356 	 * XXX TODO: write a hard-coded check to ensure that
357 	 * the queue id in the TX descriptor matches txq->axq_qnum.
358 	 */
359 
360 	/* Update aggr stats */
361 	if (bf->bf_state.bfs_aggr)
362 		txq->axq_aggr_depth++;
363 
364 	/* Push and update frame stats */
365 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
366 
367 	/* For now, set the link pointer in the last descriptor
368 	 * to be NULL.
369 	 *
370 	 * Later on, when it comes time to handling multiple descriptors
371 	 * in one FIFO push, we can link descriptors together this way.
372 	 */
373 
374 	/*
375 	 * Finally, call the FIFO schedule routine to schedule some
376 	 * frames to the FIFO.
377 	 */
378 	ath_edma_tx_fifo_fill(sc, txq);
379 	ATH_TXQ_UNLOCK(txq);
380 }
381 
382 /*
383  * Hand off this frame to a multicast software queue.
384  *
385  * The EDMA TX CABQ will get a list of chained frames, chained
386  * together using the next pointer.  The single head of that
387  * particular queue is pushed to the hardware CABQ.
388  */
/*
 * Called with the overall TX lock held (asserted below); the TXQ
 * lock is taken around the queue manipulation itself.
 */
389 static void
390 ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
391     struct ath_buf *bf)
392 {
393 
394 	ATH_TX_LOCK_ASSERT(sc);
395 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
396 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
397 
398 	ATH_TXQ_LOCK(txq);
399 	/*
400 	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
401 	 */
402 	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
403 		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
404 		struct ieee80211_frame *wh;
405 
406 		/* mark previous frame */
		/*
		 * Set MORE_DATA on the previously queued frame so the
		 * receiver knows further (multicast) traffic follows.
		 */
407 		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
408 		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
409 
410 		/* re-sync buffer to memory */
		/* flush the header change before the DMA engine reads it */
411 		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
412 		   BUS_DMASYNC_PREWRITE);
413 
414 		/* link descriptor */
		/* chain the new frame off the previous tail descriptor */
415 		ath_hal_settxdesclink(sc->sc_ah,
416 		    bf_last->bf_lastds,
417 		    bf->bf_daddr);
418 	}
419 #ifdef	ATH_DEBUG_ALQ
420 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
421 		ath_tx_alq_post(sc, bf);
422 #endif	/* ATH_DEBUG_ALQ */
423 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
424 	ATH_TXQ_UNLOCK(txq);
425 }
426 
427 /*
428  * Handoff this frame to the hardware.
429  *
430  * For the multicast queue, this will treat it as a software queue
431  * and append it to the list, after updating the MORE_DATA flag
432  * in the previous frame.  The cabq processing code will ensure
433  * that the queue contents gets transferred over.
434  *
435  * For the hardware queues, this will queue a frame to the queue
436  * like before, then populate the FIFO from that.  Since the
437  * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
438  * frames such as management frames don't get prematurely dropped.
439  *
440  * This does imply that a similar flush-hwq-to-fifoq method will
441  * need to be called from the processq function, before the
442  * per-node software scheduler is called.
443  */
444 static void
445 ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
446     struct ath_buf *bf)
447 {
448 
449 	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
450 	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
451 	    __func__,
452 	    bf,
453 	    txq,
454 	    txq->axq_qnum);
455 
456 	if (txq->axq_qnum == ATH_TXQ_SWQ)
457 		ath_edma_xmit_handoff_mcast(sc, txq, bf);
458 	else
459 		ath_edma_xmit_handoff_hw(sc, txq, bf);
460 }
461 
462 static int
463 ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
464 {
465 	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
466 
467 	te->m_fifo = kmalloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
468 	    M_ATHDEV,
469 	    M_INTWAIT | M_ZERO);
470 	if (te->m_fifo == NULL) {
471 		device_printf(sc->sc_dev, "%s: malloc failed\n",
472 		    __func__);
473 		return (-ENOMEM);
474 	}
475 
476 	/*
477 	 * Set initial "empty" state.
478 	 */
479 	te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;
480 
481 	return (0);
482 }
483 
484 static int
485 ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
486 {
487 	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
488 
489 	/* XXX TODO: actually deref the ath_buf entries? */
490 	kfree(te->m_fifo, M_ATHDEV);
491 	return (0);
492 }
493 
494 static int
495 ath_edma_dma_txsetup(struct ath_softc *sc)
496 {
497 	int error;
498 	int i;
499 
500 	error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
501 	    NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
502 	if (error != 0)
503 		return (error);
504 
505 	ath_hal_setuptxstatusring(sc->sc_ah,
506 	    (void *) sc->sc_txsdma.dd_desc,
507 	    sc->sc_txsdma.dd_desc_paddr,
508 	    ATH_TXSTATUS_RING_SIZE);
509 
510 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
511 		ath_edma_setup_txfifo(sc, i);
512 	}
513 
514 	return (0);
515 }
516 
517 static int
518 ath_edma_dma_txteardown(struct ath_softc *sc)
519 {
520 	int i;
521 
522 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
523 		ath_edma_free_txfifo(sc, i);
524 	}
525 
526 	ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
527 	return (0);
528 }
529 
530 /*
531  * Drain all TXQs, potentially after completing the existing completed
532  * frames.
533  */
/*
 * reset_type selects the policy: ATH_RESET_NOLOSS services the TX
 * completion FIFO first and preserves pending frames, anything else
 * drops everything in each TX queue.
 */
534 static void
535 ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
536 {
537 #if 0
538 	struct ifnet *ifp = sc->sc_ifp;
539 #endif
540 	int i;
541 
542 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
543 
544 	(void) ath_stoptxdma(sc);
545 
546 	/*
547 	 * If reset type is noloss, the TX FIFO needs to be serviced
548 	 * and those frames need to be handled.
549 	 *
550 	 * Otherwise, just toss everything in each TX queue.
551 	 */
552 	if (reset_type == ATH_RESET_NOLOSS) {
		/* dosched=0: complete frames, but don't refill the FIFO */
553 		ath_edma_tx_processq(sc, 0);
554 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
555 			if (ATH_TXQ_SETUP(sc, i)) {
556 				ATH_TXQ_LOCK(&sc->sc_txq[i]);
557 				/*
558 				 * Free the holding buffer; DMA is now
559 				 * stopped.
560 				 */
561 				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
562 				/*
563 				 * Reset the link pointer to NULL; there's
564 				 * no frames to chain DMA to.
565 				 */
566 				sc->sc_txq[i].axq_link = NULL;
567 				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
568 			}
569 		}
570 	} else {
571 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
572 			if (ATH_TXQ_SETUP(sc, i))
573 				ath_tx_draintxq(sc, &sc->sc_txq[i]);
574 		}
575 	}
576 
577 	/* XXX dump out the TX completion FIFO contents */
578 
579 	/* XXX dump out the frames */
580 
581 #if 0
582 	/* remove, DragonFly uses OACTIVE to control if_start calls */
583 	IF_LOCK(&ifp->if_snd);
584 	ifq_clr_oactive(&ifp->if_snd);
585 	IF_UNLOCK(&ifp->if_snd);
586 #endif
	/* Queues are drained; disarm the TX watchdog. */
587 	sc->sc_wd_timer = 0;
588 }
589 
/*
 * TX completion tasklet.
 *
 * Runs the TX status queue processing (with rescheduling enabled)
 * under the wlan serializer.
 */
static void
ath_edma_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;

	wlan_serialize_enter();
	ath_edma_tx_processq(sc, 1);
	wlan_serialize_exit();
}
607 
608 /*
609  * Process the TX status queue.
610  */
/*
 * Pops completion entries off the EDMA TX status ring until the
 * hardware reports HAL_EINPROGRESS, completing the matching head
 * buffer of each queue's FIFO staging list.  When 'dosched' is
 * non-zero, freed FIFO slots are refilled and the software
 * scheduler is kicked afterwards.
 */
611 static void
612 ath_edma_tx_processq(struct ath_softc *sc, int dosched)
613 {
614 	struct ath_hal *ah = sc->sc_ah;
615 	HAL_STATUS status;
616 	struct ath_tx_status ts;
617 	struct ath_txq *txq;
618 	struct ath_buf *bf;
619 	struct ieee80211_node *ni;
	/* nacked is tallied below but not otherwise consumed here */
620 	int nacked = 0;
621 	int idx;
622 
623 #ifdef	ATH_DEBUG
624 	/* XXX */
625 	uint32_t txstatus[32];
626 #endif
627 
628 	for (idx = 0; ; idx++) {
629 		bzero(&ts, sizeof(ts));
630 
631 		ATH_TXSTATUS_LOCK(sc);
632 #ifdef	ATH_DEBUG
633 		ath_hal_gettxrawtxdesc(ah, txstatus);
634 #endif
635 		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
636 		ATH_TXSTATUS_UNLOCK(sc);
637 
		/* No more completed entries in the status ring. */
638 		if (status == HAL_EINPROGRESS)
639 			break;
640 
641 #ifdef	ATH_DEBUG
642 		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
643 			if (ts.ts_queue_id != sc->sc_bhalq)
644 			ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id,
645 			    idx, (status == HAL_OK));
646 #endif
647 
648 		/*
649 		 * If there is an error with this descriptor, continue
650 		 * processing.
651 		 *
652 		 * XXX TBD: log some statistics?
653 		 */
654 		if (status == HAL_EIO) {
655 			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
656 			    __func__);
657 			break;
658 		}
659 
660 #if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
661 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
662 			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
663 			    sc->sc_tx_statuslen,
664 			    (char *) txstatus);
665 #endif /* ATH_DEBUG_ALQ */
666 
667 		/*
668 		 * At this point we have a valid status descriptor.
669 		 * The QID and descriptor ID (which currently isn't set)
670 		 * is part of the status.
671 		 *
672 		 * We then assume that the descriptor in question is the
673 		 * -head- of the given QID.  Eventually we should verify
674 		 * this by using the descriptor ID.
675 		 */
676 
677 		/*
678 		 * The beacon queue is not currently a "real" queue.
679 		 * Frames aren't pushed onto it and the lock isn't setup.
680 		 * So skip it for now; the beacon handling code will
681 		 * free and alloc more beacon buffers as appropriate.
682 		 */
683 		if (ts.ts_queue_id == sc->sc_bhalq)
684 			continue;
685 
		/*
		 * NOTE(review): ts_queue_id comes straight from the
		 * hardware status entry and indexes sc_txq[] without a
		 * range check - confirm the HAL guarantees it is below
		 * HAL_NUM_TX_QUEUES.
		 */
686 		txq = &sc->sc_txq[ts.ts_queue_id];
687 
688 		ATH_TXQ_LOCK(txq);
689 		bf = ATH_TXQ_FIRST(&txq->fifo);
690 
691 		/*
692 		 * Work around the situation where I'm seeing notifications
693 		 * for Q1 when no frames are available.  That needs to be
694 		 * debugged but not by crashing _here_.
695 		 */
696 		if (bf == NULL) {
697 			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
698 			    __func__,
699 			    ts.ts_queue_id);
700 			ATH_TXQ_UNLOCK(txq);
701 			continue;
702 		}
703 
704 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
705 		    __func__,
706 		    ts.ts_queue_id, bf,
707 		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
708 		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
709 
710 		/* XXX TODO: actually output debugging info about this */
711 
712 #if 0
713 		/* XXX assert the buffer/descriptor matches the status descid */
714 		if (ts.ts_desc_id != bf->bf_descid) {
715 			device_printf(sc->sc_dev,
716 			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
717 			    "bfdescid=%d\n",
718 			    __func__,
719 			    ts.ts_queue_id,
720 			    ts.ts_desc_id,
721 			    bf->bf_descid);
722 		}
723 #endif
724 
725 		/* This removes the buffer and decrements the queue depth */
726 		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
727 		if (bf->bf_state.bfs_aggr)
728 			txq->axq_aggr_depth--;
729 
730 		/*
731 		 * If this was the end of a FIFO set, decrement FIFO depth
732 		 */
733 		if (bf->bf_flags & ATH_BUF_FIFOEND)
734 			txq->axq_fifo_depth--;
735 
736 		/*
737 		 * If this isn't the final buffer in a FIFO set, mark
738 		 * the buffer as busy so it goes onto the holding queue.
739 		 */
740 		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
741 			bf->bf_flags |= ATH_BUF_BUSY;
742 
743 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
744 		    __func__,
745 		    txq->axq_qnum,
746 		    txq->axq_fifo_depth,
747 		    txq->fifo.axq_depth);
748 
749 		/* XXX assert FIFO depth >= 0 */
750 		ATH_TXQ_UNLOCK(txq);
751 
752 		/*
753 		 * Outside of the TX lock - if the buffer is the
754 		 * end buffer in this FIFO, we don't need a holding
755 		 * buffer any longer.
756 		 */
757 		if (bf->bf_flags & ATH_BUF_FIFOEND) {
758 			ATH_TXQ_LOCK(txq);
759 			ath_txq_freeholdingbuf(sc, txq);
760 			ATH_TXQ_UNLOCK(txq);
761 		}
762 
763 		/*
764 		 * First we need to make sure ts_rate is valid.
765 		 *
766 		 * Pre-EDMA chips pass the whole TX descriptor to
767 		 * the proctxdesc function which will then fill out
768 		 * ts_rate based on the ts_finaltsi (final TX index)
769 		 * in the TX descriptor.  However the TX completion
770 		 * FIFO doesn't have this information.  So here we
771 		 * do a separate HAL call to populate that information.
772 		 *
773 		 * The same problem exists with ts_longretry.
774 		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
775 		 * the AR9380 HAL currently doesn't.  So until the HAL
776 		 * is imported and this can be added, we correct for it
777 		 * here.
778 		 */
779 		/* XXX TODO */
780 		/* XXX faked for now. Ew. */
781 		if (ts.ts_finaltsi < 4) {
782 			ts.ts_rate =
783 			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
			/*
			 * Accumulate the retries of every earlier rate
			 * series; each case deliberately falls through.
			 */
784 			switch (ts.ts_finaltsi) {
785 			case 3: ts.ts_longretry +=
786 			    bf->bf_state.bfs_rc[2].tries;
				/* FALLTHROUGH */
787 			case 2: ts.ts_longretry +=
788 			    bf->bf_state.bfs_rc[1].tries;
				/* FALLTHROUGH */
789 			case 1: ts.ts_longretry +=
790 			    bf->bf_state.bfs_rc[0].tries;
791 			}
792 		} else {
793 			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
794 			    __func__,
795 			    ts.ts_finaltsi);
796 			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
797 		}
798 
799 		/*
800 		 * XXX This is terrible.
801 		 *
802 		 * Right now, some code uses the TX status that is
803 		 * passed in here, but the completion handlers in the
804 		 * software TX path also use bf_status.ds_txstat.
805 		 * Ew.  That should all go away.
806 		 *
807 		 * XXX It's also possible the rate control completion
808 		 * routine is called twice.
809 		 */
810 		memcpy(&bf->bf_status, &ts, sizeof(ts));
811 
812 		ni = bf->bf_node;
813 
814 		/* Update RSSI */
815 		/* XXX duplicate from ath_tx_processq */
816 		if (ni != NULL && ts.ts_status == 0 &&
817 		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
818 			nacked++;
819 			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
820 			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
821 			    ts.ts_rssi);
822 		}
823 
824 		/* Handle frame completion and rate control update */
825 		ath_tx_process_buf_completion(sc, txq, &ts, bf);
826 
827 		/* bf is invalid at this point */
828 
829 		/*
830 		 * Now that there's space in the FIFO, let's push some
831 		 * more frames into it.
832 		 */
833 		ATH_TXQ_LOCK(txq);
834 		if (dosched)
835 			ath_edma_tx_fifo_fill(sc, txq);
836 		ATH_TXQ_UNLOCK(txq);
837 	}
838 
839 	sc->sc_wd_timer = 0;
840 
841 #if 0
842 	/* remove, DragonFly uses OACTIVE to control if_start calls */
843 	if (idx > 0) {
844 		IF_LOCK(&sc->sc_ifp->if_snd);
845 		ifq_clr_oactive(&sc->sc_ifp->if_snd);
846 		IF_UNLOCK(&sc->sc_ifp->if_snd);
847 	}
848 #endif
849 
850 	/* Kick software scheduler */
851 	/*
852 	 * XXX It's inefficient to do this if the FIFO queue is full,
853 	 * but there's no easy way right now to only populate
854 	 * the txq task for _one_ TXQ.  This should be fixed.
855 	 */
856 	if (dosched)
857 		ath_tx_swq_kick(sc);
858 }
859 
860 static void
861 ath_edma_attach_comp_func(struct ath_softc *sc)
862 {
863 
864 	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
865 }
866 
867 void
868 ath_xmit_setup_edma(struct ath_softc *sc)
869 {
870 
871 	/* Fetch EDMA field and buffer sizes */
872 	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
873 	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
874 	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);
875 
876 	device_printf(sc->sc_dev, "TX descriptor length: %d\n",
877 	    sc->sc_tx_desclen);
878 	device_printf(sc->sc_dev, "TX status length: %d\n",
879 	    sc->sc_tx_statuslen);
880 	device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
881 	    sc->sc_tx_nmaps);
882 
883 	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
884 	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
885 	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;
886 
887 	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
888 	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
889 	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
890 }
891