xref: /dragonfly/sys/dev/netif/ath/ath/if_ath_tx_edma.c (revision 9348a738)
1 /*-
2  * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Driver for the Atheros Wireless LAN controller.
35  *
36  * This software is derived from work of Atsushi Onoe; his contribution
37  * is greatly appreciated.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 /*
43  * This is needed for register operations which are performed
44  * by the driver - eg, calls to ath_hal_gettsf32().
45  *
46  * It's also required for any AH_DEBUG checks in here, eg the
47  * module dependencies.
48  */
49 #include "opt_ah.h"
50 #include "opt_wlan.h"
51 
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/sysctl.h>
55 #include <sys/mbuf.h>
56 #include <sys/malloc.h>
57 #include <sys/lock.h>
58 #include <sys/mutex.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61 #include <sys/sockio.h>
62 #include <sys/errno.h>
63 #include <sys/callout.h>
64 #include <sys/bus.h>
65 #include <sys/endian.h>
66 #include <sys/kthread.h>
67 #include <sys/taskqueue.h>
68 #include <sys/priv.h>
69 #include <sys/module.h>
70 #include <sys/ktr.h>
71 
72 #if defined(__DragonFly__)
73 /* empty */
74 #else
75 #include <sys/smp.h>
76 #include <machine/bus.h>
77 #endif
78 
79 #include <net/if.h>
80 #include <net/if_var.h>
81 #include <net/if_dl.h>
82 #include <net/if_media.h>
83 #include <net/if_types.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_llc.h>
87 #if defined(__DragonFly__)
88 #include <net/ifq_var.h>
89 #endif
90 
91 #include <netproto/802_11/ieee80211_var.h>
92 #include <netproto/802_11/ieee80211_regdomain.h>
93 #ifdef IEEE80211_SUPPORT_SUPERG
94 #include <netproto/802_11/ieee80211_superg.h>
95 #endif
96 #ifdef IEEE80211_SUPPORT_TDMA
97 #include <netproto/802_11/ieee80211_tdma.h>
98 #endif
99 
100 #include <net/bpf.h>
101 
102 #ifdef INET
103 #include <netinet/in.h>
104 #include <netinet/if_ether.h>
105 #endif
106 
107 #include <dev/netif/ath/ath/if_athvar.h>
108 #include <dev/netif/ath/ath_hal/ah_devid.h>		/* XXX for softled */
109 #include <dev/netif/ath/ath_hal/ah_diagcodes.h>
110 
111 #include <dev/netif/ath/ath/if_ath_debug.h>
112 #include <dev/netif/ath/ath/if_ath_misc.h>
113 #include <dev/netif/ath/ath/if_ath_tsf.h>
114 #include <dev/netif/ath/ath/if_ath_tx.h>
115 #include <dev/netif/ath/ath/if_ath_sysctl.h>
116 #include <dev/netif/ath/ath/if_ath_led.h>
117 #include <dev/netif/ath/ath/if_ath_keycache.h>
118 #include <dev/netif/ath/ath/if_ath_rx.h>
119 #include <dev/netif/ath/ath/if_ath_beacon.h>
120 #include <dev/netif/ath/ath/if_athdfs.h>
121 #include <dev/netif/ath/ath/if_ath_descdma.h>
122 
123 #ifdef ATH_TX99_DIAG
124 #include <dev/netif/ath/ath_tx99/ath_tx99.h>
125 #endif
126 
127 #include <dev/netif/ath/ath/if_ath_tx_edma.h>
128 
129 #ifdef	ATH_DEBUG_ALQ
130 #include <dev/netif/ath/ath/if_ath_alq.h>
131 #endif
132 
133 /*
134  * some general macros
135  */
/*
 * Circular-index helpers: advance/retreat _l by one, wrapping at _sz
 * (_sz must be a power of two).  Wrapped in do/while (0) so each
 * expands to a single statement and is safe in unbraced if/else
 * bodies; the original two-statement expansion was not.
 */
#define	INCR(_l, _sz)		do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)		do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
138 
139 /*
140  * XXX doesn't belong here, and should be tunable
141  */
142 #define	ATH_TXSTATUS_RING_SIZE	512
143 
144 MALLOC_DECLARE(M_ATHDEV);
145 
146 static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);
147 
148 /*
149  * Push some frames into the TX FIFO if we have space.
150  */
151 static void
152 ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
153 {
154 	struct ath_buf *bf, *bf_last;
155 	int i = 0;
156 
157 	ATH_TXQ_LOCK_ASSERT(txq);
158 
159 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
160 	    __func__,
161 	    txq->axq_qnum);
162 
163 	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
164 		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
165 			break;
166 
167 		/*
168 		 * We have space in the FIFO - so let's push a frame
169 		 * into it.
170 		 */
171 
172 		/*
173 		 * Remove it from the normal list
174 		 */
175 		ATH_TXQ_REMOVE(txq, bf, bf_list);
176 
177 		/*
178 		 * XXX for now, we only dequeue a frame at a time, so
179 		 * that's only one buffer.  Later on when we just
180 		 * push this staging _list_ into the queue, we'll
181 		 * set bf_last to the end pointer in the list.
182 		 */
183 		bf_last = bf;
184 		DPRINTF(sc, ATH_DEBUG_TX_PROC,
185 		    "%s: Q%d: depth=%d; pushing %p->%p\n",
186 		    __func__,
187 		    txq->axq_qnum,
188 		    txq->axq_fifo_depth,
189 		    bf,
190 		    bf_last);
191 
192 		/*
193 		 * Append it to the FIFO staging list
194 		 */
195 		ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);
196 
197 		/*
198 		 * Set fifo start / fifo end flags appropriately
199 		 *
200 		 */
201 		bf->bf_flags |= ATH_BUF_FIFOPTR;
202 		bf_last->bf_flags |= ATH_BUF_FIFOEND;
203 
204 		/*
205 		 * Push _into_ the FIFO.
206 		 */
207 		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
208 #ifdef	ATH_DEBUG
209 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
210 			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
211 #endif/* ATH_DEBUG */
212 #ifdef	ATH_DEBUG_ALQ
213 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
214 			ath_tx_alq_post(sc, bf);
215 #endif /* ATH_DEBUG_ALQ */
216 		txq->axq_fifo_depth++;
217 		i++;
218 	}
219 	if (i > 0)
220 		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
221 }
222 
223 /*
224  * Re-initialise the DMA FIFO with the current contents of
225  * said TXQ.
226  *
227  * This should only be called as part of the chip reset path, as it
228  * assumes the FIFO is currently empty.
229  */
230 static void
231 ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
232 {
233 	struct ath_buf *bf;
234 	int i = 0;
235 	int fifostart = 1;
236 	int old_fifo_depth;
237 
238 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
239 	    __func__,
240 	    txq->axq_qnum);
241 
242 	ATH_TXQ_LOCK_ASSERT(txq);
243 
244 	/*
245 	 * Let's log if the tracked FIFO depth doesn't match
246 	 * what we actually push in.
247 	 */
248 	old_fifo_depth = txq->axq_fifo_depth;
249 	txq->axq_fifo_depth = 0;
250 
251 	/*
252 	 * Walk the FIFO staging list, looking for "head" entries.
253 	 * Since we may have a partially completed list of frames,
254 	 * we push the first frame we see into the FIFO and re-mark
255 	 * it as the head entry.  We then skip entries until we see
256 	 * FIFO end, at which point we get ready to push another
257 	 * entry into the FIFO.
258 	 */
259 	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
260 		/*
261 		 * If we're looking for FIFOEND and we haven't found
262 		 * it, skip.
263 		 *
264 		 * If we're looking for FIFOEND and we've found it,
265 		 * reset for another descriptor.
266 		 */
267 #ifdef	ATH_DEBUG
268 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
269 			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
270 #endif/* ATH_DEBUG */
271 #ifdef	ATH_DEBUG_ALQ
272 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
273 			ath_tx_alq_post(sc, bf);
274 #endif /* ATH_DEBUG_ALQ */
275 
276 		if (fifostart == 0) {
277 			if (bf->bf_flags & ATH_BUF_FIFOEND)
278 				fifostart = 1;
279 			continue;
280 		}
281 
282 		/* Make sure we're not overflowing the FIFO! */
283 		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
284 			device_printf(sc->sc_dev,
285 			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
286 			    __func__,
287 			    txq->axq_qnum,
288 			    txq->axq_fifo_depth);
289 		}
290 
291 #if 0
292 		DPRINTF(sc, ATH_DEBUG_RESET,
293 		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
294 		    __func__,
295 		    txq->axq_qnum,
296 		    txq->axq_fifo_depth,
297 		    bf,
298 		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
299 		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
300 #endif
301 
302 		/*
303 		 * Set this to be the first buffer in the FIFO
304 		 * list - even if it's also the last buffer in
305 		 * a FIFO list!
306 		 */
307 		bf->bf_flags |= ATH_BUF_FIFOPTR;
308 
309 		/* Push it into the FIFO and bump the FIFO count */
310 		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
311 		txq->axq_fifo_depth++;
312 
313 		/*
314 		 * If this isn't the last entry either, let's
315 		 * clear fifostart so we continue looking for
316 		 * said last entry.
317 		 */
318 		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
319 			fifostart = 0;
320 		i++;
321 	}
322 
323 	/* Only bother starting the queue if there's something in it */
324 	if (i > 0)
325 		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
326 
327 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
328 	    __func__,
329 	    txq->axq_qnum,
330 	    old_fifo_depth,
331 	    txq->axq_fifo_depth);
332 
333 	/* And now, let's check! */
334 	if (txq->axq_fifo_depth != old_fifo_depth) {
335 		device_printf(sc->sc_dev,
336 		    "%s: Q%d: FIFO depth should be %d, is %d\n",
337 		    __func__,
338 		    txq->axq_qnum,
339 		    old_fifo_depth,
340 		    txq->axq_fifo_depth);
341 	}
342 }
343 
344 /*
345  * Hand off this frame to a hardware queue.
346  *
347  * Things are a bit hairy in the EDMA world.  The TX FIFO is only
348  * 8 entries deep, so we need to keep track of exactly what we've
349  * pushed into the FIFO and what's just sitting in the TX queue,
350  * waiting to go out.
351  *
352  * So this is split into two halves - frames get appended to the
353  * TXQ; then a scheduler is called to push some frames into the
354  * actual TX FIFO.
355  */
356 static void
357 ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
358     struct ath_buf *bf)
359 {
360 
361 	ATH_TXQ_LOCK(txq);
362 
363 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
364 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
365 
366 	/*
367 	 * XXX TODO: write a hard-coded check to ensure that
368 	 * the queue id in the TX descriptor matches txq->axq_qnum.
369 	 */
370 
371 	/* Update aggr stats */
372 	if (bf->bf_state.bfs_aggr)
373 		txq->axq_aggr_depth++;
374 
375 	/* Push and update frame stats */
376 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
377 
378 	/* For now, set the link pointer in the last descriptor
379 	 * to be NULL.
380 	 *
381 	 * Later on, when it comes time to handling multiple descriptors
382 	 * in one FIFO push, we can link descriptors together this way.
383 	 */
384 
385 	/*
386 	 * Finally, call the FIFO schedule routine to schedule some
387 	 * frames to the FIFO.
388 	 */
389 	ath_edma_tx_fifo_fill(sc, txq);
390 	ATH_TXQ_UNLOCK(txq);
391 }
392 
393 /*
394  * Hand off this frame to a multicast software queue.
395  *
396  * The EDMA TX CABQ will get a list of chained frames, chained
397  * together using the next pointer.  The single head of that
398  * particular queue is pushed to the hardware CABQ.
399  */
400 static void
401 ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
402     struct ath_buf *bf)
403 {
404 
405 	ATH_TX_LOCK_ASSERT(sc);
406 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
407 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
408 
409 	ATH_TXQ_LOCK(txq);
410 	/*
411 	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
412 	 */
413 	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
414 		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
415 		struct ieee80211_frame *wh;
416 
417 		/* mark previous frame */
418 		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
419 		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
420 
421 		/* re-sync buffer to memory */
422 		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
423 		   BUS_DMASYNC_PREWRITE);
424 
425 		/* link descriptor */
426 		ath_hal_settxdesclink(sc->sc_ah,
427 		    bf_last->bf_lastds,
428 		    bf->bf_daddr);
429 	}
430 #ifdef	ATH_DEBUG_ALQ
431 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
432 		ath_tx_alq_post(sc, bf);
433 #endif	/* ATH_DEBUG_ALQ */
434 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
435 	ATH_TXQ_UNLOCK(txq);
436 }
437 
438 /*
439  * Handoff this frame to the hardware.
440  *
441  * For the multicast queue, this will treat it as a software queue
442  * and append it to the list, after updating the MORE_DATA flag
443  * in the previous frame.  The cabq processing code will ensure
444  * that the queue contents gets transferred over.
445  *
446  * For the hardware queues, this will queue a frame to the queue
447  * like before, then populate the FIFO from that.  Since the
448  * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
449  * frames such as management frames don't get prematurely dropped.
450  *
451  * This does imply that a similar flush-hwq-to-fifoq method will
452  * need to be called from the processq function, before the
453  * per-node software scheduler is called.
454  */
455 static void
456 ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
457     struct ath_buf *bf)
458 {
459 
460 	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
461 	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
462 	    __func__,
463 	    bf,
464 	    txq,
465 	    txq->axq_qnum);
466 
467 	if (txq->axq_qnum == ATH_TXQ_SWQ)
468 		ath_edma_xmit_handoff_mcast(sc, txq, bf);
469 	else
470 		ath_edma_xmit_handoff_hw(sc, txq, bf);
471 }
472 
473 static int
474 ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
475 {
476 	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
477 
478 	te->m_fifo = kmalloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
479 			     M_ATHDEV, M_INTWAIT | M_ZERO);
480 	if (te->m_fifo == NULL) {
481 		device_printf(sc->sc_dev, "%s: malloc failed\n",
482 		    __func__);
483 		return (-ENOMEM);
484 	}
485 
486 	/*
487 	 * Set initial "empty" state.
488 	 */
489 	te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;
490 
491 	return (0);
492 }
493 
/*
 * Release the FIFO shadow array allocated by ath_edma_setup_txfifo()
 * for the given TX queue.  Always returns 0.
 */
static int
ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	/* XXX TODO: actually deref the ath_buf entries? */
	kfree(te->m_fifo, M_ATHDEV);
	return (0);
}
503 
504 static int
505 ath_edma_dma_txsetup(struct ath_softc *sc)
506 {
507 	int error;
508 	int i;
509 
510 	error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
511 	    NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
512 	if (error != 0)
513 		return (error);
514 
515 	ath_hal_setuptxstatusring(sc->sc_ah,
516 	    (void *) sc->sc_txsdma.dd_desc,
517 	    sc->sc_txsdma.dd_desc_paddr,
518 	    ATH_TXSTATUS_RING_SIZE);
519 
520 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
521 		ath_edma_setup_txfifo(sc, i);
522 	}
523 
524 	return (0);
525 }
526 
527 static int
528 ath_edma_dma_txteardown(struct ath_softc *sc)
529 {
530 	int i;
531 
532 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
533 		ath_edma_free_txfifo(sc, i);
534 	}
535 
536 	ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
537 	return (0);
538 }
539 
540 /*
541  * Drain all TXQs, potentially after completing the existing completed
542  * frames.
543  */
544 static void
545 ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
546 {
547 	int i;
548 
549 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
550 
551 	(void) ath_stoptxdma(sc);
552 
553 	/*
554 	 * If reset type is noloss, the TX FIFO needs to be serviced
555 	 * and those frames need to be handled.
556 	 *
557 	 * Otherwise, just toss everything in each TX queue.
558 	 */
559 	if (reset_type == ATH_RESET_NOLOSS) {
560 		ath_edma_tx_processq(sc, 0);
561 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
562 			if (ATH_TXQ_SETUP(sc, i)) {
563 				ATH_TXQ_LOCK(&sc->sc_txq[i]);
564 				/*
565 				 * Free the holding buffer; DMA is now
566 				 * stopped.
567 				 */
568 				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
569 				/*
570 				 * Reset the link pointer to NULL; there's
571 				 * no frames to chain DMA to.
572 				 */
573 				sc->sc_txq[i].axq_link = NULL;
574 				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
575 			}
576 		}
577 	} else {
578 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
579 			if (ATH_TXQ_SETUP(sc, i))
580 				ath_tx_draintxq(sc, &sc->sc_txq[i]);
581 		}
582 	}
583 
584 	/* XXX dump out the TX completion FIFO contents */
585 
586 	/* XXX dump out the frames */
587 
588 	sc->sc_wd_timer = 0;
589 }
590 
591 /*
592  * TX completion tasklet.
593  */
594 
595 static void
596 ath_edma_tx_proc(void *arg, int npending)
597 {
598 	struct ath_softc *sc = (struct ath_softc *) arg;
599 
600 #if 0
601 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
602 	    __func__, npending);
603 #endif
604 	ath_edma_tx_processq(sc, 1);
605 }
606 
607 /*
608  * Process the TX status queue.
609  */
610 static void
611 ath_edma_tx_processq(struct ath_softc *sc, int dosched)
612 {
613 	struct ath_hal *ah = sc->sc_ah;
614 	HAL_STATUS status;
615 	struct ath_tx_status ts;
616 	struct ath_txq *txq;
617 	struct ath_buf *bf;
618 	struct ieee80211_node *ni;
619 	int nacked = 0;
620 	int idx;
621 
622 #ifdef	ATH_DEBUG
623 	/* XXX */
624 	uint32_t txstatus[32];
625 #endif
626 
627 	for (idx = 0; ; idx++) {
628 		bzero(&ts, sizeof(ts));
629 
630 		ATH_TXSTATUS_LOCK(sc);
631 #ifdef	ATH_DEBUG
632 		ath_hal_gettxrawtxdesc(ah, txstatus);
633 #endif
634 		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
635 		ATH_TXSTATUS_UNLOCK(sc);
636 
637 		if (status == HAL_EINPROGRESS)
638 			break;
639 
640 #ifdef	ATH_DEBUG
641 		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
642 			if (ts.ts_queue_id != sc->sc_bhalq)
643 			ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id,
644 			    idx, (status == HAL_OK));
645 #endif
646 
647 		/*
648 		 * If there is an error with this descriptor, continue
649 		 * processing.
650 		 *
651 		 * XXX TBD: log some statistics?
652 		 */
653 		if (status == HAL_EIO) {
654 			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
655 			    __func__);
656 			break;
657 		}
658 
659 #if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
660 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
661 			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
662 			    sc->sc_tx_statuslen,
663 			    (char *) txstatus);
664 #endif /* ATH_DEBUG_ALQ */
665 
666 		/*
667 		 * At this point we have a valid status descriptor.
668 		 * The QID and descriptor ID (which currently isn't set)
669 		 * is part of the status.
670 		 *
671 		 * We then assume that the descriptor in question is the
672 		 * -head- of the given QID.  Eventually we should verify
673 		 * this by using the descriptor ID.
674 		 */
675 
676 		/*
677 		 * The beacon queue is not currently a "real" queue.
678 		 * Frames aren't pushed onto it and the lock isn't setup.
679 		 * So skip it for now; the beacon handling code will
680 		 * free and alloc more beacon buffers as appropriate.
681 		 */
682 		if (ts.ts_queue_id == sc->sc_bhalq)
683 			continue;
684 
685 		txq = &sc->sc_txq[ts.ts_queue_id];
686 
687 		ATH_TXQ_LOCK(txq);
688 		bf = ATH_TXQ_FIRST(&txq->fifo);
689 
690 		/*
691 		 * Work around the situation where I'm seeing notifications
692 		 * for Q1 when no frames are available.  That needs to be
693 		 * debugged but not by crashing _here_.
694 		 */
695 		if (bf == NULL) {
696 			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
697 			    __func__,
698 			    ts.ts_queue_id);
699 			ATH_TXQ_UNLOCK(txq);
700 			continue;
701 		}
702 
703 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
704 		    __func__,
705 		    ts.ts_queue_id, bf,
706 		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
707 		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
708 
709 		/* XXX TODO: actually output debugging info about this */
710 
711 #if 0
712 		/* XXX assert the buffer/descriptor matches the status descid */
713 		if (ts.ts_desc_id != bf->bf_descid) {
714 			device_printf(sc->sc_dev,
715 			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
716 			    "bfdescid=%d\n",
717 			    __func__,
718 			    ts.ts_queue_id,
719 			    ts.ts_desc_id,
720 			    bf->bf_descid);
721 		}
722 #endif
723 
724 		/* This removes the buffer and decrements the queue depth */
725 		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
726 		if (bf->bf_state.bfs_aggr)
727 			txq->axq_aggr_depth--;
728 
729 		/*
730 		 * If this was the end of a FIFO set, decrement FIFO depth
731 		 */
732 		if (bf->bf_flags & ATH_BUF_FIFOEND)
733 			txq->axq_fifo_depth--;
734 
735 		/*
736 		 * If this isn't the final buffer in a FIFO set, mark
737 		 * the buffer as busy so it goes onto the holding queue.
738 		 */
739 		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
740 			bf->bf_flags |= ATH_BUF_BUSY;
741 
742 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
743 		    __func__,
744 		    txq->axq_qnum,
745 		    txq->axq_fifo_depth,
746 		    txq->fifo.axq_depth);
747 
748 		/* XXX assert FIFO depth >= 0 */
749 		ATH_TXQ_UNLOCK(txq);
750 
751 		/*
752 		 * Outside of the TX lock - if the buffer is end
753 		 * end buffer in this FIFO, we don't need a holding
754 		 * buffer any longer.
755 		 */
756 		if (bf->bf_flags & ATH_BUF_FIFOEND) {
757 			ATH_TXQ_LOCK(txq);
758 			ath_txq_freeholdingbuf(sc, txq);
759 			ATH_TXQ_UNLOCK(txq);
760 		}
761 
762 		/*
763 		 * First we need to make sure ts_rate is valid.
764 		 *
765 		 * Pre-EDMA chips pass the whole TX descriptor to
766 		 * the proctxdesc function which will then fill out
767 		 * ts_rate based on the ts_finaltsi (final TX index)
768 		 * in the TX descriptor.  However the TX completion
769 		 * FIFO doesn't have this information.  So here we
770 		 * do a separate HAL call to populate that information.
771 		 *
772 		 * The same problem exists with ts_longretry.
773 		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
774 		 * the AR9380 HAL currently doesn't.  So until the HAL
775 		 * is imported and this can be added, we correct for it
776 		 * here.
777 		 */
778 		/* XXX TODO */
779 		/* XXX faked for now. Ew. */
780 		if (ts.ts_finaltsi < 4) {
781 			ts.ts_rate =
782 			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
783 			switch (ts.ts_finaltsi) {
784 			case 3: ts.ts_longretry +=
785 			    bf->bf_state.bfs_rc[2].tries;
786 			case 2: ts.ts_longretry +=
787 			    bf->bf_state.bfs_rc[1].tries;
788 			case 1: ts.ts_longretry +=
789 			    bf->bf_state.bfs_rc[0].tries;
790 			}
791 		} else {
792 			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
793 			    __func__,
794 			    ts.ts_finaltsi);
795 			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
796 		}
797 
798 		/*
799 		 * XXX This is terrible.
800 		 *
801 		 * Right now, some code uses the TX status that is
802 		 * passed in here, but the completion handlers in the
803 		 * software TX path also use bf_status.ds_txstat.
804 		 * Ew.  That should all go away.
805 		 *
806 		 * XXX It's also possible the rate control completion
807 		 * routine is called twice.
808 		 */
809 		memcpy(&bf->bf_status, &ts, sizeof(ts));
810 
811 		ni = bf->bf_node;
812 
813 		/* Update RSSI */
814 		/* XXX duplicate from ath_tx_processq */
815 		if (ni != NULL && ts.ts_status == 0 &&
816 		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
817 			nacked++;
818 			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
819 			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
820 			    ts.ts_rssi);
821 		}
822 
823 		/* Handle frame completion and rate control update */
824 		ath_tx_process_buf_completion(sc, txq, &ts, bf);
825 
826 		/* bf is invalid at this point */
827 
828 		/*
829 		 * Now that there's space in the FIFO, let's push some
830 		 * more frames into it.
831 		 */
832 		ATH_TXQ_LOCK(txq);
833 		if (dosched)
834 			ath_edma_tx_fifo_fill(sc, txq);
835 		ATH_TXQ_UNLOCK(txq);
836 	}
837 
838 	sc->sc_wd_timer = 0;
839 
840 	/* Kick software scheduler */
841 	/*
842 	 * XXX It's inefficient to do this if the FIFO queue is full,
843 	 * but there's no easy way right now to only populate
844 	 * the txq task for _one_ TXQ.  This should be fixed.
845 	 */
846 	if (dosched)
847 		ath_tx_swq_kick(sc);
848 }
849 
/*
 * Register the EDMA TX completion handler: initialise sc_txtask so
 * the taskqueue runs ath_edma_tx_proc() with sc as its argument.
 */
static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{

	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}
856 
857 void
858 ath_xmit_setup_edma(struct ath_softc *sc)
859 {
860 
861 	/* Fetch EDMA field and buffer sizes */
862 	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
863 	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
864 	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);
865 
866 	if (bootverbose) {
867 		device_printf(sc->sc_dev, "TX descriptor length: %d\n",
868 		    sc->sc_tx_desclen);
869 		device_printf(sc->sc_dev, "TX status length: %d\n",
870 		    sc->sc_tx_statuslen);
871 		device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
872 		    sc->sc_tx_nmaps);
873 	}
874 
875 	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
876 	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
877 	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;
878 
879 	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
880 	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
881 	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
882 }
883