/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - e.g., calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, e.g. the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/caps.h>
#include <sys/module.h>
#include <sys/ktr.h>

#if defined(__DragonFly__)
/* empty */
#else
#include <sys/smp.h>	/* for mp_ncpus */
#include <machine/bus.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#if defined(__DragonFly__)
#include <net/ifq_var.h>
#endif

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>
#include <dev/netif/ath/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)
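/*
 * Note: these assume _sz is a power of two; the AND masks the bumped
 * index back into [0, _sz), giving cheap ring-buffer wrap-around.
 * For example, with _sz == 8, INCR(i, 8) takes i from 7 back to 0.
 */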

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Stop RX DMA.  Only note that RX has stopped if the hardware
	 * reports the DMA engine actually halted.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip?\n",
		    __func__);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary? We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);

	return (0);
}

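/*
 * Kick receive processing for a single RX queue: wake the hardware,
 * drain completed FIFO entries onto the deferred list, then schedule
 * the RX taskqueue to finish the work.
 */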
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

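/*
 * As above, but kick both the high- and low-priority RX queues.
 */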
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

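/*
 * Flush the RX path: drain completed frames from both FIFOs onto the
 * deferred lists and process them, without scheduling any further RX.
 */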
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	int16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

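/*
 * Process the deferred list for the given RX queue - hand each
 * completed frame up via ath_rx_pkt() and then return the buffers
 * to the free list.
 */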
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

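/*
 * RX taskqueue routine - drain and process both RX FIFOs, unless
 * a reset is currently in progress.
 */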
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

#if defined(__DragonFly__)
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->sc_edma_bufsize);
#else
	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
#endif
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(
	    sc->sc_dmat, bf->bf_dmamap, m,
	    bf->bf_segs, 1, &bf->bf_nseg, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
#endif

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the DMA (bus) address of the mapped buffer.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

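/*
 * Return an ath_buf to the free list, unloading the DMA map and
 * freeing the mbuf first if it hasn't already been consumed.
 */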
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'nbufs' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* The slot should already be empty; if not, complain loudly and free it */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

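/*
 * Free all buffers in the given RX FIFO, along with any pending
 * partially-received mbuf, and reset the FIFO state.
 */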
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	/* DragonFly: note use of M_INTWAIT */
	re->m_fifo = kmalloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV, M_INTWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

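/*
 * Free the FIFO array allocated by ath_edma_setup_rxfifo().
 */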
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	kfree(re->m_fifo, M_ATHDEV);

	return (0);
}

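/*
 * Allocate the RX descriptor/buffer DMA state and set up the
 * per-queue FIFO arrays.
 */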
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

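/*
 * Undo ath_edma_dma_rxsetup() - flush and free both RX FIFOs and
 * release the RX descriptor DMA state.
 */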
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

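/*
 * Hook the EDMA RX methods into the driver RX method table and
 * tell the HAL about the EDMA RX buffer/status sizes.
 */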
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}