1 /*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 /*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 /*
43 * This is needed for register operations which are performed
44 * by the driver - eg, calls to ath_hal_gettsf32().
45 *
46 * It's also required for any AH_DEBUG checks in here, eg the
47 * module dependencies.
48 */
49 #include "opt_ah.h"
50 #include "opt_wlan.h"
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/sysctl.h>
55 #include <sys/mbuf.h>
56 #include <sys/malloc.h>
57 #include <sys/lock.h>
58 #include <sys/kernel.h>
59 #include <sys/socket.h>
60 #include <sys/sockio.h>
61 #include <sys/errno.h>
62 #include <sys/callout.h>
63 #include <sys/bus.h>
64 #include <sys/endian.h>
65 #include <sys/kthread.h>
66 #include <sys/taskqueue.h>
67 #include <sys/caps.h>
68 #include <sys/module.h>
69 #include <sys/ktr.h>
70
71 #if defined(__DragonFly__)
72 /* empty */
73 #else
74 #include <sys/smp.h> /* for mp_ncpus */
75 #include <machine/bus.h>
76 #endif
77
78 #include <net/if.h>
79 #include <net/if_var.h>
80 #include <net/if_dl.h>
81 #include <net/if_media.h>
82 #include <net/if_types.h>
83 #include <net/if_arp.h>
84 #include <net/ethernet.h>
85 #include <net/if_llc.h>
86 #if defined(__DragonFly__)
87 #include <net/ifq_var.h>
88 #endif
89
90 #include <netproto/802_11/ieee80211_var.h>
91 #include <netproto/802_11/ieee80211_regdomain.h>
92 #ifdef IEEE80211_SUPPORT_SUPERG
93 #include <netproto/802_11/ieee80211_superg.h>
94 #endif
95 #ifdef IEEE80211_SUPPORT_TDMA
96 #include <netproto/802_11/ieee80211_tdma.h>
97 #endif
98
99 #include <net/bpf.h>
100
101 #ifdef INET
102 #include <netinet/in.h>
103 #include <netinet/if_ether.h>
104 #endif
105
106 #include <dev/netif/ath/ath/if_athvar.h>
107 #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */
108 #include <dev/netif/ath/ath_hal/ah_diagcodes.h>
109
110 #include <dev/netif/ath/ath/if_ath_debug.h>
111 #include <dev/netif/ath/ath/if_ath_misc.h>
112 #include <dev/netif/ath/ath/if_ath_tsf.h>
113 #include <dev/netif/ath/ath/if_ath_tx.h>
114 #include <dev/netif/ath/ath/if_ath_sysctl.h>
115 #include <dev/netif/ath/ath/if_ath_led.h>
116 #include <dev/netif/ath/ath/if_ath_keycache.h>
117 #include <dev/netif/ath/ath/if_ath_rx.h>
118 #include <dev/netif/ath/ath/if_ath_beacon.h>
119 #include <dev/netif/ath/ath/if_athdfs.h>
120 #include <dev/netif/ath/ath/if_ath_descdma.h>
121
122 #ifdef ATH_TX99_DIAG
123 #include <dev/netif/ath/ath/ath_tx99/ath_tx99.h>
124 #endif
125
126 #ifdef ATH_DEBUG_ALQ
127 #include <dev/netif/ath/ath/if_ath_alq.h>
128 #endif
129
130 #include <dev/netif/ath/ath/if_ath_lna_div.h>
131
132 /*
133 * Calculate the receive filter according to the
134 * operating mode and state:
135 *
136 * o always accept unicast, broadcast, and multicast traffic
137 * o accept PHY error frames when hardware doesn't have MIB support
138 * to count and we need them for ANI (sta mode only until recently)
139 * and we are not scanning (ANI is disabled)
 *   NB: older HALs add rx filter bits out of sight and we need to
 *       blindly preserve them
142 * o probe request frames are accepted only when operating in
143 * hostap, adhoc, mesh, or monitor modes
144 * o enable promiscuous mode
145 * - when in monitor mode
146 * - if interface marked PROMISC (assumes bridge setting is filtered)
147 * o accept beacons:
148 * - when operating in station mode for collecting rssi data when
149 * the station is otherwise quiet, or
150 * - when operating in adhoc mode so the 802.11 layer creates
151 * node table entries for peers,
152 * - when scanning
153 * - when doing s/w beacon miss (e.g. for ap+sta)
154 * - when operating in ap mode in 11g to detect overlapping bss that
155 * require protection
156 * - when operating in mesh mode to detect neighbors
157 * o accept control frames:
158 * - when in monitor mode
159 * XXX HT protection for 11n
160 */
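/*
 * For example (illustrative only): an associated, non-scanning STA
 * VAP on an HT channel, on hardware supporting the "my beacon"
 * filter, ends up with at least
 *
 *	HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST |
 *	HAL_RX_FILTER_MYBEACON | HAL_RX_FILTER_COMPBAR
 *
 * with further bits (e.g. HAL_RX_FILTER_PHYERR, HAL_RX_FILTER_PROM)
 * added depending on chip capabilities and interface state.
 */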
161 u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
163 {
164 struct ieee80211com *ic = &sc->sc_ic;
165 u_int32_t rfilt;
166
167 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
168 if (!sc->sc_needmib && !sc->sc_scanning)
169 rfilt |= HAL_RX_FILTER_PHYERR;
170 if (ic->ic_opmode != IEEE80211_M_STA)
171 rfilt |= HAL_RX_FILTER_PROBEREQ;
172 /* XXX ic->ic_monvaps != 0? */
173 if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0)
174 rfilt |= HAL_RX_FILTER_PROM;
175
176 /*
177 * Only listen to all beacons if we're scanning.
178 *
179 * Otherwise we only really need to hear beacons from
180 * our own BSSID.
181 *
182 * IBSS? software beacon miss? Just receive all beacons.
183 * We need to hear beacons/probe requests from everyone so
184 * we can merge ibss.
185 */
186 if (ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) {
187 rfilt |= HAL_RX_FILTER_BEACON;
188 } else if (ic->ic_opmode == IEEE80211_M_STA) {
189 if (sc->sc_do_mybeacon && ! sc->sc_scanning) {
190 rfilt |= HAL_RX_FILTER_MYBEACON;
191 } else { /* scanning, non-mybeacon chips */
192 rfilt |= HAL_RX_FILTER_BEACON;
193 }
194 }
195
196 /*
197 * NB: We don't recalculate the rx filter when
198 * ic_protmode changes; otherwise we could do
199 * this only when ic_protmode != NONE.
200 */
201 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
202 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
203 rfilt |= HAL_RX_FILTER_BEACON;
204
205 /*
206 * Enable hardware PS-POLL RX only for hostap mode;
207 * STA mode sends PS-POLL frames but never
208 * receives them.
209 */
210 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
211 0, NULL) == HAL_OK &&
212 ic->ic_opmode == IEEE80211_M_HOSTAP)
213 rfilt |= HAL_RX_FILTER_PSPOLL;
214
215 if (sc->sc_nmeshvaps) {
216 rfilt |= HAL_RX_FILTER_BEACON;
217 if (sc->sc_hasbmatch)
218 rfilt |= HAL_RX_FILTER_BSSID;
219 else
220 rfilt |= HAL_RX_FILTER_PROM;
221 }
222 if (ic->ic_opmode == IEEE80211_M_MONITOR)
223 rfilt |= HAL_RX_FILTER_CONTROL;
224
225 /*
226 * Enable RX of compressed BAR frames only when doing
227 * 802.11n. Required for A-MPDU.
228 */
229 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
230 rfilt |= HAL_RX_FILTER_COMPBAR;
231
232 /*
233 * Enable radar PHY errors if requested by the
234 * DFS module.
235 */
236 if (sc->sc_dodfs)
237 rfilt |= HAL_RX_FILTER_PHYRADAR;
238
239 /*
240 * Enable spectral PHY errors if requested by the
241 * spectral module.
242 */
243 if (sc->sc_dospectral)
244 rfilt |= HAL_RX_FILTER_PHYRADAR;
245
246 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s\n",
247 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode]);
248 return rfilt;
249 }
250
251 static int
ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
253 {
254 struct ath_hal *ah = sc->sc_ah;
255 int error;
256 struct mbuf *m;
257 struct ath_desc *ds;
258
259 /* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */
260
261 m = bf->bf_m;
262 if (m == NULL) {
263 /*
264 * NB: by assigning a page to the rx dma buffer we
265 * implicitly satisfy the Atheros requirement that
266 * this buffer be cache-line-aligned and sized to be
267 * multiple of the cache line size. Not doing this
268 * causes weird stuff to happen (for the 5210 at least).
269 */
270 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
271 if (m == NULL) {
272 DPRINTF(sc, ATH_DEBUG_ANY,
273 "%s: no mbuf/cluster\n", __func__);
274 sc->sc_stats.ast_rx_nombuf++;
275 return ENOMEM;
276 }
277 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
278
279 #if defined(__DragonFly__)
280 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat,
281 bf->bf_dmamap, m,
282 bf->bf_segs, 1, &bf->bf_nseg,
283 BUS_DMA_NOWAIT);
284 #else
285 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
286 bf->bf_dmamap, m,
287 bf->bf_segs, &bf->bf_nseg,
288 BUS_DMA_NOWAIT);
289 #endif
290 if (error != 0) {
291 DPRINTF(sc, ATH_DEBUG_ANY,
292 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
293 __func__, error);
294 sc->sc_stats.ast_rx_busdma++;
295 m_freem(m);
296 return error;
297 }
298 KASSERT(bf->bf_nseg == 1,
299 ("multi-segment packet; nseg %u", bf->bf_nseg));
300 bf->bf_m = m;
301 }
302 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
303
304 /*
305 * Setup descriptors. For receive we always terminate
306 * the descriptor list with a self-linked entry so we'll
307 * not get overrun under high load (as can happen with a
308 * 5212 when ANI processing enables PHY error frames).
309 *
	 * To ensure the last descriptor is self-linked we create
311 * each descriptor as self-linked and add it to the end. As
312 * each additional descriptor is added the previous self-linked
313 * entry is ``fixed'' naturally. This should be safe even
314 * if DMA is happening. When processing RX interrupts we
315 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
317 * someplace to write a new frame.
318 */
319 /*
320 * 11N: we can no longer afford to self link the last descriptor.
321 * MAC acknowledges BA status as long as it copies frames to host
322 * buffer (or rx fifo). This can incorrectly acknowledge packets
323 * to a sender if last desc is self-linked.
324 */
325 ds = bf->bf_desc;
326 if (sc->sc_rxslink)
327 ds->ds_link = bf->bf_daddr; /* link to self */
328 else
329 ds->ds_link = 0; /* terminate the list */
330 ds->ds_data = bf->bf_segs[0].ds_addr;
331 ath_hal_setuprxdesc(ah, ds
332 , m->m_len /* buffer size */
333 , 0
334 );
335
336 if (sc->sc_rxlink != NULL)
337 *sc->sc_rxlink = bf->bf_daddr;
338 sc->sc_rxlink = &ds->ds_link;
339 return 0;
340 }
341
342 /*
343 * Intercept management frames to collect beacon rssi data
344 * and to do ibss merges.
345 */
346 void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
348 int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf)
349 {
350 struct ieee80211vap *vap = ni->ni_vap;
351 struct ath_softc *sc = vap->iv_ic->ic_softc;
352 uint64_t tsf_beacon_old, tsf_beacon;
353 uint64_t nexttbtt;
354 int64_t tsf_delta;
355 int32_t tsf_delta_bmiss;
356 int32_t tsf_remainder;
357 uint64_t tsf_beacon_target;
358 int tsf_intval;
359
360 tsf_beacon_old = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
361 tsf_beacon_old |= le32dec(ni->ni_tstamp.data);
362
363 #define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
364 tsf_intval = 1;
365 if (ni->ni_intval > 0) {
366 tsf_intval = TU_TO_TSF(ni->ni_intval);
367 }
368 #undef TU_TO_TSF
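	/*
	 * For example, the common beacon interval of 100 TU gives
	 * TU_TO_TSF(100) == 100 << 10 == 102400, i.e. roughly 102.4ms
	 * between target beacon transmission times (1 TU == 1024us).
	 */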
369
370 /*
371 * Call up first so subsequent work can use information
372 * potentially stored in the node (e.g. for ibss merge).
373 */
374 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
375 switch (subtype) {
376 case IEEE80211_FC0_SUBTYPE_BEACON:
377
378 /*
379 * Only do the following processing if it's for
380 * the current BSS.
381 *
382 * In scan and IBSS mode we receive all beacons,
383 * which means we need to filter out stuff
384 * that isn't for us or we'll end up constantly
385 * trying to sync / merge to BSSes that aren't
386 * actually us.
387 */
388 if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) {
389 /* update rssi statistics for use by the hal */
390 /* XXX unlocked check against vap->iv_bss? */
391 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
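			/*
			 * (ATH_RSSI_LPF() keeps a low-pass filtered running
			 * average; the result in sc_halstats is later handed
			 * to ath_hal_rxmonitor() from ath_rx_proc() below.)
			 */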
392
393
394 tsf_beacon = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
395 tsf_beacon |= le32dec(ni->ni_tstamp.data);
396
397 nexttbtt = ath_hal_getnexttbtt(sc->sc_ah);
398
399 /*
400 * Let's calculate the delta and remainder, so we can see
401 * if the beacon timer from the AP is varying by more than
402 * a few TU. (Which would be a huge, huge problem.)
403 */
404 tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old;
405
406 tsf_delta_bmiss = tsf_delta / tsf_intval;
407
408 /*
409 * If our delta is greater than half the beacon interval,
410 * let's round the bmiss value up to the next beacon
			 * interval. I.e., we're running really, really early
412 * on the next beacon.
413 */
414 if (tsf_delta % tsf_intval > (tsf_intval / 2))
415 tsf_delta_bmiss ++;
416
417 tsf_beacon_target = tsf_beacon_old +
418 (((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval);
419
420 /*
421 * The remainder using '%' is between 0 .. intval-1.
422 * If we're actually running too fast, then the remainder
423 * will be some large number just under intval-1.
424 * So we need to look at whether we're running
425 * before or after the target beacon interval
426 * and if we are, modify how we do the remainder
427 * calculation.
428 */
429 if (tsf_beacon < tsf_beacon_target) {
430 tsf_remainder =
431 -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval));
432 } else {
433 tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval;
434 }
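			/*
			 * Worked example (illustrative values): with a 100 TU
			 * interval (tsf_intval == 102400) and tsf_beacon_old
			 * == 0, a beacon received at tsf_beacon == 102100 is
			 * 300us early.  102100 is more than half an interval,
			 * so tsf_delta_bmiss rounds up to 1 and the target
			 * becomes 102400; since tsf_beacon < target, we
			 * report tsf_remainder == -300 rather than a large
			 * positive value just under the interval.  A beacon
			 * at 102700 would instead give tsf_remainder == 300.
			 */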
435
436 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: old_tsf=%llu, new_tsf=%llu, target_tsf=%llu, delta=%lld, bmiss=%d, remainder=%d\n",
437 __func__,
438 (unsigned long long) tsf_beacon_old,
439 (unsigned long long) tsf_beacon,
440 (unsigned long long) tsf_beacon_target,
441 (long long) tsf_delta,
442 tsf_delta_bmiss,
443 tsf_remainder);
444
445 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf=%llu, nexttbtt=%llu, delta=%d\n",
446 __func__,
447 (unsigned long long) tsf_beacon,
448 (unsigned long long) nexttbtt,
449 (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval);
450
451 /* We only do syncbeacon on STA VAPs; not on IBSS */
452 if (vap->iv_opmode == IEEE80211_M_STA &&
453 sc->sc_syncbeacon &&
454 ni == vap->iv_bss &&
455 (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) {
456 DPRINTF(sc, ATH_DEBUG_BEACON,
457 "%s: syncbeacon=1; syncing\n",
458 __func__);
459 /*
460 * Resync beacon timers using the tsf of the beacon
461 * frame we just received.
462 */
463 ath_beacon_config(sc, vap);
464 sc->sc_syncbeacon = 0;
465 }
466 }
467
468 /* fall thru... */
469 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
470 if (vap->iv_opmode == IEEE80211_M_IBSS &&
471 vap->iv_state == IEEE80211_S_RUN &&
472 ieee80211_ibss_merge_check(ni)) {
473 uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
474 uint64_t tsf = ath_extend_tsf(sc, rstamp,
475 ath_hal_gettsf64(sc->sc_ah));
476 /*
477 * Handle ibss merge as needed; check the tsf on the
478 * frame before attempting the merge. The 802.11 spec
			 * says the station should change its bssid to match
480 * the oldest station with the same ssid, where oldest
481 * is determined by the tsf. Note that hardware
482 * reconfiguration happens through callback to
483 * ath_newstate as the state machine will go from
484 * RUN -> RUN when this happens.
485 */
486 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
487 DPRINTF(sc, ATH_DEBUG_STATE,
488 "ibss merge, rstamp %u tsf %ju "
489 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
490 (uintmax_t)ni->ni_tstamp.tsf);
491 (void) ieee80211_ibss_merge(ni);
492 }
493 }
494 break;
495 }
496 }
497
498 #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
499 static void
ath_rx_tap_vendor(struct ath_softc *sc, struct mbuf *m,
501 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
502 {
503
504 /* Fill in the extension bitmap */
505 sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER);
506
507 /* Fill in the vendor header */
508 sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f;
509 sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03;
510 sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00;
511
512 /* XXX what should this be? */
513 sc->sc_rx_th.wr_vh.vh_sub_ns = 0;
514 sc->sc_rx_th.wr_vh.vh_skip_len =
515 htole16(sizeof(struct ath_radiotap_vendor_hdr));
516
517 /* General version info */
518 sc->sc_rx_th.wr_v.vh_version = 1;
519
520 sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask;
521
522 /* rssi */
523 sc->sc_rx_th.wr_v.rssi_ctl[0] = rs->rs_rssi_ctl[0];
524 sc->sc_rx_th.wr_v.rssi_ctl[1] = rs->rs_rssi_ctl[1];
525 sc->sc_rx_th.wr_v.rssi_ctl[2] = rs->rs_rssi_ctl[2];
526 sc->sc_rx_th.wr_v.rssi_ext[0] = rs->rs_rssi_ext[0];
527 sc->sc_rx_th.wr_v.rssi_ext[1] = rs->rs_rssi_ext[1];
528 sc->sc_rx_th.wr_v.rssi_ext[2] = rs->rs_rssi_ext[2];
529
530 /* evm */
531 sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0;
532 sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1;
533 sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2;
534 /* These are only populated from the AR9300 or later */
535 sc->sc_rx_th.wr_v.evm[3] = rs->rs_evm3;
536 sc->sc_rx_th.wr_v.evm[4] = rs->rs_evm4;
537
538 /* direction */
539 sc->sc_rx_th.wr_v.vh_flags = ATH_VENDOR_PKT_RX;
540
541 /* RX rate */
542 sc->sc_rx_th.wr_v.vh_rx_hwrate = rs->rs_rate;
543
544 /* RX flags */
545 sc->sc_rx_th.wr_v.vh_rs_flags = rs->rs_flags;
546
547 if (rs->rs_isaggr)
548 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_ISAGGR;
549 if (rs->rs_moreaggr)
550 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_MOREAGGR;
551
552 /* phyerr info */
553 if (rs->rs_status & HAL_RXERR_PHY) {
554 sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr;
555 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_RXPHYERR;
556 } else {
557 sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff;
558 }
559 sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status;
560 sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi;
561 }
562 #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
563
564 static void
ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
566 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
567 {
568 #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
569 #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
570 #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
571 #define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
572 const HAL_RATE_TABLE *rt;
573 uint8_t rix;
574
575 rt = sc->sc_currates;
576 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
577 rix = rt->rateCodeToIndex[rs->rs_rate];
578 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
579 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
580 #ifdef AH_SUPPORT_AR5416
581 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
582 if (rs->rs_status & HAL_RXERR_PHY) {
583 /*
584 * PHY error - make sure the channel flags
585 * reflect the actual channel configuration,
586 * not the received frame.
587 */
588 if (IEEE80211_IS_CHAN_HT40U(sc->sc_curchan))
589 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
590 else if (IEEE80211_IS_CHAN_HT40D(sc->sc_curchan))
591 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
592 else if (IEEE80211_IS_CHAN_HT20(sc->sc_curchan))
593 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
594 } else if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
595 struct ieee80211com *ic = &sc->sc_ic;
596
597 if ((rs->rs_flags & HAL_RX_2040) == 0)
598 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
599 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
600 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
601 else
602 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
603 if ((rs->rs_flags & HAL_RX_GI) == 0)
604 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
605 }
606
607 #endif
608 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
609 if (rs->rs_status & HAL_RXERR_CRC)
610 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
611 /* XXX propagate other error flags from descriptor */
612 sc->sc_rx_th.wr_antnoise = nf;
613 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
614 sc->sc_rx_th.wr_antenna = rs->rs_antenna;
615 #undef CHAN_HT
616 #undef CHAN_HT20
617 #undef CHAN_HT40U
618 #undef CHAN_HT40D
619 }
620
621 static void
ath_handle_micerror(struct ieee80211com *ic,
623 struct ieee80211_frame *wh, int keyix)
624 {
625 struct ieee80211_node *ni;
626
627 /* XXX recheck MIC to deal w/ chips that lie */
628 /* XXX discard MIC errors on !data frames */
629 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
630 if (ni != NULL) {
631 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
632 ieee80211_free_node(ni);
633 }
634 }
635
636 /*
637 * Process a single packet.
638 *
639 * The mbuf must already be synced, unmapped and removed from bf->bf_m
640 * by this stage.
641 *
642 * The mbuf must be consumed by this routine - either passed up the
643 * net80211 stack, put on the holding queue, or freed.
644 */
645 int
ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
647 uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
648 struct mbuf *m)
649 {
650 uint64_t rstamp;
651 int len, type;
652 struct ieee80211com *ic = &sc->sc_ic;
653 struct ieee80211_node *ni;
654 int is_good = 0;
655 struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
656
657 /*
658 * Calculate the correct 64 bit TSF given
659 * the TSF64 register value and rs_tstamp.
660 */
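	/*
	 * (rs_tstamp is only a truncated low-order snapshot of the TSF
	 * taken by the MAC at receive time; splicing it into the TSF64
	 * value sampled by the caller recovers a full 64 bit receive
	 * timestamp, allowing for the counter wrapping in between.)
	 */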
661 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
662
663 /* These aren't specifically errors */
664 #ifdef AH_SUPPORT_AR5416
665 if (rs->rs_flags & HAL_RX_GI)
666 sc->sc_stats.ast_rx_halfgi++;
667 if (rs->rs_flags & HAL_RX_2040)
668 sc->sc_stats.ast_rx_2040++;
669 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
670 sc->sc_stats.ast_rx_pre_crc_err++;
671 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
672 sc->sc_stats.ast_rx_post_crc_err++;
673 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
674 sc->sc_stats.ast_rx_decrypt_busy_err++;
675 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
676 sc->sc_stats.ast_rx_hi_rx_chain++;
677 if (rs->rs_flags & HAL_RX_STBC)
678 sc->sc_stats.ast_rx_stbc++;
679 #endif /* AH_SUPPORT_AR5416 */
680
681 if (rs->rs_status != 0) {
682 if (rs->rs_status & HAL_RXERR_CRC)
683 sc->sc_stats.ast_rx_crcerr++;
684 if (rs->rs_status & HAL_RXERR_FIFO)
685 sc->sc_stats.ast_rx_fifoerr++;
686 if (rs->rs_status & HAL_RXERR_PHY) {
687 sc->sc_stats.ast_rx_phyerr++;
688 /* Process DFS radar events */
689 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
690 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
691 /* Now pass it to the radar processing code */
692 ath_dfs_process_phy_err(sc, m, rstamp, rs);
693 }
694
695 /* Be suitably paranoid about receiving phy errors out of the stats array bounds */
696 if (rs->rs_phyerr < 64)
697 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
698 goto rx_error; /* NB: don't count in ierrors */
699 }
700 if (rs->rs_status & HAL_RXERR_DECRYPT) {
701 /*
702 * Decrypt error. If the error occurred
703 * because there was no hardware key, then
704 * let the frame through so the upper layers
705 * can process it. This is necessary for 5210
706 * parts which have no way to setup a ``clear''
707 * key cache entry.
708 *
709 * XXX do key cache faulting
710 */
711 if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
712 goto rx_accept;
713 sc->sc_stats.ast_rx_badcrypt++;
714 }
715 /*
716 * Similar as above - if the failure was a keymiss
717 * just punt it up to the upper layers for now.
718 */
719 if (rs->rs_status & HAL_RXERR_KEYMISS) {
720 sc->sc_stats.ast_rx_keymiss++;
721 goto rx_accept;
722 }
723 if (rs->rs_status & HAL_RXERR_MIC) {
724 sc->sc_stats.ast_rx_badmic++;
725 /*
726 * Do minimal work required to hand off
727 * the 802.11 header for notification.
728 */
			/* XXX frags and QoS frames */
730 len = rs->rs_datalen;
731 if (len >= sizeof (struct ieee80211_frame)) {
732 ath_handle_micerror(ic,
733 mtod(m, struct ieee80211_frame *),
734 sc->sc_splitmic ?
735 rs->rs_keyix-32 : rs->rs_keyix);
736 }
737 }
738 #if defined(__DragonFly__)
739 ++ic->ic_ierrors; /* don't care about SMP races */
740 #else
741 counter_u64_add(ic->ic_ierrors, 1);
742 #endif
743 rx_error:
744 /*
745 * Cleanup any pending partial frame.
746 */
747 if (re->m_rxpending != NULL) {
748 m_freem(re->m_rxpending);
749 re->m_rxpending = NULL;
750 }
751 /*
752 * When a tap is present pass error frames
753 * that have been requested. By default we
754 * pass decrypt+mic errors but others may be
755 * interesting (e.g. crc).
756 */
757 if (ieee80211_radiotap_active(ic) &&
758 (rs->rs_status & sc->sc_monpass)) {
759 /* NB: bpf needs the mbuf length setup */
760 len = rs->rs_datalen;
761 m->m_pkthdr.len = m->m_len = len;
762 ath_rx_tap(sc, m, rs, rstamp, nf);
763 #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
764 ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
765 #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
766 ieee80211_radiotap_rx_all(ic, m);
767 }
		/* XXX pass MIC errors up for s/w recalculation */
769 m_freem(m); m = NULL;
770 goto rx_next;
771 }
772 rx_accept:
773 len = rs->rs_datalen;
774 m->m_len = len;
775
776 if (rs->rs_more) {
777 /*
778 * Frame spans multiple descriptors; save
779 * it for the next completed descriptor, it
780 * will be used to construct a jumbogram.
781 */
782 if (re->m_rxpending != NULL) {
783 /* NB: max frame size is currently 2 clusters */
784 sc->sc_stats.ast_rx_toobig++;
785 m_freem(re->m_rxpending);
786 }
787 m->m_pkthdr.len = len;
788 re->m_rxpending = m;
789 m = NULL;
790 goto rx_next;
791 } else if (re->m_rxpending != NULL) {
792 /*
793 * This is the second part of a jumbogram,
794 * chain it to the first mbuf, adjust the
795 * frame length, and clear the rxpending state.
796 */
797 re->m_rxpending->m_next = m;
798 re->m_rxpending->m_pkthdr.len += len;
799 m = re->m_rxpending;
800 re->m_rxpending = NULL;
801 } else {
802 /*
803 * Normal single-descriptor receive; setup packet length.
804 */
805 m->m_pkthdr.len = len;
806 }
807
808 /*
809 * Validate rs->rs_antenna.
810 *
811 * Some users w/ AR9285 NICs have reported crashes
812 * here because rs_antenna field is bogusly large.
813 * Let's enforce the maximum antenna limit of 8
814 * (and it shouldn't be hard coded, but that's a
815 * separate problem) and if there's an issue, print
816 * out an error and adjust rs_antenna to something
817 * sensible.
818 *
819 * This code should be removed once the actual
820 * root cause of the issue has been identified.
821 * For example, it may be that the rs_antenna
822 * field is only valid for the last frame of
823 * an aggregate and it just happens that it is
824 * "mostly" right. (This is a general statement -
825 * the majority of the statistics are only valid
826 * for the last frame in an aggregate.
	 * for the last frame in an aggregate.)
828 if (rs->rs_antenna > 7) {
829 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
830 __func__, rs->rs_antenna);
831 #ifdef ATH_DEBUG
832 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
833 #endif /* ATH_DEBUG */
834 rs->rs_antenna = 0; /* XXX better than nothing */
835 }
836
837 /*
838 * If this is an AR9285/AR9485, then the receive and LNA
839 * configuration is stored in RSSI[2] / EXTRSSI[2].
840 * We can extract this out to build a much better
841 * receive antenna profile.
842 *
843 * Yes, this just blurts over the above RX antenna field
844 * for now. It's fine, the AR9285 doesn't really use
845 * that.
846 *
847 * Later on we should store away the fine grained LNA
848 * information and keep separate counters just for
849 * that. It'll help when debugging the AR9285/AR9485
850 * combined diversity code.
851 */
852 if (sc->sc_rx_lnamixer) {
853 rs->rs_antenna = 0;
854
855 /* Bits 0:1 - the LNA configuration used */
856 rs->rs_antenna |=
857 ((rs->rs_rssi_ctl[2] & HAL_RX_LNA_CFG_USED)
858 >> HAL_RX_LNA_CFG_USED_S);
859
860 /* Bit 2 - the external RX antenna switch */
861 if (rs->rs_rssi_ctl[2] & HAL_RX_LNA_EXTCFG)
862 rs->rs_antenna |= 0x4;
863 }
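	/*
	 * With the encoding above, e.g. LNA configuration 2 with the
	 * external antenna switch active would be reported as antenna
	 * 0x6 (binary 110) in the ast_ant_rx[] statistics below.
	 */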
864
865 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
866
867 /*
868 * Populate the rx status block. When there are bpf
869 * listeners we do the additional work to provide
870 * complete status. Otherwise we fill in only the
871 * material required by ieee80211_input. Note that
872 * noise setting is filled in above.
873 */
874 if (ieee80211_radiotap_active(ic)) {
875 ath_rx_tap(sc, m, rs, rstamp, nf);
876 #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
877 ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
878 #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
879 }
880
881 /*
882 * From this point on we assume the frame is at least
883 * as large as ieee80211_frame_min; verify that.
884 */
885 if (len < IEEE80211_MIN_LEN) {
886 if (!ieee80211_radiotap_active(ic)) {
887 DPRINTF(sc, ATH_DEBUG_RECV,
888 "%s: short packet %d\n", __func__, len);
889 sc->sc_stats.ast_rx_tooshort++;
890 } else {
891 /* NB: in particular this captures ack's */
892 ieee80211_radiotap_rx_all(ic, m);
893 }
894 m_freem(m); m = NULL;
895 goto rx_next;
896 }
897
898 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
899 const HAL_RATE_TABLE *rt = sc->sc_currates;
900 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
901
902 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
903 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
904 }
905
906 m_adj(m, -IEEE80211_CRC_LEN);
907
908 /*
909 * Locate the node for sender, track state, and then
910 * pass the (referenced) node up to the 802.11 layer
911 * for its use.
912 */
913 ni = ieee80211_find_rxnode_withkey(ic,
914 mtod(m, const struct ieee80211_frame_min *),
915 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
916 IEEE80211_KEYIX_NONE : rs->rs_keyix);
917 sc->sc_lastrs = rs;
918
919 #ifdef AH_SUPPORT_AR5416
920 if (rs->rs_isaggr)
921 sc->sc_stats.ast_rx_agg++;
922 #endif /* AH_SUPPORT_AR5416 */
923
924 if (ni != NULL) {
925 /*
926 * Only punt packets for ampdu reorder processing for
927 * 11n nodes; net80211 enforces that M_AMPDU is only
928 * set for 11n nodes.
929 */
930 if (ni->ni_flags & IEEE80211_NODE_HT)
931 m->m_flags |= M_AMPDU;
932
933 /*
934 * Sending station is known, dispatch directly.
935 */
936 type = ieee80211_input(ni, m, rs->rs_rssi, nf);
937 ieee80211_free_node(ni);
938 m = NULL;
939 /*
940 * Arrange to update the last rx timestamp only for
941 * frames from our ap when operating in station mode.
942 * This assumes the rx key is always setup when
943 * associated.
944 */
945 if (ic->ic_opmode == IEEE80211_M_STA &&
946 rs->rs_keyix != HAL_RXKEYIX_INVALID)
947 is_good = 1;
948 } else {
949 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
950 m = NULL;
951 }
952
953 /*
954 * At this point we have passed the frame up the stack; thus
955 * the mbuf is no longer ours.
956 */
957
958 /*
959 * Track rx rssi and do any rx antenna management.
960 */
961 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
962 if (sc->sc_diversity) {
963 /*
964 * When using fast diversity, change the default rx
965 * antenna if diversity chooses the other antenna 3
966 * times in a row.
967 */
968 if (sc->sc_defant != rs->rs_antenna) {
969 if (++sc->sc_rxotherant >= 3)
970 ath_setdefantenna(sc, rs->rs_antenna);
971 } else
972 sc->sc_rxotherant = 0;
973 }
974
975 /* Handle slow diversity if enabled */
976 if (sc->sc_dolnadiv) {
977 ath_lna_rx_comb_scan(sc, rs, ticks, hz);
978 }
979
980 if (sc->sc_softled) {
981 /*
982 * Blink for any data frame. Otherwise do a
983 * heartbeat-style blink when idle. The latter
984 * is mainly for station mode where we depend on
985 * periodic beacon frames to trigger the poll event.
986 */
987 if (type == IEEE80211_FC0_TYPE_DATA) {
988 const HAL_RATE_TABLE *rt = sc->sc_currates;
989 ath_led_event(sc,
990 rt->rateCodeToIndex[rs->rs_rate]);
991 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
992 ath_led_event(sc, 0);
993 }
994 rx_next:
995 /*
996 * Debugging - complain if we didn't NULL the mbuf pointer
997 * here.
998 */
999 if (m != NULL) {
1000 device_printf(sc->sc_dev,
1001 "%s: mbuf %p should've been freed!\n",
1002 __func__,
1003 m);
1004 }
1005 return (is_good);
1006 }
1007
1008 #define ATH_RX_MAX 128
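/*
 * ATH_RX_MAX bounds how many frames one ath_rx_proc() pass will handle;
 * see the budget check at the top of the do/while loop and the
 * reschedule at the bottom once the budget is exhausted.
 */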
1009
1010 /*
1011 * XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like
1012 * the EDMA code does.
1013 *
1014 * XXX TODO: then, do all of the RX list management stuff inside
1015 * ATH_RX_LOCK() so we don't end up potentially racing. The EDMA
1016 * code is doing it right.
1017 */
1018 static void
ath_rx_proc(struct ath_softc *sc, int resched)
1020 {
1021 #define PA2DESC(_sc, _pa) \
1022 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
1023 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
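	/*
	 * PA2DESC() maps a descriptor's physical address (as seen in
	 * ds_link) back to its virtual address by offsetting into the
	 * one contiguous descriptor block allocated in sc_rxdma.
	 */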
1024 struct ath_buf *bf;
1025 struct ath_hal *ah = sc->sc_ah;
1026 #ifdef IEEE80211_SUPPORT_SUPERG
1027 struct ieee80211com *ic = &sc->sc_ic;
1028 #endif
1029 struct ath_desc *ds;
1030 struct ath_rx_status *rs;
1031 struct mbuf *m;
1032 int ngood;
1033 HAL_STATUS status;
1034 int16_t nf;
1035 u_int64_t tsf;
1036 int npkts = 0;
1037 int kickpcu = 0;
1038 int ret;
1039
1040 /* XXX we must not hold the ATH_LOCK here */
1041 ATH_UNLOCK_ASSERT(sc);
1042 ATH_PCU_UNLOCK_ASSERT(sc);
1043
1044 ATH_PCU_LOCK(sc);
1045 sc->sc_rxproc_cnt++;
1046 kickpcu = sc->sc_kickpcu;
1047 ATH_PCU_UNLOCK(sc);
1048
1049 ATH_LOCK(sc);
1050 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1051 ATH_UNLOCK(sc);
1052
1053 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
1054 ngood = 0;
1055 nf = ath_hal_getchannoise(ah, sc->sc_curchan);
1056 sc->sc_stats.ast_rx_noise = nf;
1057 tsf = ath_hal_gettsf64(ah);
1058 do {
1059 /*
1060 * Don't process too many packets at a time; give the
1061 * TX thread time to also run - otherwise the TX
1062 * latency can jump by quite a bit, causing throughput
		 * degradation.
1064 */
1065 if (!kickpcu && npkts >= ATH_RX_MAX)
1066 break;
1067
1068 bf = TAILQ_FIRST(&sc->sc_rxbuf);
1069 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */
1070 device_printf(sc->sc_dev, "%s: no buffer!\n", __func__);
1071 break;
1072 } else if (bf == NULL) {
1073 /*
1074 * End of List:
1075 * this can happen for non-self-linked RX chains
1076 */
1077 sc->sc_stats.ast_rx_hitqueueend++;
1078 break;
1079 }
1080 m = bf->bf_m;
1081 if (m == NULL) { /* NB: shouldn't happen */
1082 /*
1083 * If mbuf allocation failed previously there
1084 * will be no mbuf; try again to re-populate it.
1085 */
1086 /* XXX make debug msg */
1087 device_printf(sc->sc_dev, "%s: no mbuf!\n", __func__);
1088 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
1089 goto rx_proc_next;
1090 }
1091 ds = bf->bf_desc;
1092 if (ds->ds_link == bf->bf_daddr) {
1093 /* NB: never process the self-linked entry at the end */
1094 sc->sc_stats.ast_rx_hitqueueend++;
1095 break;
1096 }
1097 /* XXX sync descriptor memory */
1098 /*
1099 * Must provide the virtual address of the current
1100 * descriptor, the physical address, and the virtual
1101 * address of the next descriptor in the h/w chain.
1102 * This allows the HAL to look ahead to see if the
1103 * hardware is done with a descriptor by checking the
1104 * done bit in the following descriptor and the address
1105 * of the current descriptor the DMA engine is working
1106 * on. All this is necessary because of our use of
1107 * a self-linked list to avoid rx overruns.
1108 */
1109 rs = &bf->bf_status.ds_rxstat;
1110 status = ath_hal_rxprocdesc(ah, ds,
1111 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
1112 #ifdef ATH_DEBUG
1113 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
1114 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
1115 #endif
1116
1117 #ifdef ATH_DEBUG_ALQ
1118 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
1119 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
1120 sc->sc_rx_statuslen, (char *) ds);
1121 #endif /* ATH_DEBUG_ALQ */
1122
1123 if (status == HAL_EINPROGRESS)
1124 break;
1125
1126 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
1127 npkts++;
1128
1129 /*
1130 * Process a single frame.
1131 */
1132 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
1133 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1134 bf->bf_m = NULL;
1135 if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m))
1136 ngood++;
1137 rx_proc_next:
1138 /*
1139 * If there's a holding buffer, insert that onto
		 * the RX list; the hardware is definitely not
		 * pointing to it now.
1142 */
1143 ret = 0;
1144 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) {
1145 TAILQ_INSERT_TAIL(&sc->sc_rxbuf,
1146 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf,
1147 bf_list);
1148 ret = ath_rxbuf_init(sc,
1149 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf);
1150 }
1151 /*
1152 * Next, throw our buffer into the holding entry. The hardware
1153 * may use the descriptor to read the link pointer before
1154 * DMAing the next descriptor in to write out a packet.
1155 */
1156 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf;
1157 } while (ret == 0);
1158
1159 /* rx signal state monitoring */
1160 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
1161 if (ngood)
1162 sc->sc_lastrx = tsf;
1163
1164 ATH_KTR(sc, ATH_KTR_RXPROC, 2, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
1165 /* Queue DFS tasklet if needed */
1166 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
1167 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
1168
1169 /*
1170 * Now that all the RX frames were handled that
1171 * need to be handled, kick the PCU if there's
1172 * been an RXEOL condition.
1173 */
1174 if (resched && kickpcu) {
1175 ATH_PCU_LOCK(sc);
1176 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_rx_proc: kickpcu");
1177 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
1178 __func__, npkts);
1179
1180 /*
1181 * Go through the process of fully tearing down
1182 * the RX buffers and reinitialising them.
1183 *
1184 * There's a hardware bug that causes the RX FIFO
1185 * to get confused under certain conditions and
1186 * constantly write over the same frame, leading
1187 * the RX driver code here to get heavily confused.
1188 */
1189 /*
1190 * XXX Has RX DMA stopped enough here to just call
1191 * ath_startrecv()?
1192 * XXX Do we need to use the holding buffer to restart
1193 * RX DMA by appending entries to the final
1194 * descriptor? Quite likely.
1195 */
1196 #if 1
1197 ath_startrecv(sc);
1198 #else
1199 /*
1200 * Disabled for now - it'd be nice to be able to do
1201 * this in order to limit the amount of CPU time spent
1202 * reinitialising the RX side (and thus minimise RX
1203 * drops) however there's a hardware issue that
1204 * causes things to get too far out of whack.
1205 */
1206 /*
1207 * XXX can we hold the PCU lock here?
1208 * Are there any net80211 buffer calls involved?
1209 */
1210 bf = TAILQ_FIRST(&sc->sc_rxbuf);
1211 ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
1212 ath_hal_rxena(ah); /* enable recv descriptors */
1213 ath_mode_init(sc); /* set filters, etc. */
1214 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
1215 #endif
1216
1217 ath_hal_intrset(ah, sc->sc_imask);
1218 sc->sc_kickpcu = 0;
1219 ATH_PCU_UNLOCK(sc);
1220 }
1221
1222 #ifdef IEEE80211_SUPPORT_SUPERG
1223 if (resched)
1224 ieee80211_ff_age_all(ic, 100);
1225 #endif
1226
1227 /*
1228 * Put the hardware to sleep again if we're done with it.
1229 */
1230 ATH_LOCK(sc);
1231 ath_power_restore_power_state(sc);
1232 ATH_UNLOCK(sc);
1233
1234 /*
1235 * If we hit the maximum number of frames in this round,
1236 * reschedule for another immediate pass. This gives
1237 * the TX and TX completion routines time to run, which
1238 * will reduce latency.
1239 */
1240 if (npkts >= ATH_RX_MAX)
1241 sc->sc_rx.recv_sched(sc, resched);
1242
1243 ATH_PCU_LOCK(sc);
1244 sc->sc_rxproc_cnt--;
1245 ATH_PCU_UNLOCK(sc);
1246 }
1247 #undef PA2DESC
1248 #undef ATH_RX_MAX
1249
1250 /*
1251 * Only run the RX proc if it's not already running.
1252 * Since this may get run as part of the reset/flush path,
1253 * the task can't clash with an existing, running tasklet.
1254 */
1255 static void
ath_legacy_rx_tasklet(void *arg, int npending)
1257 {
1258 struct ath_softc *sc = arg;
1259
1260 ATH_KTR(sc, ATH_KTR_RXPROC, 1, "ath_rx_proc: pending=%d", npending);
1261 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
1262 ATH_PCU_LOCK(sc);
1263 if (sc->sc_inreset_cnt > 0) {
1264 device_printf(sc->sc_dev,
1265 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
1266 ATH_PCU_UNLOCK(sc);
1267 return;
1268 }
1269 ATH_PCU_UNLOCK(sc);
1270
1271 ath_rx_proc(sc, 1);
1272 }
1273
1274 static void
ath_legacy_flushrecv(struct ath_softc *sc)
1276 {
1277
1278 ath_rx_proc(sc, 0);
1279 }
1280
1281 static void
ath_legacy_flush_rxpending(struct ath_softc *sc)
1283 {
1284
1285 /* XXX ATH_RX_LOCK_ASSERT(sc); */
1286
1287 if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
1288 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
1289 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
1290 }
1291 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
1292 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
1293 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
1294 }
1295 }
1296
1297 static int
ath_legacy_flush_rxholdbf(struct ath_softc *sc)
1299 {
1300 struct ath_buf *bf;
1301
1302 /* XXX ATH_RX_LOCK_ASSERT(sc); */
1303 /*
1304 * If there are RX holding buffers, free them here and return
1305 * them to the list.
1306 *
1307 * XXX should just verify that bf->bf_m is NULL, as it must
1308 * be at this point!
1309 */
1310 bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf;
1311 if (bf != NULL) {
1312 if (bf->bf_m != NULL)
1313 m_freem(bf->bf_m);
1314 bf->bf_m = NULL;
1315 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1316 (void) ath_rxbuf_init(sc, bf);
1317 }
1318 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL;
1319
1320 bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf;
1321 if (bf != NULL) {
1322 if (bf->bf_m != NULL)
1323 m_freem(bf->bf_m);
1324 bf->bf_m = NULL;
1325 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1326 (void) ath_rxbuf_init(sc, bf);
1327 }
1328 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL;
1329
1330 return (0);
1331 }
1332
1333 /*
1334 * Disable the receive h/w in preparation for a reset.
1335 */
1336 static void
ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
1338 {
1339 #define PA2DESC(_sc, _pa) \
1340 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
1341 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
1342 struct ath_hal *ah = sc->sc_ah;
1343
1344 ATH_RX_LOCK(sc);
1345
1346 ath_hal_stoppcurecv(ah); /* disable PCU */
1347 ath_hal_setrxfilter(ah, 0); /* clear recv filter */
1348 ath_hal_stopdmarecv(ah); /* disable DMA engine */
1349 /*
1350 * TODO: see if this particular DELAY() is required; it may be
1351 * masking some missing FIFO flush or DMA sync.
1352 */
1353 #if 0
1354 if (dodelay)
1355 #endif
1356 DELAY(3000); /* 3ms is long enough for 1 frame */
1357 #ifdef ATH_DEBUG
1358 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
1359 struct ath_buf *bf;
1360 u_int ix;
1361
1362 device_printf(sc->sc_dev,
1363 "%s: rx queue %p, link %p\n",
1364 __func__,
1365 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah, HAL_RX_QUEUE_HP),
1366 sc->sc_rxlink);
1367 ix = 0;
1368 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1369 struct ath_desc *ds = bf->bf_desc;
1370 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
1371 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
1372 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
1373 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
1374 ath_printrxbuf(sc, bf, ix, status == HAL_OK);
1375 ix++;
1376 }
1377 }
1378 #endif
1379
1380 (void) ath_legacy_flush_rxpending(sc);
1381 (void) ath_legacy_flush_rxholdbf(sc);
1382
1383 sc->sc_rxlink = NULL; /* just in case */
1384
1385 ATH_RX_UNLOCK(sc);
1386 #undef PA2DESC
1387 }
1388
1389 /*
1390 * XXX TODO: something was calling startrecv without calling
1391 * stoprecv. Let's figure out what/why. It was showing up
1392 * as a mbuf leak (rxpending) and ath_buf leak (holdbf.)
1393 */
1394
1395 /*
1396 * Enable the receive h/w following a reset.
1397 */
1398 static int
ath_legacy_startrecv(struct ath_softc *sc)
1400 {
1401 struct ath_hal *ah = sc->sc_ah;
1402 struct ath_buf *bf;
1403
1404 ATH_RX_LOCK(sc);
1405
1406 /*
1407 * XXX should verify these are already all NULL!
1408 */
1409 sc->sc_rxlink = NULL;
1410 (void) ath_legacy_flush_rxpending(sc);
1411 (void) ath_legacy_flush_rxholdbf(sc);
1412
1413 /*
1414 * Re-chain all of the buffers in the RX buffer list.
1415 */
1416 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1417 int error = ath_rxbuf_init(sc, bf);
1418 if (error != 0) {
1419 DPRINTF(sc, ATH_DEBUG_RECV,
1420 "%s: ath_rxbuf_init failed %d\n",
1421 __func__, error);
1422 return error;
1423 }
1424 }
1425
1426 bf = TAILQ_FIRST(&sc->sc_rxbuf);
1427 ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
1428 ath_hal_rxena(ah); /* enable recv descriptors */
1429 ath_mode_init(sc); /* set filters, etc. */
1430 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
1431
1432 ATH_RX_UNLOCK(sc);
1433 return 0;
1434 }
1435
1436 static int
ath_legacy_dma_rxsetup(struct ath_softc *sc)
1438 {
1439 int error;
1440
1441 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
1442 "rx", sizeof(struct ath_desc), ath_rxbuf, 1);
1443 if (error != 0)
1444 return (error);
1445
1446 return (0);
1447 }
1448
1449 static int
ath_legacy_dma_rxteardown(struct ath_softc *sc)
1451 {
1452
1453 if (sc->sc_rxdma.dd_desc_len != 0)
1454 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
1455 return (0);
1456 }
1457
1458 static void
ath_legacy_recv_sched(struct ath_softc *sc, int dosched)
1460 {
1461
1462 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1463 }
1464
1465 static void
ath_legacy_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE q,
1467 int dosched)
1468 {
1469
1470 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1471 }
1472
1473 void
ath_recv_setup_legacy(struct ath_softc *sc)
1475 {
1476
1477 /* Sensible legacy defaults */
1478 /*
1479 * XXX this should be changed to properly support the
1480 * exact RX descriptor size for each HAL.
1481 */
1482 sc->sc_rx_statuslen = sizeof(struct ath_desc);
1483
1484 sc->sc_rx.recv_start = ath_legacy_startrecv;
1485 sc->sc_rx.recv_stop = ath_legacy_stoprecv;
1486 sc->sc_rx.recv_flush = ath_legacy_flushrecv;
1487 sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet;
1488 sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init;
1489
1490 sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup;
1491 sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown;
1492 sc->sc_rx.recv_sched = ath_legacy_recv_sched;
1493 sc->sc_rx.recv_sched_queue = ath_legacy_recv_sched_queue;
1494 }
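
/*
 * These methods make up the legacy (pre-AR93xx) RX path; the EDMA RX
 * code installs its own implementations through the same sc_rx method
 * table, so the rest of the driver doesn't care which RX scheme is in
 * use.
 */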
1495