1 /*
2  * Copyright (c) 2008-2011 Atheros Communications Inc.
3  *
4  * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
5  * Original from Linux kernel 3.0.1
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <ipxe/io.h>
21 
22 #include "ath9k.h"
23 #include "ar9003_mac.h"
24 
25 /*
26  * Setup and link descriptors.
27  *
28  * 11N: we can no longer afford to self link the last descriptor.
29  * MAC acknowledges BA status as long as it copies frames to host
30  * buffer (or rx fifo). This can incorrectly acknowledge packets
31  * to a sender if last desc is self-linked.
32  */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
//	struct io_buffer *iob;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

//	/* virtual addr of the beginning of the buffer. */
//	iob = bf->bf_mpdu;
//	ds->ds_vdata = iob->data;

	/*
	 * Setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

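/*
 * Select the default receive antenna and reset the diversity
 * counter that ath_rx_tasklet() uses to decide when to switch.
 */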
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

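/*
 * Program the PCU for the current operating mode: receive filter,
 * BSSID mask, opmode register and a fully open multicast filter.
 */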
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

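/*
 * Allocate the RX descriptor ring and attach one I/O buffer of
 * rx_bufsize bytes to each descriptor. Any failure tears down the
 * partially initialised state via ath_rx_cleanup().
 */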
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct io_buffer *iob;
	struct ath_buf *bf;
	int error = 0;

	sc->sc_flags &= ~SC_OP_RXFLUSH;

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	DBG2("ath9k: cachelsz %d rxbufsize %d\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
			"rx", nbufs, 1, 0);
	if (error != 0) {
		DBG("ath9k: "
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		iob = alloc_iob_raw ( common->rx_bufsize, common->cachelsz, 0 );
		if (iob == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = iob;
		bf->bf_buf_addr = virt_to_bus ( iob->data );
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

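/*
 * Free every I/O buffer attached to the RX ring, then release the
 * descriptor DMA memory itself if it was ever allocated.
 */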
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct io_buffer *iob;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		iob = bf->bf_mpdu;
		if (iob) {
			free_iob(iob);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o accept beacons so the 802.11 layer can track the access
 *   point and discover networks while scanning
 *
 * The full Linux driver additionally varies the filter with
 * interface type and state (probe requests in hostap/adhoc/monitor
 * modes, promiscuous mode, repeater operation); this iPXE port
 * always uses the fixed set computed below.
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_BEACON;

	return rfilt;

#undef RX_FILTER_PRESERVE
}

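/*
 * (Re)build the RX descriptor chain from sc->rx.rxbuf, point the
 * hardware at the first descriptor and enable the PCU receive path.
 */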
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	return 0;
}

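/*
 * Abort PCU reception, clear the RX filter and stop RX DMA.
 * Returns non-zero only if DMA stopped cleanly without requiring
 * a chip reset.
 */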
int ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	int stopped, reset = 0;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	sc->rx.rxlink = NULL;

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    !stopped) {
		DBG("ath9k: "
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
	}
	return stopped && !reset;
}

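/*
 * Drain the RX queue: run the RX tasklet in flush mode so every
 * pending buffer is requeued without being handed up the stack.
 */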
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1, 0);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

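/*
 * Return the first RX buffer whose descriptor the hardware has
 * completed, or NULL if nothing is ready yet.
 */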
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (bf->list.next == &sc->rx.rxbuf) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	return bf;
}

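/*
 * Validate a completed RX descriptor's status words: returns 1 if
 * the frame should be processed further, 0 if it must be dropped.
 */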
/* Assumes you've already done the endian to CPU conversion */
static int ath9k_rx_accept(struct ath_common *common,
			    struct ath_rx_status *rx_stats,
			    int *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	u8 rx_status_len = ah->caps.rx_status_len;

	if (!rx_stats->rs_datalen)
		return 0;

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return 0;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return 1;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return 0;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT)
			*decrypt_error = 1;

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return 0;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC))
				return 0;
		}
	}
	return 1;
}

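/*
 * Map the hardware rate code in rx_stats to an index into
 * sc->rates[] for the current band, matching both the long and
 * short preamble hardware values.
 */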
static int ath9k_process_rate(struct ath_common *common __unused,
			      struct net80211_device *dev,
			      struct ath_rx_status *rx_stats,
			      int *rix)
{
	struct ath_softc *sc = (struct ath_softc *)dev->priv;
	int band;
	int i = 0;

	band = (dev->channels + sc->dev->channel)->band;

	for (i = 0; i < sc->hwinfo->nr_rates[band]; i++) {
		if (sc->rates[i].hw_value == rx_stats->rs_rate) {
			*rix = i;
			return 0;
		}
		if (sc->rates[i].hw_value_short == rx_stats->rs_rate) {
			*rix = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	DBG("ath9k: "
		"unsupported hw bitrate detected 0x%02x, dropping frame\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

/*
 * For decrypt or demic errors, we only mark the packet status here
 * and always push the frame up to let the 802.11 stack handle the
 * actual error case, be it a missing decryption key or a real
 * decryption error. This lets us keep statistics there.
 */
static int ath9k_rx_iob_preprocess(struct ath_common *common,
				   struct net80211_device *dev,
				   struct ath_rx_status *rx_stats,
				   int *rix,
				   int *decrypt_error)
{
	/*
	 * Everything but the rate is checked here; the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	if (ath9k_process_rate(common, dev, rx_stats, rix))
		return -EINVAL;

	return 0;
}

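/*
 * Main receive loop: reap completed descriptors, hand good frames
 * to net80211 and give each descriptor a freshly allocated I/O
 * buffer before chaining it back onto the hardware RX list. With
 * flush set, pending buffers are requeued without being processed.
 */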
int ath_rx_tasklet(struct ath_softc *sc, int flush, int hp __unused)
{
	struct ath_buf *bf;
	struct io_buffer *iob = NULL, *requeue_iob;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct net80211_device *dev = sc->dev;
	int retval;
	int decrypt_error = 0;
	struct ath_rx_status rs;
	int rix = 0;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		iob = bf->bf_mpdu;
		if (!iob)
			continue;

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_iob_preprocess(common, dev, &rs,
						 &rix, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		/*
		 * Ensure we always have an iob to requeue once we are done
		 * processing the current buffer's iob.
		 */
		requeue_iob = alloc_iob_raw ( common->rx_bufsize,
					      common->cachelsz, 0 );

		/*
		 * If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * iob and put it at the tail of the sc->rx.rxbuf list for
		 * processing.
		 */
		if (!requeue_iob)
			goto requeue_drop_frag;

		iob_put(iob, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			iob_pull(iob, ah->caps.rx_status_len);

		/* We will now give hardware our shiny new allocated iob */
		bf->bf_mpdu = requeue_iob;
		bf->bf_buf_addr = virt_to_bus ( requeue_iob->data );

		/*
		 * Change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		DBGIO("ath9k: rx %d bytes, signal %d, bitrate %d, "
		      "hw_value %d\n", rs.rs_datalen, rs.rs_rssi,
		      sc->rates[rix].bitrate, rs.rs_rate);

		net80211_rx(dev, iob, rs.rs_rssi,
				sc->rates[rix].bitrate);

requeue_drop_frag:
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_buf_link(sc, bf);
		ath9k_hw_rxena(ah);
	} while (1);

	return 0;
}