/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <net/mac80211.h>

#include "rate.h"
#include "scb.h"
#include "phy/phy_hal.h"
#include "antsel.h"
#include "main.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/* max number of mpdus in an ampdu */
#define AMPDU_MAX_MPDU			32
/* max number of mpdus in an ampdu to a legacy device */
#define AMPDU_NUM_MPDU_LEGACY		16
/* max Tx ba window size (in pdu) */
#define AMPDU_TX_BA_MAX_WSIZE		64
/* default Tx ba window size (in pdu) */
#define AMPDU_TX_BA_DEF_WSIZE		64
/* default Rx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE		64
/* max Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE		64
/* max dur of tx ampdu (in msec) */
#define	AMPDU_MAX_DUR			5
/* default tx retry limit */
#define AMPDU_DEF_RETRY_LIMIT		5
/* default tx retry limit at reg rate */
#define AMPDU_DEF_RR_RETRY_LIMIT	2
/* default ffpld reserved bytes */
#define AMPDU_DEF_FFPLD_RSVD		2048
/* # of inis to be freed on detach */
#define AMPDU_INI_FREE			10
/* max # of mpdus released at a time */
#define	AMPDU_SCB_MAX_RELEASE		20

#define NUM_FFPLD_FIFO 4	/* number of fifo concerned by pre-loading */
#define FFPLD_TX_MAX_UNFL   200	/* default value of the average number of ampdu
				 * without underflows
				 */
#define FFPLD_MPDU_SIZE 1800	/* estimate of maximum mpdu size */
#define FFPLD_MAX_MCS 23	/* we don't deal with mcs 32 */
#define FFPLD_PLD_INCR 1000	/* increments in bytes */
#define FFPLD_MAX_AMPDU_CNT 5000	/* maximum number of ampdu we
					 * accumulate between resets.
					 */

#define AMPDU_DELIMITER_LEN	4

/* max allowed number of mpdus in an ampdu (2 streams) */
#define AMPDU_NUM_MPDU		16

#define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)

/* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
#define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\
	AMPDU_DELIMITER_LEN + 3\
	+ DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)

/* modulo add/sub, bound = 2^k */
#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
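/* e.g. MODSUB_POW2(5, 60, 64) == 9: forward distance from 60 to 5, wrapping at 64 */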

/* structure to hold tx fifo information and pre-loading state
 * counters specific to tx underflows of ampdus
 * some counters might be redundant with the ones in wlc or ampdu structures.
 * This allows maintaining a specific state independently of
 * how often and/or when the wlc counters are updated.
 *
 * ampdu_pld_size: number of bytes to be pre-loaded
 * mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
 * prev_txfunfl: num of underflows last read from the HW macstats counter
 * accum_txfunfl: num of underflows since we modified pld params
 * accum_txampdu: num of tx ampdu since we modified pld params
 * prev_txampdu: previous reading of tx ampdu
 * dmaxferrate: estimated dma avg xfer rate in kbits/sec
 */
struct brcms_fifo_info {
	u16 ampdu_pld_size;
	u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
	u16 prev_txfunfl;
	u32 accum_txfunfl;
	u32 accum_txampdu;
	u32 prev_txampdu;
	u32 dmaxferrate;
};

/* AMPDU module specific state
 *
 * wlc: pointer to main wlc structure
 * scb_handle: scb cubby handle to retrieve data from scb
 * ini_enable: per-tid initiator enable/disable of ampdu
 * ba_tx_wsize: Tx ba window size (in pdu)
 * ba_rx_wsize: Rx ba window size (in pdu)
 * retry_limit: mpdu transmit retry limit
 * rr_retry_limit: mpdu transmit retry limit at regular rate
 * retry_limit_tid: per-tid mpdu transmit retry limit
 * rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
 * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
 * max_pdu: max pdus allowed in ampdu
 * dur: max duration of an ampdu (in msec)
 * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
 * ffpld_rsvd: number of bytes to reserve for preload
 * max_txlen: max size of ampdu per mcs, bw and sgi
 * mfbr: enable multiple fallback rate
 * tx_max_funl: underflows should be kept such that
 *		(tx_max_funl*underflows) < tx frames
 * fifo_tb: table of fifo infos
 */
struct ampdu_info {
	struct brcms_c_info *wlc;
	int scb_handle;
	u8 ini_enable[AMPDU_MAX_SCB_TID];
	u8 ba_tx_wsize;
	u8 ba_rx_wsize;
	u8 retry_limit;
	u8 rr_retry_limit;
	u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 mpdu_density;
	s8 max_pdu;
	u8 dur;
	u8 rx_factor;
	u32 ffpld_rsvd;
	u32 max_txlen[MCS_TABLE_SIZE][2][2];
	bool mfbr;
	u32 tx_max_funl;
	struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};

static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
	u32 rate, mcs;

	for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
		/* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
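		/* e.g. MCS 7 at 20 MHz without SGI (~65000 kbps) and the
		 * default dur of 5 msec gives a cap of roughly 40 KB
		 */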
		/* 20MHz, No SGI */
		rate = mcs_2_rate(mcs, false, false);
		ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
		/* 40 MHz, No SGI */
		rate = mcs_2_rate(mcs, true, false);
		ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
		/* 20MHz, SGI */
		rate = mcs_2_rate(mcs, false, true);
		ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
		/* 40 MHz, SGI */
		rate = mcs_2_rate(mcs, true, true);
		ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
	}
}

static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
{
	if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
		return true;
	else
		return false;
}

static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
{
	struct brcms_c_info *wlc = ampdu->wlc;
	struct bcma_device *core = wlc->hw->d11core;

	wlc->pub->_ampdu = false;

	if (on) {
		if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
			brcms_err(core, "wl%d: driver not nmode enabled\n",
				  wlc->pub->unit);
			return -ENOTSUPP;
		}
		if (!brcms_c_ampdu_cap(ampdu)) {
			brcms_err(core, "wl%d: device not ampdu capable\n",
				  wlc->pub->unit);
			return -ENOTSUPP;
		}
		wlc->pub->_ampdu = on;
	}

	return 0;
}

static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
{
	int i, j;
	struct brcms_fifo_info *fifo;

	for (j = 0; j < NUM_FFPLD_FIFO; j++) {
		fifo = (ampdu->fifo_tb + j);
		fifo->ampdu_pld_size = 0;
		for (i = 0; i <= FFPLD_MAX_MCS; i++)
			fifo->mcs2ampdu_table[i] = 255;
		fifo->dmaxferrate = 0;
		fifo->accum_txampdu = 0;
		fifo->prev_txfunfl = 0;
		fifo->accum_txfunfl = 0;
	}
}

struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
{
	struct ampdu_info *ampdu;
	int i;

	ampdu = kzalloc(sizeof(*ampdu), GFP_ATOMIC);
	if (!ampdu)
		return NULL;

	ampdu->wlc = wlc;

	for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
		ampdu->ini_enable[i] = true;
	/* Disable ampdu for VO by default */
	ampdu->ini_enable[PRIO_8021D_VO] = false;
	ampdu->ini_enable[PRIO_8021D_NC] = false;

	/* Disable ampdu for BK by default since not enough fifo space */
	ampdu->ini_enable[PRIO_8021D_NONE] = false;
	ampdu->ini_enable[PRIO_8021D_BK] = false;

	ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
	ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
	ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
	ampdu->max_pdu = AUTO;
	ampdu->dur = AMPDU_MAX_DUR;

	ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
	/*
	 * bump max ampdu rcv size to 64k for all 11n
	 * devices except 4321A0 and 4321A1
	 */
	if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
		ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
	else
		ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
	ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;

	for (i = 0; i < AMPDU_MAX_SCB_TID; i++) {
		ampdu->retry_limit_tid[i] = ampdu->retry_limit;
		ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
	}

	brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur);
	ampdu->mfbr = false;
	/* try to set ampdu to the default value */
	brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu);

	ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
	brcms_c_ffpld_init(ampdu);

	return ampdu;
}

void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
{
	kfree(ampdu);
}

static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
					    struct scb *scb)
{
	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
	int i;

	scb_ampdu->max_pdu = AMPDU_NUM_MPDU;

	/* go back to legacy size if some preloading is occurring */
	for (i = 0; i < NUM_FFPLD_FIFO; i++) {
		if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
			scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
	}

	/* apply user override */
	if (ampdu->max_pdu != AUTO)
		scb_ampdu->max_pdu = (u8) ampdu->max_pdu;

	scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
				   AMPDU_SCB_MAX_RELEASE);

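	/* keep the release burst within the peer's advertised rx ampdu
	 * limit, assuming mpdus of roughly 1600 bytes each
	 */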
	if (scb_ampdu->max_rx_ampdu_bytes)
		scb_ampdu->release = min_t(u8, scb_ampdu->release,
			scb_ampdu->max_rx_ampdu_bytes / 1600);

	scb_ampdu->release = min(scb_ampdu->release,
				 ampdu->fifo_tb[TX_AC_BE_FIFO].
				 mcs2ampdu_table[FFPLD_MAX_MCS]);
}

static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
	brcms_c_scb_ampdu_update_config(ampdu, &ampdu->wlc->pri_scb);
}

static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
	int i;
	u32 phy_rate, dma_rate, tmp;
	u8 max_mpdu;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);

	/* recompute the dma rate */
	/* note : we divide/multiply by 100 to avoid integer overflows */
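	/* effective dma rate needed to keep up with the phy: phy_rate
	 * scaled by the fraction of the ampdu that is not already covered
	 * by the pre-loaded bytes
	 */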
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);
	phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	dma_rate =
	    (((phy_rate / 100) *
	      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
	     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
	fifo->dmaxferrate = dma_rate;

	/* fill up the mcs2ampdu table; do not recalc the last mcs */
	dma_rate = dma_rate >> 7;
	for (i = 0; i < FFPLD_MAX_MCS; i++) {
		/* shifting to keep it within integer range */
		phy_rate = mcs_2_rate(i, true, false) >> 7;
		if (phy_rate > dma_rate) {
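			/* the phy drains faster than the dma can feed it:
			 * cap the ampdu at the point where the pre-loaded
			 * bytes still cover the shortfall
			 */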
			tmp = ((fifo->ampdu_pld_size * phy_rate) /
			       ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
			tmp = min_t(u32, tmp, 255);
			fifo->mcs2ampdu_table[i] = (u8) tmp;
		}
	}
}

/* evaluate the dma transfer rate using the tx underflows as feedback.
 * If necessary, increase tx fifo preloading. If not enough,
 * decrease maximum ampdu size for each mcs till underflows stop
 * Return 1 if pre-loading not active, -1 if not an underflow event,
 * 0 if pre-loading module took care of the event.
 */
static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
	struct ampdu_info *ampdu = wlc->ampdu;
	u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	u32 txunfl_ratio;
	u8 max_mpdu;
	u32 current_ampdu_cnt = 0;
	u16 max_pld_size;
	u32 new_txunfl;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
	uint xmtfifo_sz;
	u16 cur_txunfl;

	/* return if we got here for a different reason than underflows */
	cur_txunfl = brcms_b_read_shm(wlc->hw,
				      M_UCODE_MACSTAT +
				      offsetof(struct macstat, txfunfl[fid]));
	new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
	if (new_txunfl == 0) {
		brcms_dbg_ht(wlc->hw->d11core,
			     "TX status FRAG set but no tx underflows\n");
		return -1;
	}
	fifo->prev_txfunfl = cur_txunfl;

	if (!ampdu->tx_max_funl)
		return 1;

	/* check if fifo is big enough */
	if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz))
		return -1;

	if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
		return 1;

	max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
	fifo->accum_txfunfl += new_txunfl;

	/* we need to wait for at least 10 underflows */
	if (fifo->accum_txfunfl < 10)
		return 0;

	brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d  tx_underflows %d\n",
		     current_ampdu_cnt, fifo->accum_txfunfl);

	/*
	 * compute the current ratio of tx unfl per ampdu.
	 * When the current ampdu count becomes too big while the ratio
	 * remains small, we reset the current count in order to not
	 * introduce too big of a latency in detecting a large amount of
	 * tx underflows later.
	 */

	txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;

	if (txunfl_ratio > ampdu->tx_max_funl) {
		if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
			fifo->accum_txfunfl = 0;

		return 0;
	}
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);

	/* if the preload size already covers max_mpdu full-size mpdus,
	 * there is nothing more we can do
	 */

	if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
		fifo->accum_txfunfl = 0;
		return 0;
	}

	if (fifo->ampdu_pld_size < max_pld_size) {

		/* increment by FFPLD_PLD_INCR bytes */
		fifo->ampdu_pld_size += FFPLD_PLD_INCR;
		if (fifo->ampdu_pld_size > max_pld_size)
			fifo->ampdu_pld_size = max_pld_size;

		/* update scb release size */
		brcms_c_scb_ampdu_update_config_all(ampdu);

		/*
		 * compute a new dma xfer rate for max_mpdu @ max mcs.
		 * This is the minimum dma rate that can achieve no
		 * underflow condition for the current mpdu size.
		 *
		 * note : we divide/multiply by 100 to avoid integer overflows
		 */
		fifo->dmaxferrate =
		    (((phy_rate / 100) *
		      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
		     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;

		brcms_dbg_ht(wlc->hw->d11core,
			     "DMA estimated transfer rate %d; "
			     "pre-load size %d\n",
			     fifo->dmaxferrate, fifo->ampdu_pld_size);
	} else {

		/* decrease ampdu size */
		if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
			if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
				    AMPDU_NUM_MPDU_LEGACY - 1;
			else
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;

			/* recompute the table */
			brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid);

			/* update scb release size */
			brcms_c_scb_ampdu_update_config_all(ampdu);
		}
	}
	fifo->accum_txfunfl = 0;
	return 0;
}

void
brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
	uint max_rx_ampdu_bytes) /* from ht_cap in beacon */
{
	struct scb_ampdu *scb_ampdu;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	scb_ampdu = &scb->scb_ampdu;

	if (!ampdu->ini_enable[tid]) {
		brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
			  __func__, tid);
		return;
	}

	scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
}

void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
				 struct brcms_c_info *wlc)
{
	session->wlc = wlc;
	skb_queue_head_init(&session->skb_list);
	session->max_ampdu_len = 0;    /* determined from first MPDU */
	session->max_ampdu_frames = 0; /* determined from first MPDU */
	session->ampdu_len = 0;
	session->dma_len = 0;
}

/*
 * Preps the given packet for AMPDU based on the session data. If the
 * frame cannot be accommodated in the current session, -ENOSPC is
 * returned.
 */
int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
			    struct sk_buff *p)
{
	struct brcms_c_info *wlc = session->wlc;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
	struct d11txh *txh = (struct d11txh *)p->data;
	unsigned ampdu_frames;
	u8 ndelim, tid;
	u8 *plcp;
	uint len;
	u16 mcl;
	bool fbr_iscck;
	bool rr;

	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
	plcp = (u8 *)(txh + 1);
	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
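	/* pad the mpdu to a 4-byte boundary and add the (ndelim + 1)
	 * 4-byte delimiters used for this subframe of the ampdu
	 */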
	len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;

	ampdu_frames = skb_queue_len(&session->skb_list);
	if (ampdu_frames != 0) {
		struct sk_buff *first;

		if (ampdu_frames + 1 > session->max_ampdu_frames ||
		    session->ampdu_len + len > session->max_ampdu_len)
			return -ENOSPC;

		/*
		 * We aren't really out of space if the new frame is of
		 * a different priority, but we want the same behaviour
		 * so return -ENOSPC anyway.
		 *
		 * XXX: The old AMPDU code did this, but is it really
		 * necessary?
		 */
		first = skb_peek(&session->skb_list);
		if (p->priority != first->priority)
			return -ENOSPC;
	}

	/*
	 * Now that we're sure this frame can be accommodated, update the
	 * session information.
	 */
	session->ampdu_len += len;
	session->dma_len += p->len;

	tid = (u8)p->priority;

	/* Handle retry limits */
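	/* the first rr_retry_limit_tid attempts are counted against the
	 * primary (regular) rate; subsequent attempts are counted against
	 * the fallback rate
	 */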
	if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
		txrate[0].count++;
		rr = true;
	} else {
		txrate[1].count++;
		rr = false;
	}

	if (ampdu_frames == 0) {
		u8 plcp0, plcp3, is40, sgi, mcs;
		uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
		struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];

		if (rr) {
			plcp0 = plcp[0];
			plcp3 = plcp[3];
		} else {
			plcp0 = txh->FragPLCPFallback[0];
			plcp3 = txh->FragPLCPFallback[3];
		}

		/* Limit AMPDU size based on MCS */
		is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
		sgi = plcp3_issgi(plcp3) ? 1 : 0;
		mcs = plcp0 & ~MIMO_PLCP_40MHZ;
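		/* cap the ampdu at the smaller of what the peer advertised
		 * and what fits in ampdu->dur at this mcs/bw/sgi
		 */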
		session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
					     ampdu->max_txlen[mcs][is40][sgi]);

		session->max_ampdu_frames = scb_ampdu->max_pdu;
		if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
			session->max_ampdu_frames =
				min_t(u16, f->mcs2ampdu_table[mcs],
				      session->max_ampdu_frames);
		}
	}

	/*
	 * Treat all frames as "middle" frames of AMPDU here. First and
	 * last frames must be fixed up after all MPDUs have been prepped.
	 */
	mcl = le16_to_cpu(txh->MacTxControlLow);
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
	mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
	txh->MacTxControlLow = cpu_to_le16(mcl);
	txh->PreloadSize = 0;	/* always default to 0 */

	skb_queue_tail(&session->skb_list, p);

	return 0;
}

void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
{
	struct brcms_c_info *wlc = session->wlc;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct sk_buff *first, *last;
	struct d11txh *txh;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *txrate;
	u8 ndelim;
	u8 *plcp;
	uint len;
	uint fifo;
	struct brcms_fifo_info *f;
	u16 mcl;
	bool fbr;
	bool fbr_iscck;
	struct ieee80211_rts *rts;
	bool use_rts = false, use_cts = false;
	u16 dma_len = session->dma_len;
	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	u32 rspec = 0, rspec_fallback = 0;
	u32 rts_rspec = 0, rts_rspec_fallback = 0;
	u8 plcp0, is40, mcs;
	u16 mch;
	u8 preamble_type = BRCMS_GF_PREAMBLE;
	u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
	u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
	u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;

	if (skb_queue_empty(&session->skb_list))
		return;

	first = skb_peek(&session->skb_list);
	last = skb_peek_tail(&session->skb_list);

	/* Need to fix up last MPDU first to adjust AMPDU length */
	txh = (struct d11txh *)last->data;
	fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
	f = &ampdu->fifo_tb[fifo];

	mcl = le16_to_cpu(txh->MacTxControlLow);
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
	txh->MacTxControlLow = cpu_to_le16(mcl);

	/* remove the null delimiter after last mpdu */
	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
	txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
	session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;

	/* remove the pad len from last mpdu */
	fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
	session->ampdu_len -= roundup(len, 4) - len;

	/* Now fix up the first MPDU */
	tx_info = IEEE80211_SKB_CB(first);
	txrate = tx_info->status.rates;
	txh = (struct d11txh *)first->data;
	plcp = (u8 *)(txh + 1);
	rts = (struct ieee80211_rts *)&txh->rts_frame;

	mcl = le16_to_cpu(txh->MacTxControlLow);
	/* If only one MPDU leave it marked as last */
	if (first != last) {
		mcl &= ~TXC_AMPDU_MASK;
		mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
	}
	mcl |= TXC_STARTMSDU;
	if (ieee80211_is_rts(rts->frame_control)) {
		mcl |= TXC_SENDRTS;
		use_rts = true;
	}
	if (ieee80211_is_cts(rts->frame_control)) {
		mcl |= TXC_SENDCTS;
		use_cts = true;
	}
	txh->MacTxControlLow = cpu_to_le16(mcl);

	fbr = txrate[1].count > 0;
	if (!fbr)
		plcp0 = plcp[0];
	else
		plcp0 = txh->FragPLCPFallback[0];

	is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
	mcs = plcp0 & ~MIMO_PLCP_40MHZ;

	if (is40) {
		if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
			mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
		else
			mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	}

	/* rebuild the rspec and rspec_fallback */
	rspec = RSPEC_MIMORATE;
	rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
	if (plcp[0] & MIMO_PLCP_40MHZ)
		rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);

	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
	if (fbr_iscck) {
		rspec_fallback =
			cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
	} else {
		rspec_fallback = RSPEC_MIMORATE;
		rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
		if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
			rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
	}

	if (use_rts || use_cts) {
		rts_rspec =
			brcms_c_rspec_to_rts_rspec(wlc, rspec,
						   false, mimo_ctlchbw);
		rts_rspec_fallback =
			brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
						   false, mimo_ctlchbw);
	}

	BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
	/* mark plcp to indicate ampdu */
	BRCMS_SET_MIMO_PLCP_AMPDU(plcp);

	/* reset the mixed mode header durations */
	if (txh->MModeLen) {
		u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
						     session->ampdu_len);
		txh->MModeLen = cpu_to_le16(mmodelen);
		preamble_type = BRCMS_MM_PREAMBLE;
	}
	if (txh->MModeFbrLen) {
		u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
						     session->ampdu_len);
		txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
		fbr_preamble_type = BRCMS_MM_PREAMBLE;
	}

	/* set the preload length */
	if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
		dma_len = min(dma_len, f->ampdu_pld_size);
		txh->PreloadSize = cpu_to_le16(dma_len);
	} else {
		txh->PreloadSize = 0;
	}

	mch = le16_to_cpu(txh->MacTxControlHigh);

	/* update RTS dur fields */
	if (use_rts || use_cts) {
		u16 durid;
		if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
		    TXC_PREAMBLE_RTS_MAIN_SHORT)
			rts_preamble_type = BRCMS_SHORT_PREAMBLE;

		if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
		     TXC_PREAMBLE_RTS_FB_SHORT)
			rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;

		durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
						   rspec, rts_preamble_type,
						   preamble_type,
						   session->ampdu_len, true);
		rts->duration = cpu_to_le16(durid);
		durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
						   rts_rspec_fallback,
						   rspec_fallback,
						   rts_fbr_preamble_type,
						   fbr_preamble_type,
						   session->ampdu_len, true);
		txh->RTSDurFallback = cpu_to_le16(durid);
		/* set TxFesTimeNormal */
		txh->TxFesTimeNormal = rts->duration;
		/* set fallback rate version of TxFesTimeNormal */
		txh->TxFesTimeFallback = txh->RTSDurFallback;
	}

	/* set flag and plcp for fallback rate */
	if (fbr) {
		mch |= TXC_AMPDU_FBR;
		txh->MacTxControlHigh = cpu_to_le16(mch);
		BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
		BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
	}

	brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
		     wlc->pub->unit, skb_queue_len(&session->skb_list),
		     session->ampdu_len);
}

static void
brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
			  struct ieee80211_tx_info *tx_info,
			  struct tx_status *txs, u8 mcs)
{
	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
	int i;

	/* clear the rest of the rates */
	for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
		txrate[i].idx = -1;
		txrate[i].count = 0;
	}
}

static void
brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
			      struct sk_buff *p, struct tx_status *txs,
			      u32 s1, u32 s2)
{
	struct scb_ampdu *scb_ampdu;
	struct brcms_c_info *wlc = ampdu->wlc;
	struct scb_ampdu_tid_ini *ini;
	u8 bitmap[8], queue, tid;
	struct d11txh *txh;
	u8 *plcp;
	struct ieee80211_hdr *h;
	u16 seq, start_seq = 0, bindex, index, mcl;
	u8 mcs = 0;
	bool ba_recd = false, ack_recd = false;
	u8 tot_mpdu = 0;
	uint supr_status;
	bool retry = true;
	u16 mimoantsel = 0;
	u8 retry_limit;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);

#ifdef DEBUG
	u8 hole[AMPDU_MAX_MPDU];
	memset(hole, 0, sizeof(hole));
#endif

	scb_ampdu = &scb->scb_ampdu;
	tid = (u8) (p->priority);

	ini = &scb_ampdu->ini[tid];
	retry_limit = ampdu->retry_limit_tid[tid];
	memset(bitmap, 0, sizeof(bitmap));
	queue = txs->frameid & TXFID_QUEUE_MASK;
	supr_status = txs->status & TX_STATUS_SUPR_MASK;

	if (txs->status & TX_STATUS_ACK_RCV) {
		WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
		start_seq = txs->sequence >> SEQNUM_SHIFT;
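		/* reassemble the 64-bit block ack bitmap: the low bits come
		 * from the tx status word, the rest from the two extra
		 * status words s1 and s2
		 */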
		bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
		    TX_STATUS_BA_BMAP03_SHIFT;

		WARN_ON(s1 & TX_STATUS_INTERMEDIATE);
		WARN_ON(!(s1 & TX_STATUS_AMPDU));

		bitmap[0] |=
		    (s1 & TX_STATUS_BA_BMAP47_MASK) <<
		    TX_STATUS_BA_BMAP47_SHIFT;
		bitmap[1] = (s1 >> 8) & 0xff;
		bitmap[2] = (s1 >> 16) & 0xff;
		bitmap[3] = (s1 >> 24) & 0xff;

		bitmap[4] = s2 & 0xff;
		bitmap[5] = (s2 >> 8) & 0xff;
		bitmap[6] = (s2 >> 16) & 0xff;
		bitmap[7] = (s2 >> 24) & 0xff;

		ba_recd = true;
	} else {
		if (supr_status) {
			if (supr_status == TX_STATUS_SUPR_BADCH) {
				brcms_dbg_ht(wlc->hw->d11core,
					  "%s: Pkt tx suppressed, illegal channel possibly %d\n",
					  __func__, CHSPEC_CHANNEL(
					  wlc->default_bss->chanspec));
			} else {
				if (supr_status != TX_STATUS_SUPR_FRAG)
					brcms_err(wlc->hw->d11core,
						  "%s: supr_status 0x%x\n",
						  __func__, supr_status);
			}
			/* no need to retry for badch; will fail again */
			if (supr_status == TX_STATUS_SUPR_BADCH ||
			    supr_status == TX_STATUS_SUPR_EXPTIME) {
				retry = false;
			} else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
				/* TX underflow:
				 *   try tuning pre-loading or ampdu size
				 */
			} else if (supr_status == TX_STATUS_SUPR_FRAG) {
				/*
				 * if there were underflows, but pre-loading
				 * is not active, notify rate adaptation.
				 */
				brcms_c_ffpld_check_txfunfl(wlc, queue);
			}
		} else if (txs->phyerr) {
			brcms_dbg_ht(wlc->hw->d11core,
				     "%s: ampdu tx phy error (0x%x)\n",
				     __func__, txs->phyerr);
		}
	}

	/* loop through all pkts and retry if not acked */
	while (p) {
		tx_info = IEEE80211_SKB_CB(p);
		txh = (struct d11txh *) p->data;
		mcl = le16_to_cpu(txh->MacTxControlLow);
		plcp = (u8 *) (txh + 1);
		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;

		trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));

		if (tot_mpdu == 0) {
			mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
			mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
		}

		index = TX_SEQ_TO_INDEX(seq);
		ack_recd = false;
		if (ba_recd) {
			int block_acked;

			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
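			/* bindex: offset of this mpdu from the BA starting
			 * sequence, computed modulo the sequence number space
			 */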
			if (bindex < AMPDU_TX_BA_MAX_WSIZE)
				block_acked = isset(bitmap, bindex);
			else
				block_acked = 0;
			brcms_dbg_ht(wlc->hw->d11core,
				     "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
				     tid, seq, start_seq, bindex,
				     block_acked, index);
			/* if acked then clear bit and free packet */
			if (block_acked) {
				ini->txretry[index] = 0;

				/*
				 * ampdu_ack_len:
				 *   number of acked aggregated frames
				 */
				/* ampdu_len: number of aggregated frames */
				brcms_c_ampdu_rate_status(wlc, tx_info, txs,
							  mcs);
				tx_info->flags |= IEEE80211_TX_STAT_ACK;
				tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
				tx_info->status.ampdu_ack_len =
					tx_info->status.ampdu_len = 1;

				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);

				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
				ack_recd = true;
			}
		}
		/* either retransmit or send bar if ack not recd */
		if (!ack_recd) {
			if (retry && (ini->txretry[index] < (int)retry_limit)) {
				int ret;
				ini->txretry[index]++;
				ret = brcms_c_txfifo(wlc, queue, p);
				/*
				 * We shouldn't be out of space in the DMA
				 * ring here since we're reinserting a frame
				 * that was just pulled out.
				 */
				WARN_ONCE(ret, "queue %d out of txds\n", queue);
			} else {
				/* Retry timeout */
				ieee80211_tx_info_clear_status(tx_info);
				tx_info->status.ampdu_ack_len = 0;
				tx_info->status.ampdu_len = 1;
				tx_info->flags |=
				    IEEE80211_TX_STAT_AMPDU_NO_BACK;
				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);
				brcms_dbg_ht(wlc->hw->d11core,
					     "BA Timeout, seq %d\n",
					     seq);
				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
			}
		}
		tot_mpdu++;

		/* break out if last packet of ampdu */
		if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
		    TXC_AMPDU_LAST)
			break;

		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
	}

	/* update rate state */
	brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
}

void
brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
		     struct sk_buff *p, struct tx_status *txs)
{
	struct brcms_c_info *wlc = ampdu->wlc;
	u32 s1 = 0, s2 = 0;

	/* BMAC_NOTE: For the split driver, second level txstatus comes later
	 * So if the ACK was received then wait for the second level else just
	 * call the first one
	 */
	if (txs->status & TX_STATUS_ACK_RCV) {
		u8 status_delay = 0;

		/* wait till the next 8 bytes of txstatus is available */
		s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
		while ((s1 & TXS_V) == 0) {
			udelay(1);
			status_delay++;
			if (status_delay > 10)
				return; /* error condition */
			s1 = bcma_read32(wlc->hw->d11core,
					 D11REGOFFS(frmtxstatus));
		}

		s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
	}

	if (scb) {
		brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
	} else {
		/* loop through all pkts and free */
		u8 queue = txs->frameid & TXFID_QUEUE_MASK;
		struct d11txh *txh;
		u16 mcl;
		while (p) {
			txh = (struct d11txh *) p->data;
			trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
					   sizeof(*txh));
			mcl = le16_to_cpu(txh->MacTxControlLow);
			brcmu_pkt_buf_free_skb(p);
			/* break out if last packet of ampdu */
			if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
			    TXC_AMPDU_LAST)
				break;
			p = dma_getnexttxp(wlc->hw->di[queue],
					   DMA_RANGE_TRANSMITTED);
		}
	}
}

void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
{
	char template[T_RAM_ACCESS_SZ * 2];

	/* driver needs to write the ta in the template; ta is at offset 16 */
	memset(template, 0, sizeof(template));
	memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
	brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16),
				  (T_RAM_ACCESS_SZ * 2),
				  template);
}

bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid)
{
	return wlc->ampdu->ini_enable[tid];
}

void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
	struct brcms_c_info *wlc = ampdu->wlc;

	/*
	 * Extend ucode internal watchdog timer to
	 * match larger received frames
	 */
	if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
	    IEEE80211_HT_MAX_AMPDU_64K) {
		brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
		brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
	} else {
		brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
		brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
	}
}

/*
 * callback function that helps invalidating ampdu packets in a DMA queue
 */
static void dma_cb_fn_ampdu(void *txi, void *arg_a)
{
	struct ieee80211_sta *sta = arg_a;
	struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    (tx_info->rate_driver_data[0] == sta || sta == NULL))
		tx_info->rate_driver_data[0] = NULL;
}

/*
 * When a remote party is no longer available for ampdu communication, any
 * pending tx ampdu packets in the driver have to be flushed.
 */
void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
		     struct ieee80211_sta *sta, u16 tid)
{
	brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}