// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../dma.h"
#include "mac.h"

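/* Look up the mt76_wcid a received frame should be reported against.
 * For non-unicast frames from a known station, return the per-vif wcid
 * instead so group traffic is attributed to the interface.
 */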
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

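/* Convert a hardware rate index (from RXV or TXS) into an index into the
 * mac80211 bitrate table of the given band. CCK indices are only valid on
 * 2 GHz and have their short-preamble bit masked off before matching.
 */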
static int mt7615_get_rate(struct mt7615_dev *dev,
			   struct ieee80211_supported_band *sband,
			   int idx, bool cck)
{
	int offset = 0;
	int len = sband->n_bitrates;
	int i;

	if (cck) {
		if (sband == &dev->mt76.sband_5g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->mt76.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}

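/* Re-insert the CCMP header stripped by the hardware: make room in front
 * of the payload, rebuild the PN/ExtIV/key-id bytes from the saved IV and
 * clear RX_FLAG_IV_STRIPPED so mac80211 sees a complete frame.
 */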
static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *pn = status->iv;
	u8 *hdr;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}

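/* Parse the RX descriptor in front of a received frame and fill in
 * mt76_rx_status: wcid, band and frequency, FCS/decryption flags, rate
 * info from the optional RXV group and A-MPDU reordering hints.
 * Returns 0 on success or -EINVAL if the descriptor is malformed.
 */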
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast, remove_pad, insert_ccmp_hdr = false;
	int i, idx;

	memset(status, 0, sizeof(*status));

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	/* TODO: properly support DBDC */
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &dev->mt76.sband_5g.sband;
	else
		sband = &dev->mt76.sband_2g.sband;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt7615_get_rate(dev, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		/* TODO: RSSI */
		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt7615_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

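/* Empty mt76 .sta_ps callback: no per-station action is taken on
 * power-save transitions here.
 */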
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

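/* DMA TX completion callback. Frames that went through the cut-through
 * path carry DMA_DUMMY_DATA instead of the skb; on that (error) path the
 * real skb is looked up via the token table before being completed.
 */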
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7615_dev *dev;
		struct mt7615_txp *txp;
		u8 *txwi_ptr;

		txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
		txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE);
		dev = container_of(mdev, struct mt7615_dev, mt76);

		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, le16_to_cpu(txp->token));
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->skb);
}

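/* Encode an ieee80211_tx_rate into the hardware rate value (PHY mode,
 * rate/MCS index, NSS and STBC) and report the matching bandwidth
 * selector through @bw.
 */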
u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
			   const struct ieee80211_tx_rate *rate,
			   bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}

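/* Fill the TX descriptor (TXWI) for a frame: queue and port selection,
 * wcid/omac indices, optional fixed-rate programming, PID for TX status
 * reporting, sequence number and protection/ACK flags.
 */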
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	int tx_count = 8;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0;
	__le16 fc = hdr->frame_control;
	u16 seqno = 0;
	u32 val;

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	if (ieee80211_is_data(fc)) {
		q_idx = skb_get_queue_mapping(skb);
		p_fmt = MT_TX_TYPE_CT;
	} else if (ieee80211_is_beacon(fc)) {
		q_idx = MT_LMAC_BCN0;
		p_fmt = MT_TX_TYPE_FW;
	} else {
		q_idx = MT_LMAC_ALTX0;
		p_fmt = MT_TX_TYPE_CT;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
		      FIELD_PREP(MT_TXD5_PID, pid);
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
		val |= MT_TXD3_SN_VALID;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;

		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
		val |= MT_TXD3_SN_VALID;
	}
	val |= FIELD_PREP(MT_TXD3_SEQ, seqno);

	txwi[3] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	if (key)
		txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME);

	txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
		  FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);

	return 0;
}

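/* Unmap the extra DMA buffers listed in the TXP following the TX
 * descriptor. The first buffer is part of the DMA ring entry itself and
 * is unmapped by the mt76 core, hence the loop starts at 1.
 */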
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7615_txp *txp;
	u8 *txwi;
	int i;

	txwi = mt76_get_txwi_ptr(dev, t);
	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
	for (i = 1; i < txp->nbuf; i++)
		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}

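/* Prepare a frame for cut-through TX: allocate a packet id for status
 * reporting, write the TXWI, describe the remaining fragments in the TXP
 * and stash the skb behind an idr token so it can be completed from the
 * TX-free event.
 */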
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	int i, pid, id, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_txp *txp;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		msta->rate_probe = true;
		mt7615_mcu_set_rates(dev, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key);

	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	txp->token = cpu_to_le16(id);
	txp->rept_wds_wcid = 0xff;
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

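/* Translate a TX status (TXS) report into ieee80211_tx_info: ACK state,
 * per-rate retry counts reconstructed from the station's configured rate
 * table and the final rate the hardware actually used. Returns false if
 * the report cannot be used.
 */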
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	int i, idx, count, final_idx = 0;
	bool fixed_rate, final_mpdu, ack_timeout;
	bool probe, ampdu, cck = false;
	u32 final_rate, final_rate_flags, final_nss, txs;
	u8 pid;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[1]);
	final_mpdu = txs & MT_TXS1_ACKED_MPDU;
	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY);

		if (!i && probe) {
			cur_count = 1;
		} else {
			info->status.rates[i] = sta->rates[idx];
			idx++;
		}

		if (i && info->status.rates[i].idx < 0) {
			info->status.rates[i - 1].count += count;
			break;
		}

		if (!count) {
			info->status.rates[i].idx = -1;
			break;
		}

		info->status.rates[i].count = cur_count;
		final_idx = i;
		count -= cur_count;
	}

out:
	final_rate_flags = info->status.rates[final_idx].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[final_idx].idx = final_rate;
	info->status.rates[final_idx].flags = final_rate_flags;

	return true;
}

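/* Match a TXS report against a status-pending skb by packet id and
 * complete it; if the frame was a rate-control probe, restore the
 * configured rate table first.
 */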
static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			spin_lock_bh(&dev->mt76.lock);
			if (sta->rate_probe) {
				mt7615_mcu_set_rates(dev, sta, NULL,
						     sta->rates);
				sta->rate_probe = false;
			}
			spin_unlock_bh(&dev->mt76.lock);
		}

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

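/* Handle a TXS event: resolve the reporting wcid, try to attach the
 * status to a queued skb and otherwise report it to mac80211 via
 * ieee80211_tx_status_noskb().
 */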
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

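/* Handle a TX-free event: every token in the message identifies a
 * previously queued frame whose DMA buffers can be unmapped and whose
 * skb can be completed.
 */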
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	u8 i, count;

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		spin_lock_bh(&dev->token_lock);
		txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
		spin_unlock_bh(&dev->token_lock);

		if (!txwi)
			continue;

		mt7615_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			mt76_tx_complete_skb(mdev, txwi->skb);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}
	dev_kfree_skb(skb);
}

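/* Periodic MAC housekeeping: flush timed-out TX status requests and
 * re-arm the watchdog. The cast is valid because mt76 is the first
 * member of struct mt7615_dev.
 */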
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_dev *dev;

	dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
						mac_work.work);

	mt76_tx_status_check(&dev->mt76, NULL, false);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT7615_WATCHDOG_TIME);
}