xref: /linux/drivers/net/wireless/realtek/rtw89/phy.c (revision 5eb02701)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "coex.h"
6 #include "debug.h"
7 #include "fw.h"
8 #include "mac.h"
9 #include "phy.h"
10 #include "ps.h"
11 #include "reg.h"
12 #include "sar.h"
13 #include "txrx.h"
14 #include "util.h"
15 
/* Resolve the register offset between PHY0 and PHY1 for @addr through
 * the chip-generation specific hook.
 */
static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	const struct rtw89_phy_gen_def *phy_def = rtwdev->chip->phy_def;

	return phy_def->phy0_phy1_offset(rtwdev, addr);
}
22 
get_max_amsdu_len(struct rtw89_dev * rtwdev,const struct rtw89_ra_report * report)23 static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
24 			     const struct rtw89_ra_report *report)
25 {
26 	u32 bit_rate = report->bit_rate;
27 
28 	/* lower than ofdm, do not aggregate */
29 	if (bit_rate < 550)
30 		return 1;
31 
32 	/* avoid AMSDU for legacy rate */
33 	if (report->might_fallback_legacy)
34 		return 1;
35 
36 	/* lower than 20M vht 2ss mcs8, make it small */
37 	if (bit_rate < 1800)
38 		return 1200;
39 
40 	/* lower than 40M vht 2ss mcs9, make it medium */
41 	if (bit_rate < 4000)
42 		return 2600;
43 
44 	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
45 	if (bit_rate < 7000)
46 		return 3500;
47 
48 	return rtwdev->chip->max_amsdu_limit;
49 }
50 
get_mcs_ra_mask(u16 mcs_map,u8 highest_mcs,u8 gap)51 static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
52 {
53 	u64 ra_mask = 0;
54 	u8 mcs_cap;
55 	int i, nss;
56 
57 	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
58 		mcs_cap = mcs_map & 0x3;
59 		switch (mcs_cap) {
60 		case 2:
61 			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
62 			break;
63 		case 1:
64 			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
65 			break;
66 		case 0:
67 			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
68 			break;
69 		default:
70 			break;
71 		}
72 	}
73 
74 	return ra_mask;
75 }
76 
get_he_ra_mask(struct ieee80211_sta * sta)77 static u64 get_he_ra_mask(struct ieee80211_sta *sta)
78 {
79 	struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
80 	u16 mcs_map;
81 
82 	switch (sta->deflink.bandwidth) {
83 	case IEEE80211_STA_RX_BW_160:
84 		if (cap.he_cap_elem.phy_cap_info[0] &
85 		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
86 			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
87 		else
88 			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
89 		break;
90 	default:
91 		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
92 	}
93 
94 	/* MCS11, MCS9, MCS7 */
95 	return get_mcs_ra_mask(mcs_map, 11, 2);
96 }
97 
get_eht_mcs_ra_mask(u8 * max_nss,u8 start_mcs,u8 n_nss)98 static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
99 {
100 	u64 nss_mcs_shift;
101 	u64 nss_mcs_val;
102 	u64 mask = 0;
103 	int i, j;
104 	u8 nss;
105 
106 	for (i = 0; i < n_nss; i++) {
107 		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
108 		if (!nss)
109 			continue;
110 
111 		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);
112 
113 		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
114 			mask |= nss_mcs_val << nss_mcs_shift;
115 	}
116 
117 	return mask;
118 }
119 
get_eht_ra_mask(struct ieee80211_sta * sta)120 static u64 get_eht_ra_mask(struct ieee80211_sta *sta)
121 {
122 	struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
123 	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
124 	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
125 	u8 *he_phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
126 
127 	switch (sta->deflink.bandwidth) {
128 	case IEEE80211_STA_RX_BW_320:
129 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
130 		/* MCS 9, 11, 13 */
131 		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
132 	case IEEE80211_STA_RX_BW_160:
133 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
134 		/* MCS 9, 11, 13 */
135 		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
136 	case IEEE80211_STA_RX_BW_20:
137 		if (!(he_phy_cap[0] &
138 		      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
139 			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
140 			/* MCS 7, 9, 11, 13 */
141 			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
142 		}
143 		fallthrough;
144 	case IEEE80211_STA_RX_BW_80:
145 	default:
146 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
147 		/* MCS 9, 11, 13 */
148 		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
149 	}
150 }
151 
152 #define RA_FLOOR_TABLE_SIZE	7
153 #define RA_FLOOR_UP_GAP		3
rtw89_phy_ra_mask_rssi(struct rtw89_dev * rtwdev,u8 rssi,u8 ratr_state)154 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
155 				  u8 ratr_state)
156 {
157 	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
158 	u8 rssi_lv = 0;
159 	u8 i;
160 
161 	rssi >>= 1;
162 	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
163 		if (i >= ratr_state)
164 			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
165 		if (rssi < rssi_lv_t[i]) {
166 			rssi_lv = i;
167 			break;
168 		}
169 	}
170 	if (rssi_lv == 0)
171 		return 0xffffffffffffffffULL;
172 	else if (rssi_lv == 1)
173 		return 0xfffffffffffffff0ULL;
174 	else if (rssi_lv == 2)
175 		return 0xffffffffffffefe0ULL;
176 	else if (rssi_lv == 3)
177 		return 0xffffffffffffcfc0ULL;
178 	else if (rssi_lv == 4)
179 		return 0xffffffffffff8f80ULL;
180 	else if (rssi_lv >= 5)
181 		return 0xffffffffffff0f00ULL;
182 
183 	return 0xffffffffffffffffULL;
184 }
185 
/* Make sure the filtered rate mask never goes empty: first restore the
 * HT-and-above rates from the backup if they were all masked out, then
 * fall back to the backup's legacy (CCK/OFDM) rates if still empty.
 */
static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	u64 legacy = RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES;

	if (!(ra_mask & ~legacy))
		ra_mask |= ra_mask_bak & ~legacy;

	if (!ra_mask)
		ra_mask |= ra_mask_bak & legacy;

	return ra_mask;
}
196 
rtw89_phy_ra_mask_cfg(struct rtw89_dev * rtwdev,struct rtw89_sta * rtwsta,const struct rtw89_chan * chan)197 static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
198 				 const struct rtw89_chan *chan)
199 {
200 	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
201 	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
202 	enum nl80211_band band;
203 	u64 cfg_mask;
204 
205 	if (!rtwsta->use_cfg_mask)
206 		return -1;
207 
208 	switch (chan->band_type) {
209 	case RTW89_BAND_2G:
210 		band = NL80211_BAND_2GHZ;
211 		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
212 					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
213 		break;
214 	case RTW89_BAND_5G:
215 		band = NL80211_BAND_5GHZ;
216 		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
217 					   RA_MASK_OFDM_RATES);
218 		break;
219 	case RTW89_BAND_6G:
220 		band = NL80211_BAND_6GHZ;
221 		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
222 					   RA_MASK_OFDM_RATES);
223 		break;
224 	default:
225 		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
226 		return -1;
227 	}
228 
229 	if (sta->deflink.he_cap.has_he) {
230 		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
231 					    RA_MASK_HE_1SS_RATES);
232 		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
233 					    RA_MASK_HE_2SS_RATES);
234 	} else if (sta->deflink.vht_cap.vht_supported) {
235 		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
236 					    RA_MASK_VHT_1SS_RATES);
237 		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
238 					    RA_MASK_VHT_2SS_RATES);
239 	} else if (sta->deflink.ht_cap.ht_supported) {
240 		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
241 					    RA_MASK_HT_1SS_RATES);
242 		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
243 					    RA_MASK_HT_2SS_RATES);
244 	}
245 
246 	return cfg_mask;
247 }
248 
/* Per-mode high-rate mask tables, indexed by spatial stream (0 = 1SS). */
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
261 
/* Map the user-configured (HE LTF, HE GI) pair onto a fixed GI/LTF
 * code. Any unrecognized combination disables GI/LTF fixing; with no
 * user mask active the outputs are left untouched.
 */
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta *rtwsta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_gi = mask->control[nl_band].he_gi;
	u8 he_ltf = mask->control[nl_band].he_ltf;

	if (!rtwsta->use_cfg_mask)
		return;

	/* he_ltf and he_gi are both u8, so (he_ltf << 8 | he_gi) encodes
	 * the pair uniquely.
	 */
	switch (he_ltf << 8 | he_gi) {
	case 0x0202:	/* LTF 2, GI 2 */
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
		break;
	case 0x0200:	/* LTF 2, GI 0 */
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
		break;
	case 0x0101:	/* LTF 1, GI 1 */
		*fix_giltf = RTW89_GILTF_2XHE16;
		break;
	case 0x0100:	/* LTF 1, GI 0 */
		*fix_giltf = RTW89_GILTF_2XHE08;
		break;
	case 0x0001:	/* LTF 0, GI 1 */
		*fix_giltf = RTW89_GILTF_1XHE16;
		break;
	case 0x0000:	/* LTF 0, GI 0 */
		*fix_giltf = RTW89_GILTF_1XHE08;
		break;
	default:
		*fix_giltf_en = false;
		return;
	}

	*fix_giltf_en = true;
}
295 
/* Build the rate-adaptation (RA) parameters for @sta into rtwsta->ra,
 * from the peer's mac80211 capabilities, the current channel, the
 * averaged RSSI and any user-configured bitrate mask or rate pattern.
 * @csi: additionally fill the CSI-report related fields of the RA info.
 * The result is later sent to firmware via rtw89_fw_h2c_ra().
 */
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct ieee80211_sta *sta, bool csi)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability; pick the highest of
	 * EHT > HE > VHT > HT, and the matching per-NSS mask table.
	 */
	if (sta->deflink.eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(sta);
		high_rate_masks = rtw89_ra_mask_eht_rates;
	} else if (sta->deflink.he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf);
	} else if (sta->deflink.vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9, MCS8, MCS7 */
		ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (sta->deflink.ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		/* Place each HT rx_mask byte at its per-NSS bit position. */
		ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
			   (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
			   (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	/* Add the legacy rates of the current band. */
	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	/* Keep a backup so rtw89_phy_ra_mask_recover() can restore rates
	 * if the filters below empty the mask.
	 */
	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;
		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);

	/* SGI capability depends on the bandwidth actually in use. */
	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	/* A user-set rate pattern overrides the capability-derived mask
	 * (not applied on P2P interfaces).
	 */
	if (rate_pattern->enable && !vif->p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;

	if (!csi)
		return;

	/* CSI-report related settings. */
	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}
463 
/* Recompute RA info for @sta and push it to firmware.
 * @changed: IEEE80211_RC_* flags selecting which H2C update masks are
 * raised. (The "updata" spelling is part of the established API name.)
 */
void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;

	rtw89_phy_ra_sta_update(rtwdev, sta, false);

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}
487 
/* Inspect one rate-control bitmap and, if it selects exactly what a
 * fixed rate pattern needs, record that pattern in @next.
 * Returns false to abort the scan: either @force requires a single bit
 * but several are set, or a pattern was already recorded.
 */
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 nbits, top;

	/* Nothing configured for this rate class: keep scanning. */
	if (rate_ctrl == ctrl_skip)
		return true;

	nbits = hweight32(rate_ctrl);
	if (nbits == 0)
		return true;

	if ((force && nbits != 1) || next->enable)
		return false;

	top = __fls(rate_ctrl);
	next->rate = rate_base + top;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}
515 
/* Expand to a per-chip-generation HW rate table entry: AX chips use the
 * base rate enumeration, BE chips the V1 one.
 */
#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}
521 
/* Derive a fixed rate pattern for @vif from the user's bitrate @mask.
 * HE, VHT and HT MCS maps are scanned per spatial stream; a pattern is
 * accepted only if exactly one rate is selected (force mode). Legacy
 * rates are considered last. On any conflict or when the mask selects
 * everything, the rate pattern is disabled instead.
 */
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	/* Base HW rate codes (MCS0 of each NSS), per chip generation. */
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * require at least one basic rate for ieee80211_set_bitrate_mask,
	 * so the decision just depends on if all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}
611 
rtw89_phy_ra_updata_sta_iter(void * data,struct ieee80211_sta * sta)612 static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
613 {
614 	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
615 
616 	rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
617 }
618 
/* Refresh rate adaptation for every station known to mac80211. */
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_updata_sta_iter,
					  rtwdev);
}
625 
rtw89_phy_ra_assoc(struct rtw89_dev * rtwdev,struct ieee80211_sta * sta)626 void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
627 {
628 	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
629 	struct rtw89_ra_info *ra = &rtwsta->ra;
630 	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
631 	bool csi = rtw89_sta_has_beamformer_cap(sta);
632 
633 	rtw89_phy_ra_sta_update(rtwdev, sta, csi);
634 
635 	if (rssi > 40)
636 		ra->init_rate_lv = 1;
637 	else if (rssi > 20)
638 		ra->init_rate_lv = 2;
639 	else if (rssi > 1)
640 		ra->init_rate_lv = 3;
641 	else
642 		ra->init_rate_lv = 0;
643 	ra->upd_all = 1;
644 	rtw89_debug(rtwdev, RTW89_DBG_RA,
645 		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
646 		    ra->macid,
647 		    ra->mode_ctrl,
648 		    ra->bw_cap,
649 		    ra->ss_num,
650 		    ra->init_rate_lv);
651 	rtw89_debug(rtwdev, RTW89_DBG_RA,
652 		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
653 		    ra->dcm_cap,
654 		    ra->er_cap,
655 		    ra->ldpc_cap,
656 		    ra->stbc_cap,
657 		    ra->en_sgi,
658 		    ra->giltf);
659 
660 	rtw89_fw_h2c_ra(rtwdev, ra, csi);
661 }
662 
/* Compute the TX sub-channel (TXSC) index that locates a data bandwidth
 * @dbw inside the channel bandwidth of @chan, based on the primary
 * channel's offset from the center channel.
 * Returns 0 when @dbw covers the whole channel (or the channel is
 * 20 MHz), and 0xff for an unexpected primary offset at 160 MHz.
 */
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		/* upper or lower 20 MHz half */
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			/* dbw is 40 MHz: upper or lower half */
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		/* tmp encodes the 20 MHz slot of the primary channel */
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
732 
/* Compute the TX sub-band (TXSB) index of data bandwidth @dbw inside
 * the channel bandwidth of @chan. Unlike TXSC, sub-bands are numbered
 * linearly from the low edge of the channel.
 * Note: pri_ch - central_ch relies on integer promotion, so negative
 * offsets divide as signed values.
 */
u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);
779 
rtw89_phy_check_swsi_busy(struct rtw89_dev * rtwdev)780 static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
781 {
782 	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
783 	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
784 }
785 
/* Read an RF register through its direct BB-mapped address.
 * Returns the field selected by @mask, or INV_RF_DATA for a bad path.
 */
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 direct_addr;

	if (rf_path >= chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	/* RF registers are mirrored into the BB address space at a
	 * per-path base, one dword per register.
	 */
	direct_addr = chip->rf_base_addr[rf_path] + ((addr & 0xff) << 2);

	return rtw89_phy_read32_mask(rtwdev, direct_addr, mask & RFREG_MASK);
}
EXPORT_SYMBOL(rtw89_phy_read_rf);
807 
/* Read an RF register through the SWSI (serial) interface: wait for the
 * interface to go idle, issue the read address, then poll the done flag
 * before fetching the value. Returns INV_RF_DATA on any timeout.
 */
static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	/* Issue the read request: encode path and register address. */
	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}
840 
/* V1 RF read dispatcher: the ADSEL bit in @addr selects direct
 * BB-mapped access, otherwise the read goes through the SWSI interface.
 */
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
857 
/* Read a full RF register value over the HWSI interface (V2 chips):
 * enable polling, wait until the interface is idle, issue the read,
 * wait for completion, then always disable polling again on the way
 * out. Returns INV_RF_DATA if either wait times out.
 */
static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	/* Per-path HWSI register addresses: result/status and control. */
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	/* Set the register address and trigger the read. */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}
895 
/* Read an RF register over HWSI and extract the field given by @mask. */
static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 full_val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);

	return (full_val & mask) >> __ffs(mask);
}
905 
/* V2 RF read dispatcher: the ADSEL bit in @addr selects direct
 * BB-mapped access, otherwise the read goes through HWSI.
 */
u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
922 
rtw89_phy_write_rf(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)923 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
924 			u32 addr, u32 mask, u32 data)
925 {
926 	const struct rtw89_chip_info *chip = rtwdev->chip;
927 	const u32 *base_addr = chip->rf_base_addr;
928 	u32 direct_addr;
929 
930 	if (rf_path >= rtwdev->chip->rf_path_num) {
931 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
932 		return false;
933 	}
934 
935 	addr &= 0xff;
936 	direct_addr = base_addr[rf_path] + (addr << 2);
937 	mask &= RFREG_MASK;
938 
939 	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);
940 
941 	/* delay to ensure writing properly */
942 	udelay(1);
943 
944 	return true;
945 }
946 EXPORT_SYMBOL(rtw89_phy_write_rf);
947 
rtw89_phy_write_rf_a(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)948 static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
949 				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
950 				 u32 data)
951 {
952 	u8 bit_shift;
953 	u32 val;
954 	bool busy, b_msk_en = false;
955 	int ret;
956 
957 	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
958 				       1, 30, false, rtwdev);
959 	if (ret) {
960 		rtw89_err(rtwdev, "write rf busy swsi\n");
961 		return false;
962 	}
963 
964 	data &= RFREG_MASK;
965 	mask &= RFREG_MASK;
966 
967 	if (mask != RFREG_MASK) {
968 		b_msk_en = true;
969 		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
970 				       mask);
971 		bit_shift = __ffs(mask);
972 		data = (data << bit_shift) & RFREG_MASK;
973 	}
974 
975 	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
976 	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
977 	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
978 	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);
979 
980 	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);
981 
982 	return true;
983 }
984 
rtw89_phy_write_rf_v1(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)985 bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
986 			   u32 addr, u32 mask, u32 data)
987 {
988 	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
989 
990 	if (rf_path >= rtwdev->chip->rf_path_num) {
991 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
992 		return false;
993 	}
994 
995 	if (ad_sel)
996 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
997 	else
998 		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
999 }
1000 EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
1001 
/* Write a full 20-bit value to an RF register through the HWSI interface.
 * Returns false if the interface does not become idle in time.
 */
static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	/* per-path HWSI status and write-data register addresses */
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	/* wait (up to ~3.8 ms) until status BIT(29) reads 0 before writing */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	/* pack address and data into one HWSI write-data word */
	val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
	      u32_encode_bits(data, B_HWSI_DATA_VAL);

	rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);

	return true;
}
1027 
1028 static
rtw89_phy_write_rf_a_v2(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1029 bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1030 			     u32 addr, u32 mask, u32 data)
1031 {
1032 	u32 val;
1033 
1034 	if (mask == RFREG_MASK) {
1035 		val = data;
1036 	} else {
1037 		val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
1038 		val &= ~mask;
1039 		val |= (data << __ffs(mask)) & mask;
1040 	}
1041 
1042 	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
1043 }
1044 
rtw89_phy_write_rf_v2(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1045 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1046 			   u32 addr, u32 mask, u32 data)
1047 {
1048 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1049 
1050 	if (rf_path >= rtwdev->chip->rf_path_num) {
1051 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1052 		return INV_RF_DATA;
1053 	}
1054 
1055 	if (ad_sel)
1056 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1057 	else
1058 		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
1059 }
1060 EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
1061 
/* True when this chip uses the v1 RF write path (rtw89_phy_write_rf_v1). */
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}
1066 
rtw89_phy_bb_reset(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)1067 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
1068 			       enum rtw89_phy_idx phy_idx)
1069 {
1070 	const struct rtw89_chip_info *chip = rtwdev->chip;
1071 
1072 	chip->ops->bb_reset(rtwdev, phy_idx);
1073 }
1074 
rtw89_phy_config_bb_reg(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1075 static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
1076 				    const struct rtw89_reg2_def *reg,
1077 				    enum rtw89_rf_path rf_path,
1078 				    void *extra_data)
1079 {
1080 	u32 addr;
1081 
1082 	if (reg->addr == 0xfe) {
1083 		mdelay(50);
1084 	} else if (reg->addr == 0xfd) {
1085 		mdelay(5);
1086 	} else if (reg->addr == 0xfc) {
1087 		mdelay(1);
1088 	} else if (reg->addr == 0xfb) {
1089 		udelay(50);
1090 	} else if (reg->addr == 0xfa) {
1091 		udelay(5);
1092 	} else if (reg->addr == 0xf9) {
1093 		udelay(1);
1094 	} else if (reg->data == BYPASS_CR_DATA) {
1095 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
1096 	} else {
1097 		addr = reg->addr;
1098 
1099 		if ((uintptr_t)extra_data == RTW89_PHY_1)
1100 			addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
1101 
1102 		rtw89_phy_write32(rtwdev, addr, reg->data);
1103 	}
1104 }
1105 
/* Overlay decoding the 32-bit "addr" word of a BB gain table entry.
 * Byte 0 is either a plain type id, or a rxsc_start/bw nibble pair for
 * RPL-offset entries; bytes 1-3 carry the RF path, gain band and config
 * type. NOTE(review): field placement assumes little-endian layout —
 * consistent with the rest of this driver's packed unions.
 */
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;       /* RF path index */
		u8 gain_band;  /* gain band index (< RTW89_BB_GAIN_BAND_NR) */
		u8 cfg_type;   /* selects the cfg handler (see config_bb_gain_ax) */
	};
} __packed;
1121 
1122 static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1123 rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
1124 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1125 {
1126 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1127 	u8 type = arg.type;
1128 	u8 path = arg.path;
1129 	u8 gband = arg.gain_band;
1130 	int i;
1131 
1132 	switch (type) {
1133 	case 0:
1134 		for (i = 0; i < 4; i++, data >>= 8)
1135 			gain->lna_gain[gband][path][i] = data & 0xff;
1136 		break;
1137 	case 1:
1138 		for (i = 4; i < 7; i++, data >>= 8)
1139 			gain->lna_gain[gband][path][i] = data & 0xff;
1140 		break;
1141 	case 2:
1142 		for (i = 0; i < 2; i++, data >>= 8)
1143 			gain->tia_gain[gband][path][i] = data & 0xff;
1144 		break;
1145 	default:
1146 		rtw89_warn(rtwdev,
1147 			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
1148 			   arg.addr, data, type);
1149 		break;
1150 	}
1151 }
1152 
/* Start indices of the per-RX-sub-channel slots inside the rpl_ofst_*
 * arrays; index 0 (FULL) holds the whole-bandwidth entry.
 */
enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};
1160 
1161 static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1162 rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
1163 			  union rtw89_phy_bb_gain_arg arg, u32 data)
1164 {
1165 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1166 	u8 rxsc_start = arg.rxsc_start;
1167 	u8 bw = arg.bw;
1168 	u8 path = arg.path;
1169 	u8 gband = arg.gain_band;
1170 	u8 rxsc;
1171 	s8 ofst;
1172 	int i;
1173 
1174 	switch (bw) {
1175 	case RTW89_CHANNEL_WIDTH_20:
1176 		gain->rpl_ofst_20[gband][path] = (s8)data;
1177 		break;
1178 	case RTW89_CHANNEL_WIDTH_40:
1179 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1180 			gain->rpl_ofst_40[gband][path][0] = (s8)data;
1181 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1182 			for (i = 0; i < 2; i++, data >>= 8) {
1183 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1184 				ofst = (s8)(data & 0xff);
1185 				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
1186 			}
1187 		}
1188 		break;
1189 	case RTW89_CHANNEL_WIDTH_80:
1190 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1191 			gain->rpl_ofst_80[gband][path][0] = (s8)data;
1192 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1193 			for (i = 0; i < 4; i++, data >>= 8) {
1194 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1195 				ofst = (s8)(data & 0xff);
1196 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1197 			}
1198 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1199 			for (i = 0; i < 2; i++, data >>= 8) {
1200 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1201 				ofst = (s8)(data & 0xff);
1202 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1203 			}
1204 		}
1205 		break;
1206 	case RTW89_CHANNEL_WIDTH_160:
1207 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1208 			gain->rpl_ofst_160[gband][path][0] = (s8)data;
1209 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1210 			for (i = 0; i < 4; i++, data >>= 8) {
1211 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1212 				ofst = (s8)(data & 0xff);
1213 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1214 			}
1215 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
1216 			for (i = 0; i < 4; i++, data >>= 8) {
1217 				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
1218 				ofst = (s8)(data & 0xff);
1219 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1220 			}
1221 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1222 			for (i = 0; i < 4; i++, data >>= 8) {
1223 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1224 				ofst = (s8)(data & 0xff);
1225 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1226 			}
1227 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
1228 			for (i = 0; i < 2; i++, data >>= 8) {
1229 				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
1230 				ofst = (s8)(data & 0xff);
1231 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1232 			}
1233 		}
1234 		break;
1235 	default:
1236 		rtw89_warn(rtwdev,
1237 			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
1238 			   arg.addr, data, bw);
1239 		break;
1240 	}
1241 }
1242 
1243 static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1244 rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
1245 			     union rtw89_phy_bb_gain_arg arg, u32 data)
1246 {
1247 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1248 	u8 type = arg.type;
1249 	u8 path = arg.path;
1250 	u8 gband = arg.gain_band;
1251 	int i;
1252 
1253 	switch (type) {
1254 	case 0:
1255 		for (i = 0; i < 4; i++, data >>= 8)
1256 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1257 		break;
1258 	case 1:
1259 		for (i = 4; i < 7; i++, data >>= 8)
1260 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1261 		break;
1262 	default:
1263 		rtw89_warn(rtwdev,
1264 			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
1265 			   arg.addr, data, type);
1266 		break;
1267 	}
1268 }
1269 
1270 static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1271 rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
1272 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1273 {
1274 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1275 	u8 type = arg.type;
1276 	u8 path = arg.path;
1277 	u8 gband = arg.gain_band;
1278 	int i;
1279 
1280 	switch (type) {
1281 	case 0:
1282 		for (i = 0; i < 4; i++, data >>= 8)
1283 			gain->lna_op1db[gband][path][i] = data & 0xff;
1284 		break;
1285 	case 1:
1286 		for (i = 4; i < 7; i++, data >>= 8)
1287 			gain->lna_op1db[gband][path][i] = data & 0xff;
1288 		break;
1289 	case 2:
1290 		for (i = 0; i < 4; i++, data >>= 8)
1291 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1292 		break;
1293 	case 3:
1294 		for (i = 4; i < 8; i++, data >>= 8)
1295 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1296 		break;
1297 	default:
1298 		rtw89_warn(rtwdev,
1299 			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
1300 			   arg.addr, data, type);
1301 		break;
1302 	}
1303 }
1304 
/* Dispatch one BB gain table entry (AX chips) to the handler selected
 * by its cfg_type field. Entries for gain bands or RF paths this chip
 * does not support are silently ignored.
 */
static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	/* 0xf9-0xfe are delay opcodes; they must not appear in gain tables */
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
1350 
1351 static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,struct rtw89_fw_h2c_rf_reg_info * info)1352 rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
1353 			     const struct rtw89_reg2_def *reg,
1354 			     enum rtw89_rf_path rf_path,
1355 			     struct rtw89_fw_h2c_rf_reg_info *info)
1356 {
1357 	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
1358 	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
1359 
1360 	if (page >= RTW89_H2C_RF_PAGE_NUM) {
1361 		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
1362 			   rf_path, info->curr_idx);
1363 		return;
1364 	}
1365 
1366 	info->rtw89_phy_config_rf_h2c[page][idx] =
1367 		cpu_to_le32((reg->addr << 20) | reg->data);
1368 	info->curr_idx++;
1369 }
1370 
rtw89_phy_config_rf_reg_fw(struct rtw89_dev * rtwdev,struct rtw89_fw_h2c_rf_reg_info * info)1371 static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
1372 				      struct rtw89_fw_h2c_rf_reg_info *info)
1373 {
1374 	u16 remain = info->curr_idx;
1375 	u16 len = 0;
1376 	u8 i;
1377 	int ret = 0;
1378 
1379 	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
1380 		rtw89_warn(rtwdev,
1381 			   "rf reg h2c total len %d larger than %d\n",
1382 			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
1383 		ret = -EINVAL;
1384 		goto out;
1385 	}
1386 
1387 	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
1388 		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
1389 		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
1390 		if (ret)
1391 			goto out;
1392 	}
1393 out:
1394 	info->curr_idx = 0;
1395 
1396 	return ret;
1397 }
1398 
rtw89_phy_config_rf_reg_noio(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1399 static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
1400 					 const struct rtw89_reg2_def *reg,
1401 					 enum rtw89_rf_path rf_path,
1402 					 void *extra_data)
1403 {
1404 	u32 addr = reg->addr;
1405 
1406 	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
1407 	    addr == 0xfa || addr == 0xf9)
1408 		return;
1409 
1410 	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
1411 		return;
1412 
1413 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1414 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1415 }
1416 
rtw89_phy_config_rf_reg(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1417 static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
1418 				    const struct rtw89_reg2_def *reg,
1419 				    enum rtw89_rf_path rf_path,
1420 				    void *extra_data)
1421 {
1422 	if (reg->addr == 0xfe) {
1423 		mdelay(50);
1424 	} else if (reg->addr == 0xfd) {
1425 		mdelay(5);
1426 	} else if (reg->addr == 0xfc) {
1427 		mdelay(1);
1428 	} else if (reg->addr == 0xfb) {
1429 		udelay(50);
1430 	} else if (reg->addr == 0xfa) {
1431 		udelay(5);
1432 	} else if (reg->addr == 0xf9) {
1433 		udelay(1);
1434 	} else {
1435 		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
1436 		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1437 					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1438 	}
1439 }
1440 
rtw89_phy_config_rf_reg_v1(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1441 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
1442 				const struct rtw89_reg2_def *reg,
1443 				enum rtw89_rf_path rf_path,
1444 				void *extra_data)
1445 {
1446 	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
1447 
1448 	if (reg->addr < 0x100)
1449 		return;
1450 
1451 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1452 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1453 }
1454 EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
1455 
/* Choose the headline (conditional header) entry of a PHY parameter
 * table that applies to this device, trying in decreasing strictness:
 *   1) exact RFE type and exact chip version (CV)
 *   2) exact RFE type, CV "don't care"
 *   3) exact RFE type, highest CV present in the table
 *   4) RFE "don't care", highest CV present in the table
 * On success *headline_size is the number of headline entries and
 * *headline_idx the chosen one. Returns -EINVAL when nothing matches.
 */
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	/* headline entries are the leading ones flagged PHY_HEADLINE_VALID */
	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}
1537 
/* Walk a PHY parameter table and apply its entries through @config.
 * Tables may embed conditional regions (IF/ELIF/CHECK/ELSE/END opcodes
 * in the address field); only entries inside the branch whose CHECK
 * target equals the headline target selected for this RFE type / chip
 * version are applied.
 */
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	/* the selected headline entry defines the target branches match on */
	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			/* remember the branch target; CHECK decides the match */
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			/* reaching ELSE with no prior branch matched means the
			 * table has no case for this device - abort the load
			 */
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			/* only the first matching branch of a region applies */
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			/* plain register entry: apply if inside a matched branch */
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
1607 
rtw89_phy_init_bb_reg(struct rtw89_dev * rtwdev)1608 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
1609 {
1610 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1611 	const struct rtw89_chip_info *chip = rtwdev->chip;
1612 	const struct rtw89_phy_table *bb_table;
1613 	const struct rtw89_phy_table *bb_gain_table;
1614 
1615 	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
1616 	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
1617 	if (rtwdev->dbcc_en)
1618 		rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
1619 				   (void *)RTW89_PHY_1);
1620 	rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
1621 
1622 	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
1623 	if (bb_gain_table)
1624 		rtw89_phy_init_reg(rtwdev, bb_gain_table,
1625 				   chip->phy_def->config_bb_gain, NULL);
1626 	rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
1627 }
1628 
/* Kick the NCTL block by writing 0x4 to reg 0x8080, then read the
 * register back; used as the poll op in rtw89_phy_preinit_rf_nctl_ax().
 */
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}
1635 
/* Load the per-path RF radio tables (FW-provided tables take precedence
 * over built-in ones) and push the recorded registers to firmware for
 * restore. With @noio set, registers are only recorded, not written.
 */
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	const struct rtw89_phy_table *rf_table;
	u8 path;

	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		rf_table = elm_info->rf_radio[path] ?: chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;

		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ?: rtw89_phy_config_rf_reg;

		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}
1666 
/* RF NCTL pre-initialization for AX chips: enable the IQK/DPK clock,
 * release path resets, then poll register 0x8080 until the NCTL block
 * reports ready (reads back 0x4).
 */
static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	/* the second-path reset is skipped on RTL8851B */
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	/* RTL8852B needs an extra IOQ bit set */
	if (chip->chip_id == RTL8852B)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2)

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	/* up to 1 ms for the NCTL block to come up */
	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}
1690 
rtw89_phy_init_rf_nctl(struct rtw89_dev * rtwdev)1691 static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
1692 {
1693 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1694 	const struct rtw89_chip_info *chip = rtwdev->chip;
1695 	const struct rtw89_phy_table *nctl_table;
1696 
1697 	rtw89_phy_preinit_rf_nctl(rtwdev);
1698 
1699 	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
1700 	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
1701 
1702 	if (chip->nctl_post_table)
1703 		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
1704 }
1705 
/* Return the address offset from a PHY0 register to its PHY1 mirror on
 * AX chips. Register pages 0x6-0xd, 0x19-0x1b and 0x40-0x4f are
 * mirrored 0x2000 above PHY0; any other page has no PHY1 counterpart
 * and yields offset 0.
 */
static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;

	if ((phy_page >= 0x6 && phy_page <= 0xd) ||
	    (phy_page >= 0x19 && phy_page <= 0x1b) ||
	    (phy_page >= 0x40 && phy_page <= 0x4f))
		return 0x2000;

	return 0;
}
1736 
/* Masked BB register write, redirected into the PHY1 register window
 * when DBCC is enabled and @phy_idx selects PHY1.
 */
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	u32 ofst = 0;

	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		ofst = rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_mask(rtwdev, addr + ofst, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);
1745 
/* Masked BB register read, redirected into the PHY1 register window
 * when DBCC is enabled and @phy_idx selects PHY1.
 */
u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	u32 ofst = 0;

	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		ofst = rtw89_phy0_phy1_offset(rtwdev, addr);

	return rtw89_phy_read32_mask(rtwdev, addr + ofst, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);
1754 
/* Program a BB register on PHY0 and mirror the write to PHY1 when DBCC
 * is enabled.
 */
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (rtwdev->dbcc_en)
		rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
1766 
rtw89_phy_write_reg3_tbl(struct rtw89_dev * rtwdev,const struct rtw89_phy_reg3_tbl * tbl)1767 void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
1768 			      const struct rtw89_phy_reg3_tbl *tbl)
1769 {
1770 	const struct rtw89_reg3_def *reg3;
1771 	int i;
1772 
1773 	for (i = 0; i < tbl->size; i++) {
1774 		reg3 = &tbl->reg3[i];
1775 		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
1776 	}
1777 }
1778 EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
1779 
/* Number of rate indices within each rate section (AX generation). */
static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};
1787 
/* Number of spatial-stream entries within each rate section (AX). */
static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};
1795 
/* Return a pointer to the tx-power-by-rate slot described by @desc
 * inside @head. An unknown rate section warns and returns the scratch
 * "trap" slot so callers always receive a valid, writable location.
 */
s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
			   struct rtw89_txpwr_byrate *head,
			   const struct rtw89_rate_desc *desc)
{
	switch (desc->rs) {
	case RTW89_RS_CCK:
		return &head->cck[desc->idx];
	case RTW89_RS_OFDM:
		return &head->ofdm[desc->idx];
	case RTW89_RS_MCS:
		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_HEDCM:
		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_OFFSET:
		return &head->offset[desc->idx];
	default:
		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
		return &head->trap;
	}
}
1816 
rtw89_phy_load_txpwr_byrate(struct rtw89_dev * rtwdev,const struct rtw89_txpwr_table * tbl)1817 void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
1818 				 const struct rtw89_txpwr_table *tbl)
1819 {
1820 	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
1821 	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
1822 	struct rtw89_txpwr_byrate *byr_head;
1823 	struct rtw89_rate_desc desc = {};
1824 	s8 *byr;
1825 	u32 data;
1826 	u8 i;
1827 
1828 	for (; cfg < end; cfg++) {
1829 		byr_head = &rtwdev->byr[cfg->band][0];
1830 		desc.rs = cfg->rs;
1831 		desc.nss = cfg->nss;
1832 		data = cfg->data;
1833 
1834 		for (i = 0; i < cfg->len; i++, data >>= 8) {
1835 			desc.idx = cfg->shf + i;
1836 			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
1837 			*byr = data & 0xff;
1838 		}
1839 	}
1840 }
1841 EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
1842 
/* Rescale a tx-power value from the RF fixed-point factor to the MAC
 * factor by shifting off the extra fractional bits.
 */
static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 shift = chip->txpwr_factor_rf - chip->txpwr_factor_mac;

	return txpwr_rf >> shift;
}
1849 
rtw89_phy_read_txpwr_byrate(struct rtw89_dev * rtwdev,u8 band,u8 bw,const struct rtw89_rate_desc * rate_desc)1850 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
1851 			       const struct rtw89_rate_desc *rate_desc)
1852 {
1853 	struct rtw89_txpwr_byrate *byr_head;
1854 	s8 *byr;
1855 
1856 	if (rate_desc->rs == RTW89_RS_CCK)
1857 		band = RTW89_BAND_2G;
1858 
1859 	byr_head = &rtwdev->byr[band][bw];
1860 	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);
1861 
1862 	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
1863 }
1864 
/* Map a 6 GHz channel number to its index in the tx-power limit tables.
 * Valid channels come in eight blocks (1-29, 33-61, ..., 225-253), each
 * mapping onto 15 consecutive indices (block k covers indices 15k to
 * 15k+14). Unknown channels warn and map to index 0.
 */
static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
	case 225 ... 253:
		return (channel_6g - 15) / 2;
	default:
		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
		return 0;
	}
}
1889 
/* Map a 2/5/6 GHz channel number to its TX power table index. */
static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	/* 6 GHz uses its own, denser mapping */
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2;
	if (channel >= 100 && channel <= 144)
		return ((channel - 100) / 2) + 15;
	if (channel >= 149 && channel <= 177)
		return ((channel - 149) / 2) + 38;

	rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
	return 0;
}
1909 
/* Read the regulatory TX power limit for one rate section/beamforming
 * case on a given channel, then clamp it against the SAR limit for that
 * frequency.
 *
 * @band: RTW89_BAND_2G/5G/6G
 * @bw: channel width the limit applies to
 * @ntx: number of TX paths (table dimension)
 * @rs: rate section (CCK/OFDM/MCS/...)
 * @bf: beamforming on/off index
 * @ch: center channel of the queried segment
 *
 * A table value of 0 means "no entry for this regulatory domain"; the
 * worldwide (RTW89_WW) entry is used as fallback. Returns the limit in
 * MAC resolution, or 0 for an unknown band.
 */
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt = 0, sar;

	switch (band) {
	case RTW89_BAND_2G:
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		/* no entry for this regd: fall back to worldwide */
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		/* 6 GHz additionally keys on the 6 GHz power policy */
		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
	sar = rtw89_query_sar(rtwdev, freq);

	/* regulatory limit never exceeds the SAR limit */
	return min(lmt, sar);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
1960 
/* Fill @ptr (an array of RTW89_BF_NUM entries) with the TX power limit
 * for both the non-beamforming and beamforming cases of one rate
 * section; expects a local `rtwdev` in the caller's scope.
 */
#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)
1971 
/* Fill the limit page for a 20 MHz channel: every section is centred
 * on the channel itself.
 */
static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	/* CCK sections */
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_CCK, ch);

	/* OFDM section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_OFDM, ch);

	/* single MCS 20 MHz section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_MCS, ch);
}
1986 
/* Fill the limit page for a 40 MHz channel: CCK on the lower half and
 * the full 40 MHz, OFDM on the primary 20 MHz, MCS on both halves plus
 * the full channel.
 */
static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	/* CCK sections */
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_CCK, ch);

	/* OFDM follows the primary channel */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_OFDM, pri_ch);

	/* MCS 20 MHz halves and the full 40 MHz channel */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch);
}
2007 
/* Fill the limit page for an 80 MHz channel; subchannel centers are
 * derived from fixed offsets around the channel center.
 */
static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	static const int ofst_20m[] = { -6, -2, 2, 6 };
	static const int ofst_40m[] = { -4, 4 };
	s8 bw40_lo[RTW89_BF_NUM];
	s8 bw40_hi[RTW89_BF_NUM];
	u8 i;

	/* OFDM follows the primary channel */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_OFDM, pri_ch);

	/* four 20 MHz subchannels */
	for (i = 0; i < ARRAY_SIZE(ofst_20m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[i], band,
					    RTW89_CHANNEL_WIDTH_20, ntx,
					    RTW89_RS_MCS, ch + ofst_20m[i]);

	/* two 40 MHz subchannels */
	for (i = 0; i < ARRAY_SIZE(ofst_40m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[i], band,
					    RTW89_CHANNEL_WIDTH_40, ntx,
					    RTW89_RS_MCS, ch + ofst_40m[i]);

	/* full 80 MHz channel */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80, ntx,
				    RTW89_RS_MCS, ch);

	/* 0.5-offset 40 MHz section: min of the two neighbours */
	__fill_txpwr_limit_nonbf_bf(bw40_lo, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(bw40_hi, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, bw40_lo[i], bw40_hi[i]);
}
2048 
/* Fill the limit page for a 160 MHz channel; subchannel centers are
 * derived from fixed offsets around the channel center.
 */
static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev,
					       struct rtw89_txpwr_limit_ax *lmt,
					       u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	static const int ofst_20m[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	static const int ofst_40m[] = { -12, -4, 4, 12 };
	static const int ofst_80m[] = { -8, 8 };
	s8 bw40_lo[RTW89_BF_NUM];
	s8 bw40_hi[RTW89_BF_NUM];
	u8 i;

	/* OFDM follows the primary channel */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band,
				    RTW89_CHANNEL_WIDTH_20, ntx,
				    RTW89_RS_OFDM, pri_ch);

	/* eight 20 MHz subchannels */
	for (i = 0; i < ARRAY_SIZE(ofst_20m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[i], band,
					    RTW89_CHANNEL_WIDTH_20, ntx,
					    RTW89_RS_MCS, ch + ofst_20m[i]);

	/* four 40 MHz subchannels */
	for (i = 0; i < ARRAY_SIZE(ofst_40m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[i], band,
					    RTW89_CHANNEL_WIDTH_40, ntx,
					    RTW89_RS_MCS, ch + ofst_40m[i]);

	/* two 80 MHz subchannels */
	for (i = 0; i < ARRAY_SIZE(ofst_80m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[i], band,
					    RTW89_CHANNEL_WIDTH_80, ntx,
					    RTW89_RS_MCS, ch + ofst_80m[i]);

	/* full 160 MHz channel */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160, ntx,
				    RTW89_RS_MCS, ch);

	/* 0.5-offset 40 MHz section: min of the two neighbours */
	__fill_txpwr_limit_nonbf_bf(bw40_lo, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(bw40_hi, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, bw40_lo[i], bw40_hi[i]);

	/* 2.5-offset 40 MHz section: min of the two neighbours */
	__fill_txpwr_limit_nonbf_bf(bw40_lo, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(bw40_hi, band,
				    RTW89_CHANNEL_WIDTH_40, ntx,
				    RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, bw40_lo[i], bw40_hi[i]);
}
2134 
2135 static
rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,struct rtw89_txpwr_limit_ax * lmt,u8 ntx)2136 void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev,
2137 				   const struct rtw89_chan *chan,
2138 				   struct rtw89_txpwr_limit_ax *lmt,
2139 				   u8 ntx)
2140 {
2141 	u8 band = chan->band_type;
2142 	u8 pri_ch = chan->primary_channel;
2143 	u8 ch = chan->channel;
2144 	u8 bw = chan->band_width;
2145 
2146 	memset(lmt, 0, sizeof(*lmt));
2147 
2148 	switch (bw) {
2149 	case RTW89_CHANNEL_WIDTH_20:
2150 		rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch);
2151 		break;
2152 	case RTW89_CHANNEL_WIDTH_40:
2153 		rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch,
2154 						  pri_ch);
2155 		break;
2156 	case RTW89_CHANNEL_WIDTH_80:
2157 		rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch,
2158 						  pri_ch);
2159 		break;
2160 	case RTW89_CHANNEL_WIDTH_160:
2161 		rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch,
2162 						   pri_ch);
2163 		break;
2164 	}
2165 }
2166 
/* Read the regulatory RU (OFDMA resource unit) TX power limit for one
 * RU size on a given channel, then clamp it against the SAR limit.
 *
 * @band: RTW89_BAND_2G/5G/6G
 * @ru: RU size (RTW89_RU26/RU52/RU106)
 * @ntx: number of TX paths (table dimension)
 * @ch: center channel of the queried segment
 *
 * A table value of 0 means "no entry"; the worldwide (RTW89_WW) entry is
 * the fallback. Returns the limit in MAC resolution, or 0 for an
 * unknown band.
 */
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
				 u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt_ru = 0, sar;

	switch (band) {
	case RTW89_BAND_2G:
		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		/* no entry for this regd: fall back to worldwide */
		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		/* 6 GHz additionally keys on the 6 GHz power policy */
		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
	sar = rtw89_query_sar(rtwdev, freq);

	/* regulatory limit never exceeds the SAR limit */
	return min(lmt_ru, sar);
}
2216 
2217 static void
rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2218 rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev,
2219 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2220 				     u8 band, u8 ntx, u8 ch)
2221 {
2222 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2223 							RTW89_RU26,
2224 							ntx, ch);
2225 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2226 							RTW89_RU52,
2227 							ntx, ch);
2228 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2229 							 RTW89_RU106,
2230 							 ntx, ch);
2231 }
2232 
2233 static void
rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2234 rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev,
2235 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2236 				     u8 band, u8 ntx, u8 ch)
2237 {
2238 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2239 							RTW89_RU26,
2240 							ntx, ch - 2);
2241 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2242 							RTW89_RU26,
2243 							ntx, ch + 2);
2244 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2245 							RTW89_RU52,
2246 							ntx, ch - 2);
2247 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2248 							RTW89_RU52,
2249 							ntx, ch + 2);
2250 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2251 							 RTW89_RU106,
2252 							 ntx, ch - 2);
2253 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2254 							 RTW89_RU106,
2255 							 ntx, ch + 2);
2256 }
2257 
2258 static void
rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2259 rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev,
2260 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2261 				     u8 band, u8 ntx, u8 ch)
2262 {
2263 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2264 							RTW89_RU26,
2265 							ntx, ch - 6);
2266 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2267 							RTW89_RU26,
2268 							ntx, ch - 2);
2269 	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2270 							RTW89_RU26,
2271 							ntx, ch + 2);
2272 	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2273 							RTW89_RU26,
2274 							ntx, ch + 6);
2275 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2276 							RTW89_RU52,
2277 							ntx, ch - 6);
2278 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2279 							RTW89_RU52,
2280 							ntx, ch - 2);
2281 	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2282 							RTW89_RU52,
2283 							ntx, ch + 2);
2284 	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2285 							RTW89_RU52,
2286 							ntx, ch + 6);
2287 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2288 							 RTW89_RU106,
2289 							 ntx, ch - 6);
2290 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2291 							 RTW89_RU106,
2292 							 ntx, ch - 2);
2293 	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2294 							 RTW89_RU106,
2295 							 ntx, ch + 2);
2296 	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2297 							 RTW89_RU106,
2298 							 ntx, ch + 6);
2299 }
2300 
2301 static void
rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2302 rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev,
2303 				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2304 				      u8 band, u8 ntx, u8 ch)
2305 {
2306 	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
2307 	int i;
2308 
2309 	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
2310 	for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) {
2311 		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2312 								RTW89_RU26,
2313 								ntx,
2314 								ch + ofst[i]);
2315 		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2316 								RTW89_RU52,
2317 								ntx,
2318 								ch + ofst[i]);
2319 		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2320 								 RTW89_RU106,
2321 								 ntx,
2322 								 ch + ofst[i]);
2323 	}
2324 }
2325 
2326 static
rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 ntx)2327 void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
2328 				      const struct rtw89_chan *chan,
2329 				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2330 				      u8 ntx)
2331 {
2332 	u8 band = chan->band_type;
2333 	u8 ch = chan->channel;
2334 	u8 bw = chan->band_width;
2335 
2336 	memset(lmt_ru, 0, sizeof(*lmt_ru));
2337 
2338 	switch (bw) {
2339 	case RTW89_CHANNEL_WIDTH_20:
2340 		rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx,
2341 						     ch);
2342 		break;
2343 	case RTW89_CHANNEL_WIDTH_40:
2344 		rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx,
2345 						     ch);
2346 		break;
2347 	case RTW89_CHANNEL_WIDTH_80:
2348 		rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx,
2349 						     ch);
2350 		break;
2351 	case RTW89_CHANNEL_WIDTH_160:
2352 		rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx,
2353 						      ch);
2354 		break;
2355 	}
2356 }
2357 
/* Program the per-rate TX power table (R_AX_PWR_BY_RATE) for one PHY.
 * Byrate entries are s8 values; hardware consumes them packed four per
 * 32-bit register, so values are accumulated in v[] and flushed on
 * every fourth index.
 */
static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur = {};
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	/* the flush-every-4 scheme below requires each rate section's
	 * index count to be a multiple of 4
	 */
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			/* skip sections that do not support this NSS */
			if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band, 0,
								    &cur);

				/* flush only after the fourth value */
				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
2413 
/* Program the per-rate-section TX power offsets for one PHY: five 4-bit
 * offset values (RTW89_RS_OFFSET section, NSS 1) packed into the low
 * 20 bits of R_AX_PWR_RATE_OFST_CTRL.
 */
static
void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);

	/* the packing below hard-codes exactly five 4-bit fields */
	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
	      FIELD_PREP(GENMASK(7, 4), v[1]) |
	      FIELD_PREP(GENMASK(11, 8), v[2]) |
	      FIELD_PREP(GENMASK(15, 12), v[3]) |
	      FIELD_PREP(GENMASK(19, 16), v[4]);

	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
				     GENMASK(19, 0), val);
}
2442 
/* Program TX power limits (R_AX_PWR_LMT) for one PHY, one page per TX
 * path count. struct rtw89_txpwr_limit_ax mirrors the hardware page
 * layout exactly (enforced by the BUILD_BUG_ON), so the filled struct
 * is streamed out as raw s8 bytes, four per 32-bit register write.
 */
static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev,
					 const struct rtw89_chan *chan,
					 enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ax lmt;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE_AX);

	addr = R_AX_PWR_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i);

		/* serialize the page four s8 entries per register */
		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
2477 
/* Program RU (OFDMA) TX power limits (R_AX_PWR_RU_LMT) for one PHY, one
 * page per TX path count. Like the non-RU variant, the struct mirrors
 * the hardware page layout and is streamed out as raw s8 bytes, four
 * per 32-bit register write.
 */
static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
					    const struct rtw89_chan *chan,
					    enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ru_ax lmt_ru;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) !=
		     RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX);

	addr = R_AX_PWR_RU_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i);

		/* serialize the page four s8 entries per register */
		ptr = (s8 *)&lmt_ru;
		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
2512 
/* Context handed to rtw89_phy_c2h_ra_rpt_iter() via the station iterator */
struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;	/* device that received the report */
	struct sk_buff *c2h;		/* raw C2H RA report frame */
};
2517 
/* Per-station iterator for firmware RA (rate adaptation) reports:
 * decode the reported TX rate into struct rate_info and derive the
 * A-MSDU aggregation limit from the resulting bitrate. Runs from
 * ieee80211_iterate_stations_atomic(), i.e. in atomic context.
 */
static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	/* the report targets a single station; ignore all others */
	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	/* BE-generation (v1) reports widen rate/bw/mode by one MSB each */
	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	/* bail out early if a legacy rate index cannot be mapped */
	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		/* old FW reports HT as separate NSS/MCS fields */
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		/* HT MCS within one spatial stream */
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss  = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		/* map firmware GI/LTF code onto nl80211 HE GI values */
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_EHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
		ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1);
		ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
		else
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	/* low MCS may fall back to legacy rates; get_max_amsdu_len() then
	 * disables aggregation
	 */
	ra_report->might_fallback_legacy = mcs <= 2;
	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	/* NOTE(review): when max_rc_amsdu_len < 1500 (e.g. the 1-byte limit
	 * returned for legacy rates) this computes 0 - 1 = -1 — confirm the
	 * type of max_agg_wait tolerates that intentionally.
	 */
	rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
}
2629 
2630 static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2631 rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2632 {
2633 	struct rtw89_phy_iter_ra_data ra_data;
2634 
2635 	ra_data.rtwdev = rtwdev;
2636 	ra_data.c2h = c2h;
2637 	ieee80211_iterate_stations_atomic(rtwdev->hw,
2638 					  rtw89_phy_c2h_ra_rpt_iter,
2639 					  &ra_data);
2640 }
2641 
/* Dispatch table for PHY RA C2H messages, indexed by C2H function ID.
 * NULL entries are recognized but intentionally ignored.
 */
static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};
2649 
/* Pretty-print a fixed-layout RFK report chunk received via C2H.
 *
 * @func selects which calibration produced the chunk; each supported
 * calibration has its own report struct, and a chunk whose length does
 * not match that struct is rejected via the "out" label. Calibrations
 * without a fixed-layout report (e.g. IQK, TSSI) also land there.
 */
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
	struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
	struct rtw89_c2h_rf_dack_rpt_log *dack;
	struct rtw89_c2h_rf_dpk_rpt_log *dpk;

	switch (func) {
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		/* DPK: dump per-path calibration summary */
		if (len != sizeof(*dpk))
			goto out;

		dpk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
			    dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
			    dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
			    dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
		/* DACK: dump I/Q correction words for both paths (S0/S1) */
		if (len != sizeof(*dack))
			goto out;

		dack = content;

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ver=0x%x 0x%x\n",
			    dack->fwdack_ver, dack->fwdack_rpt_ver);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n",
			    dack->addck2_d[0][0][0], dack->addck2_d[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n",
			    dack->addck2_d[0][1][0], dack->addck2_d[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n",
			    dack->addck2_d[1][0][0], dack->addck2_d[1][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n",
			    dack->addck2_d[1][1][0], dack->addck2_d[1][1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
			    dack->adgaink_d[0][0], dack->adgaink_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
			    dack->adgaink_d[1][0], dack->adgaink_d[1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
			    dack->dadck_d[0][0], dack->dadck_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
			    dack->dadck_d[1][0], dack->dadck_d[1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n",
			    dack->biask_d[0][0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n",
			    dack->biask_d[1][0]);

		/* MSBK tables are arrays; dump them as hex blobs */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic: %*ph\n",
			    (int)sizeof(dack->msbk_d[0][0]), dack->msbk_d[0][0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc: %*ph\n",
			    (int)sizeof(dack->msbk_d[0][1]), dack->msbk_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic: %*ph\n",
			    (int)sizeof(dack->msbk_d[1][0]), dack->msbk_d[1][0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc: %*ph\n",
			    (int)sizeof(dack->msbk_d[1][1]), dack->msbk_d[1][1]);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
		/* RX DCK: one-line summary */
		if (len != sizeof(*rxdck))
			goto out;

		rxdck = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n",
			    rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch,
			    rxdck->timeout);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
		/* TX gap K: registers, counters and per-path tables */
		if (len != sizeof(*txgapk))
			goto out;

		txgapk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
			    le32_to_cpu(txgapk->r0x8010[0]),
			    le32_to_cpu(txgapk->r0x8010[1]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
			    txgapk->chk_id);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
			    le32_to_cpu(txgapk->chk_cnt));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
			    txgapk->ver);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n",
			    txgapk->rsv1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
			    (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
			    (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
			    (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
			    (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
		return;
	default:
		break;
	}

out:
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "unexpected RFK func %d report log with length %d\n", func, len);
}
2770 
rtw89_phy_c2h_rfk_run_log(struct rtw89_dev * rtwdev,enum rtw89_phy_c2h_rfk_log_func func,void * content,u16 len)2771 static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
2772 				      enum rtw89_phy_c2h_rfk_log_func func,
2773 				      void *content, u16 len)
2774 {
2775 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
2776 	const struct rtw89_c2h_rf_run_log *log = content;
2777 	const struct rtw89_fw_element_hdr *elm;
2778 	u32 fmt_idx;
2779 	u16 offset;
2780 
2781 	if (sizeof(*log) != len)
2782 		return false;
2783 
2784 	if (!elm_info->rfk_log_fmt)
2785 		return false;
2786 
2787 	elm = elm_info->rfk_log_fmt->elm[func];
2788 	fmt_idx = le32_to_cpu(log->fmt_idx);
2789 	if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
2790 		return false;
2791 
2792 	offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
2793 	if (offset == 0)
2794 		return false;
2795 
2796 	rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
2797 		    le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
2798 		    le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));
2799 
2800 	return true;
2801 }
2802 
/* Walk and print the RFK log chunks carried in a C2H event.
 *
 * The payload after the C2H header is a sequence of chunks, each led by
 * a struct rtw89_c2h_rf_log_hdr giving the chunk type and the length of
 * the content that follows. RUN_LOG chunks are formatted through the
 * firmware format table when possible; RPT_LOG chunks are fixed-layout
 * reports dumped by rtw89_phy_c2h_rfk_rpt_log().
 */
static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
				  u32 len, enum rtw89_phy_c2h_rfk_log_func func,
				  const char *rfk_name)
{
	struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_c2h_rf_log_hdr *log_hdr;
	void *log_ptr = c2h_hdr;
	u16 content_len;
	u16 chunk_len;
	bool handled;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
		return;

	/* Guard against a truncated C2H: without this check the
	 * "len -= sizeof(*c2h_hdr)" below would underflow the u32 and the
	 * chunk walker would read far past the end of the skb.
	 */
	if (len < sizeof(*c2h_hdr))
		return;

	log_ptr += sizeof(*c2h_hdr);
	len -= sizeof(*c2h_hdr);

	while (len > sizeof(*log_hdr)) {
		log_hdr = log_ptr;
		content_len = le16_to_cpu(log_hdr->len);
		chunk_len = content_len + sizeof(*log_hdr);

		/* Stop on a chunk that claims more data than remains */
		if (chunk_len > len)
			break;

		switch (log_hdr->type) {
		case RTW89_RF_RUN_LOG:
			handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
							    log_hdr->content, content_len);
			if (handled)
				break;

			/* No usable format entry - dump raw bytes */
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
				    rfk_name, content_len, log_hdr->content);
			break;
		case RTW89_RF_RPT_LOG:
			rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
						  log_hdr->content, content_len);
			break;
		default:
			/* Unknown chunk type: framing cannot be trusted */
			return;
		}

		log_ptr += chunk_len;
		len -= chunk_len;
	}
}
2850 
2851 static void
rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2852 rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2853 {
2854 	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
2855 			      RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
2856 }
2857 
2858 static void
rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2859 rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2860 {
2861 	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
2862 			      RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
2863 }
2864 
2865 static void
rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2866 rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2867 {
2868 	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
2869 			      RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
2870 }
2871 
2872 static void
rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2873 rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2874 {
2875 	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
2876 			      RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
2877 }
2878 
2879 static void
rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2880 rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2881 {
2882 	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
2883 			      RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
2884 }
2885 
2886 static void
rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)2887 rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
2888 {
2889 	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
2890 			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
2891 }
2892 
/* Dispatch table for C2H RFK-log events, indexed by calibration function. */
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
					       struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};
2903 
/* Arm the RFK wait object before an RFK H2C is sent, so the firmware's
 * report completion cannot be missed. Also records the start timestamp
 * used for the duration message in rtw89_phy_rfk_report_wait().
 */
static
void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = RTW89_RFK_STATE_START;
	wait->start_time = ktime_get();
	reinit_completion(&wait->completion);
}
2913 
2914 static
rtw89_phy_rfk_report_wait(struct rtw89_dev * rtwdev,const char * rfk_name,unsigned int ms)2915 int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
2916 			      unsigned int ms)
2917 {
2918 	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
2919 	unsigned long time_left;
2920 
2921 	/* Since we can't receive C2H event during SER, use a fixed delay. */
2922 	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
2923 		fsleep(1000 * ms / 2);
2924 		goto out;
2925 	}
2926 
2927 	time_left = wait_for_completion_timeout(&wait->completion,
2928 						msecs_to_jiffies(ms));
2929 	if (time_left == 0) {
2930 		rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
2931 		return -ETIMEDOUT;
2932 	} else if (wait->state != RTW89_RFK_STATE_OK) {
2933 		rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
2934 			   rfk_name, wait->state);
2935 		return -EFAULT;
2936 	}
2937 
2938 out:
2939 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
2940 		    rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
2941 
2942 	return 0;
2943 }
2944 
/* C2H handler for the firmware RFK state report: publish the result and
 * wake any caller blocked in rtw89_phy_rfk_report_wait().
 */
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rfk_report *report =
		(const struct rtw89_c2h_rfk_report *)c2h->data;
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = report->state;
	wait->version = report->version;

	complete(&wait->completion);

	/* NOTE(review): the dump width assumes len >= sizeof(report->hdr);
	 * a shorter C2H would make the %*ph width negative - confirm the
	 * dispatcher guarantees the minimum length.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "RFK report state %d with version %d (%*ph)\n",
		    wait->state, wait->version,
		    (int)(len - sizeof(report->hdr)), &report->state);
}
2962 
/* Dispatch table for C2H RFK-report events, indexed by function ID. */
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
};
2968 
rtw89_phy_c2h_chk_atomic(struct rtw89_dev * rtwdev,u8 class,u8 func)2969 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
2970 {
2971 	switch (class) {
2972 	case RTW89_PHY_C2H_RFK_LOG:
2973 		switch (func) {
2974 		case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
2975 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
2976 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
2977 		case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
2978 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
2979 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
2980 			return true;
2981 		default:
2982 			return false;
2983 		}
2984 	case RTW89_PHY_C2H_RFK_REPORT:
2985 		switch (func) {
2986 		case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
2987 			return true;
2988 		default:
2989 			return false;
2990 		}
2991 	default:
2992 		return false;
2993 	}
2994 }
2995 
/* Top-level dispatcher for PHY-class C2H events: pick the per-class
 * handler table, bounds-check @func against it, and invoke the handler.
 * Unsupported classes/functions are logged and dropped.
 */
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	void (*handler)(struct rtw89_dev *rtwdev,
			struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
			handler = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_LOG:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
			handler = rtw89_phy_c2h_rfk_log_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_REPORT:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
			handler = rtw89_phy_c2h_rfk_report_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		/* LOWRT_RTY is deliberately ignored; any other DM function
		 * falls through and is reported as unsupported.
		 */
		if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
			return;
		fallthrough;
	default:
		rtw89_info(rtwdev, "c2h class %d not support\n", class);
		return;
	}
	if (!handler) {
		rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
			   func);
		return;
	}
	handler(rtwdev, skb, len);
}
3030 
/* Send the RFK pre-notify H2C and block until the firmware reports back
 * (or the wait times out). See rtw89_phy_rfk_report_wait() for errors.
 */
int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy_idx,
				    unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
3046 
/* Trigger a TSSI calibration in @tssi_mode and wait for the firmware's
 * RFK report (at most @ms milliseconds).
 */
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				enum rtw89_tssi_mode tssi_mode,
				unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
3063 
/* Trigger an IQK and wait for the firmware's RFK report. */
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
3079 
/* Trigger a DPK and wait for the firmware's RFK report. */
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
3095 
/* Trigger a TX gap K and wait for the firmware's RFK report. */
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy_idx,
				  unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
3111 
/* Trigger a DACK and wait for the firmware's RFK report. */
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
3127 
/* Trigger an RX DCK and wait for the firmware's RFK report. */
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 unsigned int ms)
{
	int ret;

	/* Arm the completion before triggering firmware to avoid a race */
	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
	if (ret == 0)
		ret = rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);

	return ret;
}
EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
3143 
/* Map a 2.4 GHz channel (1-14) to its CCK TSSI group index; channels
 * outside the table fall back to group 0.
 */
static u32 phy_tssi_get_cck_group(u8 ch)
{
	if (ch >= 1 && ch <= 2)
		return 0;
	if (ch >= 3 && ch <= 5)
		return 1;
	if (ch >= 6 && ch <= 8)
		return 2;
	if (ch >= 9 && ch <= 11)
		return 3;
	if (ch == 12 || ch == 13)
		return 4;
	if (ch == 14)
		return 5;

	return 0;
}
3163 
/* An "extra" TSSI group encodes a channel that lies between two calibrated
 * groups: bit 31 tags the value, and the low bits hold the index of the
 * first neighbor. Consumers average the DE values of IDX1 and IDX2
 * (see phy_tssi_get_ofdm_de()).
 */
#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
	((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
	(PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3171 
/* Map a 2.4/5 GHz channel to its OFDM TSSI group. Channels that fall
 * between two calibrated 5 GHz groups return an "extra" group (bit 31
 * set) so the caller averages both neighbors; unlisted channels map to
 * group 0.
 */
static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3239 
/* Map a 6 GHz channel (1-253) to its OFDM TSSI group.
 *
 * The original lookup table is perfectly periodic with a period of 8
 * channels: the first five channels of each span belong to group N, the
 * following three form the "extra" group between N and N+1 (bit 31 set,
 * caller averages both neighbors), and after every fourth group the
 * table has a three-channel gap that maps to group 0 like any unlisted
 * channel.
 */
static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
{
	u32 grp, rem;

	if (ch < 1 || ch > 253)
		return 0;

	grp = (ch - 1) / 8;
	rem = (ch - 1) % 8;

	if (rem < 5)
		return grp;

	if (grp % 4 == 3)
		return 0; /* gap channels after groups 3, 7, 11, ... */

	return PHY_TSSI_EXTRA_GROUP(grp);
}
3359 
/* Map a 2.4/5 GHz channel to its TSSI trim group. "Extra" groups (bit 31
 * set) mark channels between two calibrated groups and are averaged by
 * the caller; unlisted channels map to group 0.
 */
static u32 phy_tssi_get_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}
3387 
/* Map a 6 GHz channel (1-253) to its TSSI trim group.
 *
 * The original lookup table repeats every 16 channels: the first 13
 * channels of each span belong to group N, the remaining three form the
 * "extra" group between N and N+1 (bit 31 set) - except after
 * odd-numbered groups, where the table has a gap and channels fall back
 * to group 0 like any unlisted channel.
 */
static u32 phy_tssi_get_6g_trim_group(u8 ch)
{
	u32 grp, rem;

	if (ch < 1 || ch > 253)
		return 0;

	grp = (ch - 1) / 16;
	rem = (ch - 1) % 16;

	if (rem < 13)
		return grp;

	if (grp & 1)
		return 0; /* gap channels after odd groups */

	return PHY_TSSI_EXTRA_GROUP(grp);
}
3443 
/* Look up the OFDM/MCS TSSI DE value for @path on @chan's channel.
 * Channels mapped to an "extra" group average the DE of the two
 * neighboring calibrated groups. @phy is currently unused here.
 */
static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy,
			       const struct rtw89_chan *chan,
			       enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 gidx_1st;
	u32 gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	u32 gidx;
	s8 val;

	/* 6 GHz uses its own grouping and efuse table */
	if (band == RTW89_BAND_6G)
		goto calc_6g;

	gidx = phy_tssi_get_ofdm_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
		/* between two groups: average both neighbors */
		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;

calc_6g:
	gidx = phy_tssi_get_6g_ofdm_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
		/* between two groups: average both neighbors */
		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_6g_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}
3513 
/* Look up the OFDM TSSI trim DE for @path on @chan's channel, averaging
 * the two neighboring groups when the channel sits in an "extra" trim
 * group. @phy is currently unused here.
 */
static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    const struct rtw89_chan *chan,
				    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 tgidx_1st;
	u32 tgidx_2nd;
	s8 tde_1st;
	s8 tde_2nd;
	u32 tgidx;
	s8 val;

	/* 6 GHz uses its own trim grouping and table */
	if (band == RTW89_BAND_6G)
		goto calc_6g;

	tgidx = phy_tssi_get_trim_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
		/* between two groups: average both neighbors */
		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;

calc_6g:
	tgidx = phy_tssi_get_6g_trim_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
		/* between two groups: average both neighbors */
		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim_6g[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}
3585 
/* Fill the TSSI H2C command with per-path DE values derived from efuse
 * calibration data plus the trim offset for the current channel. Covers
 * RF paths A and B; the per-bandwidth fields all receive the same
 * combined value while the raw efuse values are reported separately.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
					       enum rtw89_phy_idx phy,
					       const struct rtw89_chan *chan,
					       struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	s8 trim_de;
	s8 ofdm_de;
	s8 cck_de;
	u8 gidx;
	s8 val;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
		trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
		h2c->curr_tssi_trim_de[i] = trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);

		gidx = phy_tssi_get_cck_group(ch);
		cck_de = tssi_info->tssi_cck[i][gidx];
		/* keep only the low byte of the combined efuse + trim sum */
		val = u32_get_bits(cck_de + trim_de, 0xff);

		h2c->curr_tssi_cck_de[i] = 0x0;
		h2c->curr_tssi_cck_de_20m[i] = val;
		h2c->curr_tssi_cck_de_40m[i] = val;
		h2c->curr_tssi_efuse_cck_de[i] = cck_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);

		ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
		/* keep only the low byte of the combined efuse + trim sum */
		val = u32_get_bits(ofdm_de + trim_de, 0xff);

		h2c->curr_tssi_ofdm_de[i] = 0x0;
		h2c->curr_tssi_ofdm_de_20m[i] = val;
		h2c->curr_tssi_ofdm_de_40m[i] = val;
		h2c->curr_tssi_ofdm_de_80m[i] = val;
		h2c->curr_tssi_ofdm_de_160m[i] = val;
		h2c->curr_tssi_ofdm_de_320m[i] = val;
		h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
	}
}
3637 
/* Fill the thermal-meter compensation table of a TSSI RF H2C command.
 *
 * For each RF path (A/B) this selects the per-subband tx-power tracking
 * delta tables (one for the positive and one for the negative thermal
 * direction) from the firmware txpwr_trk element, expands them into a
 * 128-entry offset table indexed by thermal reading, and copies that
 * table into @h2c with the bytes of every 4-byte group reversed.
 * NOTE(review): the 4-byte reversal presumably matches the byte order
 * the firmware expects in ftable[] - confirm against the H2C layout.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
					      enum rtw89_phy_idx phy,
					      const struct rtw89_chan *chan,
					      struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const s8 *thm_up[RF_PATH_B + 1] = {};
	const s8 *thm_down[RF_PATH_B + 1] = {};
	u8 subband = chan->subband_type;
	s8 thm_ofst[128] = {0};
	u8 thermal;
	u8 path;
	u8 i, j;

	/* Pick the up/down delta-swing tables for this subband; an
	 * unrecognized subband falls back to the 2G tables (default
	 * deliberately shares the RTW89_CH_2G case).
	 */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] tmeter tbl on subband: %u\n", subband);

	for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
		thermal = tssi_info->thermal[path];
		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "path: %u, pg thermal: 0x%x\n", path, thermal);

		/* 0xff = no PG thermal value in efuse: report a default
		 * reference (0x38) and an all-zero compensation table.
		 */
		if (thermal == 0xff) {
			h2c->pg_thermal[path] = 0x38;
			memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
			continue;
		}

		h2c->pg_thermal[path] = thermal;

		/* Entries 0..63: positive thermal direction; saturate at the
		 * last delta-swing entry once the table is exhausted.
		 */
		i = 0;
		for (j = 0; j < 64; j++)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      thm_up[path][i++] :
				      thm_up[path][DELTA_SWINGIDX_SIZE - 1];

		/* Entries 127..64: negative thermal direction, negated
		 * "down" deltas; i starts at 1 so entry 127 uses
		 * thm_down[path][1], with the same saturation rule.
		 */
		i = 1;
		for (j = 127; j >= 64; j--)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      -thm_down[path][i++] :
				      -thm_down[path][DELTA_SWINGIDX_SIZE - 1];

		/* Copy into the H2C buffer, reversing each 4-byte group. */
		for (i = 0; i < 128; i += 4) {
			h2c->ftable[path][i + 0] = thm_ofst[i + 3];
			h2c->ftable[path][i + 1] = thm_ofst[i + 2];
			h2c->ftable[path][i + 2] = thm_ofst[i + 1];
			h2c->ftable[path][i + 3] = thm_ofst[i + 0];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "thm ofst [%x]: %02x %02x %02x %02x\n",
				    i, thm_ofst[i], thm_ofst[i + 1],
				    thm_ofst[i + 2], thm_ofst[i + 3]);
		}
	}
}
3750 
rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev * rtwdev,bool sc_xo)3751 static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
3752 {
3753 	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
3754 	u32 reg_mask;
3755 
3756 	if (sc_xo)
3757 		reg_mask = xtal->sc_xo_mask;
3758 	else
3759 		reg_mask = xtal->sc_xi_mask;
3760 
3761 	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
3762 }
3763 
/* Program a crystal-cap code into the XO or XI field of the chip's xtal
 * cap register.
 */
static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
				       u8 val)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 mask = sc_xo ? xtal->sc_xo_mask : xtal->sc_xi_mask;

	rtw89_write32_mask(rtwdev, xtal->xcap_reg, mask, val);
}
3777 
/* Program a new crystal-cap code into both the XO and XI fields, read the
 * values back, and refresh the software tracking state (@crystal_cap and
 * its signed offset from the default). @force re-writes even when the
 * cached value already matches.
 */
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	/* cap codes are 7 bits wide */
	crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		/* these chips expose the cap field directly via xtal_info */
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		/* other chips go through the XTAL serial interface */
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	/* cache what the hardware actually took (XI read-back) */
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}
3810 
rtw89_phy_cfo_reset(struct rtw89_dev * rtwdev)3811 static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
3812 {
3813 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
3814 	u8 cap;
3815 
3816 	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
3817 	cfo->is_adjust = false;
3818 	if (cfo->crystal_cap == cfo->def_x_cap)
3819 		return;
3820 	cap = cfo->crystal_cap;
3821 	cap += (cap > cfo->def_x_cap ? -1 : 1);
3822 	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
3823 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
3824 		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
3825 		    cfo->def_x_cap);
3826 }
3827 
/* Program digital CFO compensation into the baseband from the averaged
 * residual CFO. Skipped on RTL8922A (NOTE(review): presumably handled
 * elsewhere for that generation - confirm) and when no station is linked.
 */
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (rtwdev->chip->chip_id == RTL8922A)
		return;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	/* convert to hardware steps and bias by the current register value
	 * in the direction of the measured CFO.
	 * NOTE(review): the /625 scaling presumably maps the averaged CFO
	 * (pre-shifted by dcfo_comp_sft in the caller) onto 312.5 Hz
	 * hardware units - confirm against the BB programming guide.
	 */
	sign = curr_cfo > 0 ? 1 : -1;
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	/* RTL8852A CBV applies the compensation with inverted sign */
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}
3856 
rtw89_dcfo_comp_init(struct rtw89_dev * rtwdev)3857 static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
3858 {
3859 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
3860 	const struct rtw89_chip_info *chip = rtwdev->chip;
3861 	const struct rtw89_cfo_regs *cfo = phy->cfo;
3862 
3863 	rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1);
3864 	rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8);
3865 
3866 	if (chip->chip_gen == RTW89_CHIP_AX) {
3867 		if (chip->cfo_hw_comp) {
3868 			rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
3869 					   B_AX_PWR_UL_CFO_MASK, 0x6);
3870 		} else {
3871 			rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
3872 			rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2,
3873 					  B_AX_PWR_UL_CFO_MASK);
3874 		}
3875 	}
3876 }
3877 
/* Initialize CFO tracking state from efuse and program the default
 * crystal cap. Cap bounds (x_cap_ub/x_cap_lb) define the window outside
 * which tracking declares divergence.
 */
static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	/* allowed adjustment window around the default, within 7-bit range */
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
		    cfo->crystal_cap_default);
	/* force-write so hardware and software state start in sync */
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}
3905 
/* Step the crystal cap toward compensating @curr_cfo, with hysteresis:
 * tracking turns on above CFO_TRK_ENABLE_TH and off at/below
 * CFO_TRK_STOP_TH. The step size grows with |curr_cfo|.
 *
 * Fix: do the step arithmetic in int and clamp to [0, 127] before the
 * u8 conversion. With the previous s8 local, a step below zero (e.g.
 * cap=2, step -7) wrapped to a large u8 (251) which the setter then
 * clamped to 127 - jumping to the opposite extreme instead of 0.
 */
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	int crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int sign;

	if (curr_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
	/* hysteresis: enable above the high threshold, disable below the low */
	if (!cfo->is_adjust) {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	} else {
		if (cfo_abs <= CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	}
	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}
	/* larger residual CFO -> larger cap step */
	sign = curr_cfo > 0 ? 1 : -1;
	if (cfo_abs > CFO_TRK_STOP_TH_4)
		crystal_cap += 7 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		crystal_cap += 5 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		crystal_cap += 1 * sign;
	else
		return;

	/* keep within the 7-bit hardware range before converting to u8 */
	crystal_cap = clamp(crystal_cap, 0, 127);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}
3945 
rtw89_phy_average_cfo_calc(struct rtw89_dev * rtwdev)3946 static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
3947 {
3948 	const struct rtw89_chip_info *chip = rtwdev->chip;
3949 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
3950 	s32 cfo_khz_all = 0;
3951 	s32 cfo_cnt_all = 0;
3952 	s32 cfo_all_avg = 0;
3953 	u8 i;
3954 
3955 	if (rtwdev->total_sta_assoc != 1)
3956 		return 0;
3957 	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
3958 	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
3959 		if (cfo->cfo_cnt[i] == 0)
3960 			continue;
3961 		cfo_khz_all += cfo->cfo_tail[i];
3962 		cfo_cnt_all += cfo->cfo_cnt[i];
3963 		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
3964 		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
3965 		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
3966 					cfo_cnt_all);
3967 	}
3968 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
3969 		    "CFO track for macid = %d\n", i);
3970 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
3971 		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
3972 		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
3973 	return cfo_all_avg;
3974 }
3975 
/* Derive a single target CFO from the per-macid statistics when more than
 * one STA is associated, according to the configured multi-STA mode:
 * packet-based, entry-based, or throughput-weighted averaging.
 */
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);	/* most negative s32 */
	s32 min_cfo_ub = GENMASK(30, 0);	/* most positive s32 */
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		/* weight every packet equally across all entries */
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		/* weight every entry (STA) equally */
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		/* throughput-weighted average, constrained to the
		 * intersection of each entry's +/- tolerance window.
		 * NOTE(review): per-entry throughput weighting is not
		 * implemented yet (see the "need tp for each entry"
		 * markers): cfo_khz_all_tp_wgt is never accumulated, so
		 * cfo_avg is 0 here and the result comes from the
		 * clamp/fallback below - confirm intended behavior.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			/* counts iterated entries, not only active ones */
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				/* no fresh samples: reuse previous average */
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg =  phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			/* all tolerance windows overlap: clamp into them */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			/* no common window: fall back to per-entry average */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}
4075 
rtw89_phy_cfo_statistics_reset(struct rtw89_dev * rtwdev)4076 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
4077 {
4078 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
4079 
4080 	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
4081 	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
4082 	cfo->packet_count = 0;
4083 	cfo->packet_count_pre = 0;
4084 	cfo->cfo_avg_pre = 0;
4085 }
4086 
/* One CFO tracking iteration: compute the current average CFO, adjust the
 * crystal cap toward it, handle divergence lock-out, and program the
 * residual digital compensation.
 */
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	/* nothing to track: walk the cap back toward default */
	if (rtwdev->total_sta_assoc == 0) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	/* no fresh samples since the last round: keep current state */
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	/* divergence lock: hold at (resetting toward) default for
	 * CFO_PERIOD_CNT rounds before re-enabling adjustment.
	 */
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	/* cap hit its window bound: declare divergence and back off */
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update =  cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	/* a cap change already compensates part of the CFO: fine-tune the
	 * digital residual toward zero by one step.
	 */
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}
4148 
rtw89_phy_cfo_track_work(struct work_struct * work)4149 void rtw89_phy_cfo_track_work(struct work_struct *work)
4150 {
4151 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
4152 						cfo_track_work.work);
4153 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
4154 
4155 	mutex_lock(&rtwdev->mutex);
4156 	if (!cfo->cfo_trig_by_timer_en)
4157 		goto out;
4158 	rtw89_leave_ps_mode(rtwdev);
4159 	rtw89_phy_cfo_dm(rtwdev);
4160 	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
4161 				     msecs_to_jiffies(cfo->cfo_timer_ms));
4162 out:
4163 	mutex_unlock(&rtwdev->mutex);
4164 }
4165 
rtw89_phy_cfo_start_work(struct rtw89_dev * rtwdev)4166 static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
4167 {
4168 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
4169 
4170 	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
4171 				     msecs_to_jiffies(cfo->cfo_timer_ms));
4172 }
4173 
/* Watchdog entry for CFO tracking. Drives a small state machine
 * (NORMAL -> ENHANCE -> HOLD) on tx throughput and UL-OFDMA trigger-frame
 * rate: high throughput switches to fast timer-driven tracking; sustained
 * UL OFDMA holds tracking (hardware/UL accumulation takes over); low
 * throughput falls back to the slow watchdog-driven path.
 */
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		/* busy tx: switch to fast, timer-driven tracking */
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			/* sustained UL OFDMA: stop software adjustments */
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	/* timer-driven mode runs rtw89_phy_cfo_dm() from the work instead */
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}
4232 
/* Accumulate one rx PPDU's CFO sample into the per-macid statistics used
 * by the tracking algorithm.
 */
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 macid = phy_ppdu->mac_id;

	if (macid >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
		return;
	}

	cfo->packet_count++;
	cfo->cfo_cnt[macid]++;
	cfo->cfo_tail[macid] += cfo_val;
}
4248 
rtw89_phy_ul_tb_assoc(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif)4249 void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
4250 {
4251 	const struct rtw89_chip_info *chip = rtwdev->chip;
4252 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
4253 						       rtwvif->sub_entity_idx);
4254 	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
4255 
4256 	if (!chip->ul_tb_waveform_ctrl)
4257 		return;
4258 
4259 	rtwvif->def_tri_idx =
4260 		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);
4261 
4262 	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
4263 		rtwvif->dyn_tb_bedge_en = false;
4264 	else if (chan->band_type >= RTW89_BAND_5G &&
4265 		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
4266 		rtwvif->dyn_tb_bedge_en = true;
4267 	else
4268 		rtwvif->dyn_tb_bedge_en = false;
4269 
4270 	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
4271 		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
4272 		    ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx);
4273 	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
4274 		    "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
4275 		    rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
4276 }
4277 
/* UL trigger-based state gathered across all station vifs during one
 * watchdog round (see rtw89_phy_ul_tb_ctrl_check()).
 */
struct rtw89_phy_ul_tb_check_data {
	bool valid;		/* at least one associated STA contributed */
	bool high_tf_client;	/* trigger-frame rate above L2H threshold */
	bool low_tf_client;	/* trigger-frame rate below H2L threshold */
	bool dyn_tb_bedge_en;	/* band-edge control allowed for this vif */
	u8 def_tri_idx;		/* tx-shape triangular index latched at assoc */
};
4285 
/* Register values applied when toggling the UL OFDMA power-diff mode.
 * NOTE: initialized positionally by the table in
 * rtw89_phy_ofdma_power_diff() - keep member order in sync with it.
 */
struct rtw89_phy_power_diff {
	u32 q_00;		/* Q-matrix element (0,0), real part */
	u32 q_11;		/* Q-matrix element (1,1), real part */
	u32 q_matrix_en;	/* enable custom Q-matrix */
	u32 ultb_1t_norm_160;
	u32 ultb_2t_norm_160;
	u32 com1_norm_1sts;
	u32 com2_resp_1sts_path;
};
4295 
/* Apply (or revert) the UL OFDMA power-diff register set for this vif.
 * table[0] holds the "disabled" values, table[1] the "enabled" values;
 * writes only happen on a state change.
 */
static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif *rtwvif)
{
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	/* NOTE(review): when the state did not change, pwr_diff_en is
	 * forced back to false before returning - presumably to re-arm
	 * the edge detection for the next round; confirm against callers
	 * that set pwr_diff_en.
	 */
	if (rtwvif->pwr_diff_en == rtwvif->pre_pwr_diff_en) {
		rtwvif->pwr_diff_en = false;
		return;
	}

	rtwvif->pre_pwr_diff_en = rtwvif->pwr_diff_en;
	param = &table[rtwvif->pwr_diff_en];

	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	/* per-band MAC registers, selected by this vif's mac_idx */
	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}
4340 
4341 static
rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev * rtwdev,struct rtw89_vif * rtwvif,struct rtw89_phy_ul_tb_check_data * ul_tb_data)4342 void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
4343 				struct rtw89_vif *rtwvif,
4344 				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
4345 {
4346 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
4347 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
4348 
4349 	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
4350 		return;
4351 
4352 	if (!vif->cfg.assoc)
4353 		return;
4354 
4355 	if (rtwdev->chip->ul_tb_waveform_ctrl) {
4356 		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
4357 			ul_tb_data->high_tf_client = true;
4358 		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
4359 			ul_tb_data->low_tf_client = true;
4360 
4361 		ul_tb_data->valid = true;
4362 		ul_tb_data->def_tri_idx = rtwvif->def_tri_idx;
4363 		ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en;
4364 	}
4365 
4366 	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif);
4367 }
4368 
/* Adjust band-edge filtering and the tx-shape triangular index based on
 * the observed trigger-frame load: disable both for heavy UL OFDMA
 * clients, restore the defaults when the load drops. In the middle band
 * (neither high nor low) the registers are left untouched.
 */
static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			/* restore the value captured at init */
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			/* restore the index latched at association time */
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}
4407 
rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev * rtwdev)4408 void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
4409 {
4410 	const struct rtw89_chip_info *chip = rtwdev->chip;
4411 	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
4412 	struct rtw89_vif *rtwvif;
4413 
4414 	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
4415 		return;
4416 
4417 	if (rtwdev->total_sta_assoc != 1)
4418 		return;
4419 
4420 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
4421 		rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data);
4422 
4423 	if (!ul_tb_data.valid)
4424 		return;
4425 
4426 	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
4427 }
4428 
rtw89_phy_ul_tb_info_init(struct rtw89_dev * rtwdev)4429 static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
4430 {
4431 	const struct rtw89_chip_info *chip = rtwdev->chip;
4432 	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
4433 
4434 	if (!chip->ul_tb_waveform_ctrl)
4435 		return;
4436 
4437 	ul_tb_info->dyn_tb_tri_en = true;
4438 	ul_tb_info->def_if_bandedge =
4439 		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
4440 }
4441 
4442 static
rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats * antdiv_sts)4443 void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
4444 {
4445 	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
4446 	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
4447 	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
4448 	antdiv_sts->pkt_cnt_cck = 0;
4449 	antdiv_sts->pkt_cnt_ofdm = 0;
4450 	antdiv_sts->pkt_cnt_non_legacy = 0;
4451 	antdiv_sts->evm = 0;
4452 }
4453 
/* Fold one received PPDU into an antenna-diversity statistics instance,
 * classified by modulation: non-HT rates below OFDM6 are counted as CCK,
 * other non-HT rates as legacy OFDM, everything else as non-legacy
 * (HT/VHT/HE). EVM is only accumulated for OFDM-based packets.
 */
static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}
4473 
rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats * stats)4474 static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
4475 {
4476 	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
4477 	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
4478 		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
4479 	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
4480 		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
4481 		return ewma_rssi_read(&stats->ofdm_rssi_avg);
4482 	else
4483 		return ewma_rssi_read(&stats->cck_rssi_avg);
4484 }
4485 
/* Average the accumulated EVM over all OFDM-based packets (legacy OFDM
 * plus non-legacy). phy_div() is a helper defined elsewhere in this file;
 * presumably it tolerates a zero divisor — confirm at its definition.
 */
static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}
4490 
/* Per-PPDU hook for the antenna-diversity state machine. Always feeds the
 * "target" statistics; additionally feeds the main/aux instance matching
 * the currently selected RX antenna while a measurement window is open
 * (antdiv->get_stats). No-op when diversity is absent or pinned.
 */
void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	/* RF_A is treated as the main antenna, RF_B as the auxiliary one */
	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}
4510 
/* One-time register setup for antenna diversity on PHY 0. The exact
 * semantics of each field are hardware-defined; values mirror the
 * vendor-prescribed defaults. NOTE(review): the 0x0100 antenna mapping
 * constant is opaque here — meaning must be confirmed against the
 * register documentation.
 */
static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	/* Disable antenna-training and manual TX antenna selection */
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	/* Enable BTG TRX antenna selection, hand control to software */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}
4538 
/* Reset all three antenna-diversity statistics instances
 * (target, main, aux) at once.
 */
static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}
4547 
/* Initialize antenna-diversity software state and program the related
 * PHY registers. Skipped entirely when the HAL reports no diversity
 * capability.
 */
static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}
4561 
rtw89_phy_stat_thermal_update(struct rtw89_dev * rtwdev)4562 static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
4563 {
4564 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
4565 	int i;
4566 	u8 th;
4567 
4568 	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
4569 		th = rtw89_chip_get_thermal(rtwdev, i);
4570 		if (th)
4571 			ewma_thermal_add(&phystat->avg_thermal[i], th);
4572 
4573 		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
4574 			    "path(%d) thermal cur=%u avg=%ld", i, th,
4575 			    ewma_thermal_read(&phystat->avg_thermal[i]));
4576 	}
4577 }
4578 
/* Context passed through ieee80211_iterate_stations_atomic() to
 * rtw89_phy_stat_rssi_update_iter().
 */
struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_phy_ch_info *ch_info;	/* receives the minimum RSSI seen */
	bool rssi_changed;	/* any STA's RSSI moved beyond the threshold */
};
4584 
/* Per-station iterator: track the weakest station's RSSI (and its MAC id)
 * in ch_info, and flag a significant RSSI change for BT-coex notification.
 */
static void rtw89_phy_stat_rssi_update_iter(void *data,
					    struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_phy_iter_rssi_data *rssi_data =
					(struct rtw89_phy_iter_rssi_data *)data;
	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi);

	/* Remember the weakest associated station on this channel */
	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta->mac_id;
	}

	if (rtwsta->prev_rssi == 0) {
		/* First sample for this station: just latch it */
		rtwsta->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) {
		/* Moved by more than 3 units in the RSSI_FACTOR-scaled
		 * domain: latch the new value and mark the change.
		 */
		rtwsta->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}
4608 
rtw89_phy_stat_rssi_update(struct rtw89_dev * rtwdev)4609 static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
4610 {
4611 	struct rtw89_phy_iter_rssi_data rssi_data = {0};
4612 
4613 	rssi_data.rtwdev = rtwdev;
4614 	rssi_data.ch_info = &rtwdev->ch_info;
4615 	rssi_data.ch_info->rssi_min = U8_MAX;
4616 	ieee80211_iterate_stations_atomic(rtwdev->hw,
4617 					  rtw89_phy_stat_rssi_update_iter,
4618 					  &rssi_data);
4619 	if (rssi_data.rssi_changed)
4620 		rtw89_btc_ntfy_wl_sta(rtwdev);
4621 }
4622 
/* Initialize PHY statistics: seed the per-path thermal averages with a
 * first reading and zero the packet-statistics snapshots.
 */
static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
}
4636 
/* Periodic statistics work: refresh thermal and RSSI tracking, then
 * rotate the packet-statistics window (current becomes last, current
 * restarts from zero).
 */
void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}
4647 
/* Convert microseconds to CCX counter index units: divide by
 * CCX_US_BASE_RATIO scaled by the currently configured unit
 * (env->ccx_unit_idx), implemented as a right shift.
 */
static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}
4654 
/* Inverse of rtw89_phy_ccx_us_to_idx(): convert a CCX counter index back
 * to microseconds via the matching left shift.
 */
static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}
4661 
/* Initialize the CCX (channel/clear-channel measurement) top-level state
 * and enable the measurement engine with chip-generation-specific
 * register fields.
 */
static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	/* Software bookkeeping defaults */
	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	/* Enable CCX, arm trigger options and fire an initial measurement;
	 * EDCCA sampling restricted to the BW20 primary segment.
	 */
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			       RTW89_CCX_EDCCA_BW20_0);
}
4680 
/* Scale a raw CCX counter to a ratio out of @score (e.g. PERCENT/PERMIL)
 * with round-to-nearest, clamped to score - 1. A zero measurement period
 * yields a zero ratio (which the clamp may still map to score - 1 when
 * score itself is zero, exactly as the original did).
 */
static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
				    u16 score)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 numer = report * score + (env->ccx_period >> 1);
	u16 ratio = 0;

	if (env->ccx_period)
		ratio = numer / env->ccx_period;

	return ratio < score ? ratio : score - 1;
}
4694 
/* Convert a monitor time in milliseconds into a hardware (period, unit)
 * pair: pick the smallest counting unit (4/8/16/32 us) that keeps the
 * period within the hardware counter range, then derive the period in
 * that unit.
 */
static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u32 idx;
	u8 quotient;

	/* Cap at the maximum measurable window */
	if (time_ms >= CCX_MAX_PERIOD)
		time_ms = CCX_MAX_PERIOD;

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	if (quotient < 4)
		idx = RTW89_CCX_4_US;
	else if (quotient < 8)
		idx = RTW89_CCX_8_US;
	else if (quotient < 16)
		idx = RTW89_CCX_16_US;
	else
		idx = RTW89_CCX_32_US;

	*unit_idx = idx;
	/* ms -> 4us ticks, then shift down by the chosen unit exponent */
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}
4723 
/* Release the CCX racing (priority) lock: drop back to the lowest
 * priority level and return the IFS-CLM application to background mode.
 */
static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}
4735 
/* Recompute the IFS-CLM sampling thresholds when the application type
 * changes. Returns true when the thresholds were updated (caller must
 * then write them to hardware), false when nothing changed.
 */
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	/* Pick base threshold and multiplier for the application type;
	 * unknown types fall through with both left at 0.
	 */
	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}
4789 
/* Write the four cached IFS-CLM low/high threshold pairs to their
 * chip-specific registers. Each of the four regions has distinct
 * address/mask fields, hence the unrolled writes.
 */
static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			       env->ifs_clm_th_l[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			       env->ifs_clm_th_l[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			       env->ifs_clm_th_l[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			       env->ifs_clm_th_l[3]);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			       env->ifs_clm_th_h[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			       env->ifs_clm_th_h[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			       env->ifs_clm_th_h[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			       env->ifs_clm_th_h[3]);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
4820 
/* Initialize IFS-CLM: compute and program the default (INIT application)
 * thresholds, then enable collection for all four IFS regions.
 */
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {0};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
}
4841 
/* Arbitrate CCX measurement ownership by priority level. A new request
 * wins only if its level is strictly higher than the one owning an
 * ongoing measurement. Returns 0 on success, -EINVAL when the request
 * loses the race or the level is out of range.
 */
static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
				     enum rtw89_env_racing_lv level)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	int ret = 0;

	if (level >= RTW89_RAC_MAX_NUM) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARNING] Wrong LV=%d\n", level);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
		    env->ccx_rac_lv, level);

	if (env->ccx_ongoing) {
		if (level <= env->ccx_rac_lv)
			ret = -EINVAL;
		else
			/* Higher priority preempts the running measurement */
			env->ccx_ongoing = false;
	}

	if (ret == 0)
		env->ccx_rac_lv = level;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
		    !ret);

	return ret;
}
4873 
/* Kick off a CCX measurement: pulse the counter-clear and trigger bits
 * low then high. The 0-then-1 write order produces the edges the
 * hardware latches on; do not reorder these writes.
 */
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);

	env->ccx_ongoing = true;
}
4887 
/* Derive utilization ratios (percent/permil) and per-region average IFS
 * and CCA times from the raw IFS-CLM counters read in
 * rtw89_phy_ifs_clm_get_result().
 */
static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		/* Histogram overflow: mark the average as invalid */
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev,
							env->ifs_clm_avg[i]);
		}

		/* Average CCA time per histogram entry, round-to-nearest */
		res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}
4952 
/* Read back a completed IFS-CLM measurement from hardware into env.
 * Returns false when the done bit is not set (no report available);
 * otherwise fills the raw counters and derives the utility values.
 */
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	/* Bail out if the hardware has not finished counting yet */
	if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				  ccx->ifs_cnt_done_mask) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	/* Raw busy/false-alarm counters */
	env->ifs_clm_tx =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_tx_cnt_msk);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_edcca_excl_cca_fa_mask);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_cckcca_excl_fa_mask);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_ofdmcca_excl_fa_mask);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_cck_fa_mask);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_ofdm_fa_mask);

	/* Per-region IFS histogram counts */
	env->ifs_clm_his[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t1_his_mask);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t2_his_mask);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t3_his_mask);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t4_his_mask);

	/* Per-region average IFS, in counter index units */
	env->ifs_clm_avg[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t1_avg_mask);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t2_avg_mask);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t3_avg_mask);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t4_avg_mask);

	/* Per-region CCA time, in counter index units */
	env->ifs_clm_cca[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t1_cca_mask);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t2_cca_mask);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t3_cca_mask);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t4_cca_mask);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				      ccx->ifs_total_mask);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev);

	return true;
}
5051 
/* Configure an IFS-CLM measurement: acquire the CCX racing lock, program
 * the monitor period (only when it changed) and the application-specific
 * thresholds. Returns 0 on success, -EINVAL on bad monitor time or a
 * lost priority race.
 */
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	/* Only touch the period registers when the monitor time changed */
	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}
5095 
/* Periodic environment-monitor work: harvest the previous IFS-CLM result
 * (if any), then release the racing lock and re-arm a background
 * 1900 ms measurement. Skipped while CCX is under manual (debug)
 * control.
 */
void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	/* Retrigger only if at least one measurement was configured */
	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}
5127 
rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap * ie_page)5128 static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
5129 {
5130 	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
5131 	    *ie_page == RTW89_RSVD_9)
5132 		return false;
5133 	else if (*ie_page > RTW89_RSVD_9)
5134 		*ie_page -= 1;
5135 
5136 	return true;
5137 }
5138 
/* Map a (normalized) bitmap page to its register address: pages are
 * 4 bytes apart starting at R_PHY_STS_BITMAP_ADDR_START.
 */
static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}
5145 
/* Read the IE enable bitmap for one PHY-status page; returns 0 for an
 * invalid page.
 */
static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32(rtwdev, addr);
}
5158 
/* Write the IE enable bitmap for one PHY-status page; silently ignores
 * invalid pages. RTL8852A only implements a subset of the bitmap bits,
 * so the value is masked for that chip.
 */
static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32(rtwdev, addr, val);
}
5175 
/* Set or clear a single IE bit in one PHY-status page's enable bitmap
 * via read-modify-write.
 */
static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap);
	u32 mask = BIT(ie);

	val = enable ? (val | mask) : (val & ~mask);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val);
}
5190 
/* Enable or disable PHY-status reports for failed/broken receptions.
 * The hardware bits are "disable" flags, so enabling reports clears
 * them and vice versa. NOTE(review): phy_idx is currently unused in
 * the register accesses — confirm whether that is intentional for
 * this chip generation.
 */
static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_clr(rtwdev, physts->setting_addr,
				      physts->dis_trigger_fail_mask);
		rtw89_phy_write32_clr(rtwdev, physts->setting_addr,
				      physts->dis_trigger_brk_mask);
	} else {
		rtw89_phy_write32_set(rtwdev, physts->setting_addr,
				      physts->dis_trigger_fail_mask);
		rtw89_phy_write32_set(rtwdev, physts->setting_addr,
				      physts->dis_trigger_brk_mask);
	}
}
5210 
/* Configure which information elements each PHY-status page reports:
 * disable fail reports, enable the common IEs per packet class, and the
 * DL-MU definition IE for VHT/HE packets.
 */
static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true);
		/* Skip pages that don't carry the OFDM time-domain IE */
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true);
}
5238 
/* Read one DIG gain table (LNA or TIA, 2G or 5G) from the chip's gain
 * registers into the driver cache. Each register field holds a signed
 * 4-bit offset (after DIG_GAIN_SHIFT) applied on top of a per-index
 * base that steps by DIG_GAIN.
 */
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	/* Select destination array, base gain and register layout */
	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		/* Sign-extend the 4-bit field and add the stepped base */
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}
5290 
/* Refresh all DIG gain parameters from hardware: in-band packet power
 * and power-backoff values, then every LNA/TIA gain table. Only chips
 * with IGI support need this.
 */
static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	/* Field is a signed 8-bit value after the gain shift */
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}
5311 
/* DIG tuning constants: assumed RSSI while unassociated, RSSI thresholds
 * used to choose LNA/TIA indices, and false-alarm (permille) thresholds
 * per band and for the no-link case.
 */
static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
5317 
rtw89_phy_dig_update_rssi_info(struct rtw89_dev * rtwdev)5318 static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
5319 {
5320 	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
5321 	struct rtw89_dig_info *dig = &rtwdev->dig;
5322 	bool is_linked = rtwdev->total_sta_assoc > 0;
5323 
5324 	if (is_linked) {
5325 		dig->igi_rssi = ch_info->rssi_min >> 1;
5326 	} else {
5327 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
5328 		dig->igi_rssi = rssi_nolink;
5329 	}
5330 }
5331 
/* Select band-dependent DIG parameters for the current channel: gain
 * tables, false-alarm thresholds (link-state dependent), forced-gaincode
 * policy and dynamic PD threshold enable.
 */
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		/* 5G/6G share the "a" gain tables and force gaincode index */
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}
5359 
/* DIG reset defaults: PD lower-threshold offset, dynamic IGI floor and
 * performance-mode ceiling.  dynamic_pd_threshold_max is deliberately
 * zero-initialized (no explicit initializer).
 */
static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
5363 
/* Restore the DIG state machine to its defaults: maximum gaincode indices
 * for both the current and forced gaincode sets, default dynamic IGI and
 * PD bounds, and clear the previous link state.
 */
static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}
5381 
/* One-time DIG initialization: read gain tables from hardware, then reset
 * the DIG state machine and hardware registers.
 */
static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}
5387 
/* Map an RSSI value to an LNA gain index: weaker signal selects a higher
 * gain index.  Thresholds come from dig->igi_rssi_th.
 */
static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (rssi < dig->igi_rssi_th[0])
		return RTW89_DIG_GAIN_LNA_IDX6;
	if (rssi < dig->igi_rssi_th[1])
		return RTW89_DIG_GAIN_LNA_IDX5;
	if (rssi < dig->igi_rssi_th[2])
		return RTW89_DIG_GAIN_LNA_IDX4;
	if (rssi < dig->igi_rssi_th[3])
		return RTW89_DIG_GAIN_LNA_IDX3;
	if (rssi < dig->igi_rssi_th[4])
		return RTW89_DIG_GAIN_LNA_IDX2;

	return RTW89_DIG_GAIN_LNA_IDX1;
}
5408 
/* Map an RSSI value to a TIA gain index: below the lowest threshold use
 * the higher-gain TIA index, otherwise the default.
 */
static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	return rssi < dig->igi_rssi_th[0] ? RTW89_DIG_GAIN_TIA_IDX1 :
					    RTW89_DIG_GAIN_TIA_IDX0;
}
5421 
#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
/* Derive the RXB (baseband gain) index from RSSI and the already-chosen
 * LNA/TIA indices: estimate the wideband RSSI after analog gain, then
 * offset by the in-band power/backoff calibration and clamp to the valid
 * RXB index range.
 */
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}
5442 
/* Fill a complete AGC gaincode set (LNA, TIA, RXB indices) from an RSSI
 * value.  RXB depends on the LNA/TIA choices, so it is computed last.
 */
static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}
5454 
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
/* Adapt the IGI offset to the environment's false-alarm rate: classify the
 * combined CCK+OFDM FA permille into a noisy level, then raise the offset
 * by IGI_OFFSET_MUL per level (or drop it to zero in a quiet environment),
 * capped at IGI_OFFSET_MAX.  Result is stored in dig->fa_rssi_ofst.
 */
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	/* Classify the FA ratio against the band-specific thresholds. */
	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	/* Quiet environment with a near-zero offset: snap to zero. */
	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}
5496 
/* Program the initial LNA index on both RF paths. */
static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}
5506 
/* Program the initial TIA index on both RF paths. */
static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}
5516 
/* Program the initial RXB (baseband gain) index on both RF paths. */
static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}
5526 
/* Apply a full AGC gaincode set (LNA, TIA, RXB) to the hardware control
 * registers.  No-op on chips without IGI support.
 */
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}
5540 
/* Enable/disable "SDAGC follows PAGC" on both paths, for both the primary
 * and secondary 20 MHz segments.
 */
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
5557 
/* Write the IGI gaincode to hardware: either the forced set (when the
 * band policy requests it) or one derived from the current FA-adjusted
 * RSSI.  No-op on chips without IGI support.
 */
static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}
5575 
/* Program the dynamic packet-detection (PD) thresholds.  The OFDM PD lower
 * bound is derived from the RSSI minus a bandwidth-dependent backoff
 * ("under_region"); when @enable is false the bound is left at zero and
 * spatial-reuse PD is turned off.  Chips with CCK PD support also get a
 * CCK CCA threshold derived the same way.
 */
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

	/* AX-generation chips add a side-band filter compensation term. */
	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
		under_region += PD_TH_SB_FLTR_CMP_VAL;

	/* Wider channels need a larger backoff below the CCA threshold. */
	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		/* Register unit is 2 dB, hence the >> 1. */
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	/* CCK PD threshold, encoded relative to IGI_RSSI_MAX. */
	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
			       dig_regs->bmode_cca_rssi_limit_en, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			       dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
}
5644 
/* Reset DIG software state and hardware to a known baseline: default
 * gaincode, PD thresholds disabled, SDAGC decoupled from PAGC, and
 * band parameters re-selected for the current channel.
 */
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}
5656 
#define IGI_RSSI_MIN 10
/* Periodic DIG worker: refresh band parameters on link-state transitions,
 * fold the environment false-alarm rate into the IGI offset, derive the
 * dynamic IGI window from the current RSSI, and program the gaincode and
 * PD thresholds accordingly.
 */
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	/* One-shot bypass flag, consumed on first run. */
	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	/* Re-select parameters on first connect / first disconnect. */
	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);
	rtw89_phy_dig_update_rssi_info(rtwdev);

	dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
			    dig->igi_rssi - IGI_RSSI_MIN : 0;
	dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
	dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;

	dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
				 dig->dyn_igi_max);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	/* Couple SDAGC to PAGC only when the adjusted RSSI exceeds the
	 * dynamic PD ceiling.
	 */
	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}
5702 
/* Station iterator for TX path diversity: for the first non-TDLS station
 * on a station-role vif, compare per-path averaged RSSI and, when one path
 * clearly wins, switch the TX antenna and reprogram the RF mode muxes.
 * @data points to a bool so that only one station is considered per pass.
 */
static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool *done = data;
	u8 rssi_a, rssi_b;
	u32 candidate;

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
		return;

	if (*done)
		return;

	*done = true;

	rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);

	/* Require a clear margin before switching to avoid ping-pong. */
	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);

	/* Steer the RF mode muxes to the selected TX path. */
	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}
5745 
/* Periodic TX path diversity tracking entry point: iterate stations and
 * let the first eligible one decide the TX path.  No-op when the feature
 * is not enabled in HAL.
 */
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}
5758 
#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

/* Program the antenna selection registers from hal->antenna_tx: RF_B maps
 * to the AUX antenna as default, anything else to MAIN.  No-op when
 * antenna diversity is off or no antenna has been chosen yet.
 */
static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}
5787 
rtw89_phy_swap_hal_antenna(struct rtw89_dev * rtwdev)5788 static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
5789 {
5790 	struct rtw89_hal *hal = &rtwdev->hal;
5791 
5792 	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
5793 	hal->antenna_tx = hal->antenna_rx;
5794 }
5795 
/* End of an antenna-diversity training round: compare the statistics
 * gathered on the main and aux antennas (EVM first, then RSSI) and commit
 * the winner to HAL; if neither side wins by its margin, swap back to the
 * pre-training antenna.
 */
static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	/* EVM takes priority over RSSI; both require a hysteresis margin. */
	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}
5833 
/* One step of the antenna training state machine.  Even steps collect
 * statistics on the current antenna for ANTDIV_TRAINNING_INTVL; odd steps
 * stop collection, swap to the other antenna and wait ANTDIV_DELAY before
 * the next measurement.  Re-queues itself via antdiv_work.
 */
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		/* Fresh training sequence: clear accumulated statistics. */
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}
5857 
/* Delayed-work handler driving antenna diversity: keep training until the
 * round count is exhausted, then decide on and apply the best antenna.
 * Runs under the device mutex.
 */
void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}
5875 
/* Periodic antenna-diversity trigger: start a new training sequence only
 * when the target RSSI moved by at least ANTDIV_RSSI_DIFF_TH since the
 * last check.  No-op when diversity is off or the antenna is fixed.
 */
void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}
5896 
/* Initialize the channel environment monitor (CCX top block and IFS
 * clear-channel measurement).
 */
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}
5902 
/* Initialize EDCCA: clear the saved-register backup, apply an 8922A CAV
 * chip-revision-specific register workaround sequence (order matters), and
 * set the TX collision T2R state.
 */
static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	memset(edcca_bak, 0, sizeof(*edcca_bak));

	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		/* Pulse SEGSND and re-enable the DFS FFT. */
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
			       edcca_regs->tx_collision_t2r_st_mask, 0x29);
}
5925 
/* Top-level PHY dynamic-mechanism initialization, called at bring-up.
 * The call order follows hardware dependencies (BB setup before the
 * monitors/DIG/CFO, RF calibration after NCTL init) and should not be
 * rearranged casually.
 */
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}
5951 
/* Program the HE BSS color filter for an associated HE vif: mark the
 * color valid, then write the target color and our station AID into the
 * BSS color map register.  No-op unless the vif is HE-capable and
 * associated.
 */
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
	u8 bss_color;

	if (!vif->bss_conf.he_support || !vif->cfg.assoc)
		return;

	bss_color = vif->bss_conf.he_bss_color.color;

	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}
5971 
/* RFK table op: write an RF register on the given path. */
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}
5977 
/* RFK table op: masked 32-bit PHY register write. */
static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}
5983 
/* RFK table op: set the masked bits of a PHY register. */
static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}
5989 
/* RFK table op: clear the masked bits of a PHY register. */
static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}
5995 
/* RFK table op: busy-wait delay; def->data is in microseconds. */
static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}
6001 
/* Dispatch table mapping RFK table-entry flags to their handlers; must
 * stay in sync with enum rtw89_rfk_flag (checked by the static_assert).
 */
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
6012 
6013 void
rtw89_rfk_parser(struct rtw89_dev * rtwdev,const struct rtw89_rfk_tbl * tbl)6014 rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
6015 {
6016 	const struct rtw89_reg5_def *p = tbl->defs;
6017 	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
6018 
6019 	for (; p < end; p++)
6020 		_rfk_handler[p->flag](rtwdev, p);
6021 }
6022 EXPORT_SYMBOL(rtw89_rfk_parser);
6023 
#define RTW89_TSSI_FAST_MODE_NUM 4

/* TSSI fast-mode register fields (addr/mask pairs), one layout for the
 * flat band-edge configuration and one for the level configuration.
 */
static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};
6039 
/* Write the same fast-mode value into all TSSI fast-mode register fields
 * for a MAC, using the flat or level register layout depending on the
 * band-edge configuration.
 */
static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}
6060 
/* TSSI band-edge register fields (addr/mask pairs), indexed by sub-band,
 * for the flat and level configurations respectively.
 */
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};
6096 
/* Apply a TSSI band-edge configuration to a MAC: write the per-sub-band
 * values from the chip's dbw table into the matching register layout,
 * record the configuration index, and program fast mode with the 20 MHz
 * sub-band value.
 */
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
6129 
/* Base channels used to encode a channel number compactly: a 4-bit base
 * index plus a 4-bit offset.  0xff entries separate the 2G/5G/6G regions;
 * the *_FIRST/_LAST macros bound each band's usable indices.
 */
static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G		0
#define RTW89_CH_BASE_IDX_5G_FIRST	2
#define RTW89_CH_BASE_IDX_5G_LAST	5
#define RTW89_CH_BASE_IDX_6G_FIRST	7
#define RTW89_CH_BASE_IDX_6G_LAST	14

#define RTW89_CH_BASE_IDX_MASK		GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK		GENMASK(3, 0)
6142 
/* Encode (band, central channel) into the 8-bit channel index used by
 * hardware: high nibble is a base-channel index into rtw89_ch_base_table,
 * low nibble is the channel offset (raw for 2G, halved for 5G/6G since
 * those channels step by 2).  Returns 0 and warns on unsupported input.
 */
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		/* 2G channels fit directly in the offset nibble. */
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	/* Find the largest base channel not exceeding central_ch. */
	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);
6182 
/* Decode a channel index produced by rtw89_encode_chan_idx() back into
 * the central channel number and nl80211 band.
 */
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 base_idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	u8 offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	/* 2 GHz entries store the channel number directly in the offset */
	if (base_idx == RTW89_CH_BASE_IDX_2G) {
		*ch = offset;
		*band = NL80211_BAND_2GHZ;
		return;
	}

	*ch = rtw89_ch_base_table[base_idx] + (offset << 1);
	*band = base_idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ :
							NL80211_BAND_6GHZ;
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);
6201 
/* Reconfigure EDCCA thresholds around scanning: on scan entry, back up
 * the current a/p/ppdu threshold fields and raise them all to EDCCA_MAX;
 * on scan exit, restore the backed-up values.
 */
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	const struct rtw89_edcca_regs *regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *bak = &rtwdev->hal.edcca_bak;

	if (!scan) {
		/* restore the thresholds saved when the scan started */
		rtw89_phy_write32_mask(rtwdev, regs->edcca_level,
				       regs->edcca_mask, bak->a);
		rtw89_phy_write32_mask(rtwdev, regs->edcca_level,
				       regs->edcca_p_mask, bak->p);
		rtw89_phy_write32_mask(rtwdev, regs->ppdu_level,
				       regs->ppdu_mask, bak->ppdu);
		return;
	}

	/* save the current thresholds before opening them up */
	bak->a = rtw89_phy_read32_mask(rtwdev, regs->edcca_level,
				       regs->edcca_mask);
	bak->p = rtw89_phy_read32_mask(rtwdev, regs->edcca_level,
				       regs->edcca_p_mask);
	bak->ppdu = rtw89_phy_read32_mask(rtwdev, regs->ppdu_level,
					  regs->ppdu_mask);

	rtw89_phy_write32_mask(rtwdev, regs->edcca_level,
			       regs->edcca_mask, EDCCA_MAX);
	rtw89_phy_write32_mask(rtwdev, regs->edcca_level,
			       regs->edcca_p_mask, EDCCA_MAX);
	rtw89_phy_write32_mask(rtwdev, regs->ppdu_level,
			       regs->ppdu_mask, EDCCA_MAX);
}
6236 
/* Dump the EDCCA report registers to the debug log: per-20MHz power
 * bitmap and power values, plus the CCA flags and power readings for
 * the full-band/primary/secondary sub-channels. Debug-only; returns
 * immediately unless the RTW89_DBG_EDCCA debug mask is enabled.
 */
static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap;
	u8 pwdb[8];
	u32 tmp;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	/* RTL8922A has an additional report selector that must be reset
	 * before reading through the common rpt_sel/rpt_b window
	 */
	if (rtwdev->chip->chip_id == RTL8922A)
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);

	/* report page 0: RF path, CCA flags, and FB/p20/s20 power bytes */
	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 0);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	/* report page 4: s80/s40 power bytes */
	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 4);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
					     MASKBYTE0);

	/* collect the eight per-20MHz power values; the register layout
	 * differs between RTL8922A (four bytes per rpt_sel_be page) and
	 * older chips (two bytes per rpt_sel page)
	 */
	if (rtwdev->chip->chip_id == RTL8922A) {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 1);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}
6333 
rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev * rtwdev)6334 static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
6335 {
6336 	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
6337 	bool is_linked = rtwdev->total_sta_assoc > 0;
6338 	u8 rssi_min = ch_info->rssi_min >> 1;
6339 	u8 edcca_thre;
6340 
6341 	if (!is_linked) {
6342 		edcca_thre = EDCCA_MAX;
6343 	} else {
6344 		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
6345 			     EDCCA_TH_REF;
6346 		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
6347 	}
6348 
6349 	return edcca_thre;
6350 }
6351 
rtw89_phy_edcca_thre_calc(struct rtw89_dev * rtwdev)6352 void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
6353 {
6354 	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
6355 	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
6356 	u8 th;
6357 
6358 	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
6359 	if (th == edcca_bak->th_old)
6360 		return;
6361 
6362 	edcca_bak->th_old = th;
6363 
6364 	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
6365 		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
6366 
6367 	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
6368 			       edcca_regs->edcca_mask, th);
6369 	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
6370 			       edcca_regs->edcca_p_mask, th);
6371 	rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
6372 			       edcca_regs->ppdu_mask, th);
6373 }
6374 
rtw89_phy_edcca_track(struct rtw89_dev * rtwdev)6375 void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
6376 {
6377 	struct rtw89_hal *hal = &rtwdev->hal;
6378 
6379 	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
6380 		return;
6381 
6382 	rtw89_phy_edcca_thre_calc(rtwdev);
6383 	rtw89_phy_edcca_log(rtwdev);
6384 }
6385 
rtw89_phy_get_kpath(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)6386 enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
6387 					   enum rtw89_phy_idx phy_idx)
6388 {
6389 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
6390 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
6391 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
6392 
6393 	switch (rtwdev->mlo_dbcc_mode) {
6394 	case MLO_1_PLUS_1_1RF:
6395 		if (phy_idx == RTW89_PHY_0)
6396 			return RF_A;
6397 		else
6398 			return RF_B;
6399 	case MLO_1_PLUS_1_2RF:
6400 		if (phy_idx == RTW89_PHY_0)
6401 			return RF_A;
6402 		else
6403 			return RF_D;
6404 	case MLO_0_PLUS_2_1RF:
6405 	case MLO_2_PLUS_0_1RF:
6406 		/* for both PHY 0/1 */
6407 		return RF_AB;
6408 	case MLO_0_PLUS_2_2RF:
6409 	case MLO_2_PLUS_0_2RF:
6410 	case MLO_2_PLUS_2_2RF:
6411 	default:
6412 		if (phy_idx == RTW89_PHY_0)
6413 			return RF_AB;
6414 		else
6415 			return RF_CD;
6416 	}
6417 }
6418 EXPORT_SYMBOL(rtw89_phy_get_kpath);
6419 
rtw89_phy_get_syn_sel(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)6420 enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
6421 					 enum rtw89_phy_idx phy_idx)
6422 {
6423 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
6424 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
6425 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
6426 
6427 	switch (rtwdev->mlo_dbcc_mode) {
6428 	case MLO_1_PLUS_1_1RF:
6429 		if (phy_idx == RTW89_PHY_0)
6430 			return RF_PATH_A;
6431 		else
6432 			return RF_PATH_B;
6433 	case MLO_1_PLUS_1_2RF:
6434 		if (phy_idx == RTW89_PHY_0)
6435 			return RF_PATH_A;
6436 		else
6437 			return RF_PATH_D;
6438 	case MLO_0_PLUS_2_1RF:
6439 	case MLO_2_PLUS_0_1RF:
6440 		if (phy_idx == RTW89_PHY_0)
6441 			return RF_PATH_A;
6442 		else
6443 			return RF_PATH_B;
6444 	case MLO_0_PLUS_2_2RF:
6445 	case MLO_2_PLUS_0_2RF:
6446 	case MLO_2_PLUS_2_2RF:
6447 	default:
6448 		if (phy_idx == RTW89_PHY_0)
6449 			return RF_PATH_A;
6450 		else
6451 			return RF_PATH_C;
6452 	}
6453 }
6454 EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
6455 
/* CCX (channel utilization / IFS_CLM counter) register map for
 * AX-generation chips; plugged into rtw89_phy_gen_ax.ccx below.
 */
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};
6513 
/* PHY status (PLCP histogram) register map for AX-generation chips */
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
6519 
/* CFO (carrier frequency offset) compensation register map for
 * AX-generation chips
 */
static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};
6526 
/* Generation-specific PHY definition for AX chips, reached via
 * rtwdev->chip->phy_def: CR base offset, register maps, and the
 * txpwr/config callbacks. NULL entries are ops this generation does
 * not implement.
 */
const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);
6544