1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8
/* Channel table entry initializers. _idx is the IEEE channel number
 * (stored as hw_value), _freq the center frequency in MHz. All entries
 * default to a 30 dBm max_power.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
29
/* 2.4 GHz channels 1-14 (2412-2484 MHz) */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46
/* 5 GHz channels; the blank-line groups follow the UNII sub-bands */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* UNII-1 */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* UNII-2A (typically subject to DFS) */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* UNII-2C (typically subject to DFS) */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* UNII-3 / UNII-4 */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80
/* 6 GHz channels (5955-7115 MHz), grouped by UNII sub-band */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146
/* LED throughput trigger table: throughput thresholds (in Kbit/s,
 * i.e. N * 1024 ~= N Mbit/s) mapped to blink intervals in ms -- higher
 * traffic yields a faster blink.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
159
/* Legacy bitrate table shared by mt76 drivers.
 *
 * CCK_RATE()/OFDM_RATE() come from elsewhere (presumably mt76.h --
 * verify). Second argument is the bitrate in units of 100 kbit/s
 * (mac80211 convention: 10 == 1 Mbit/s); the first is the hardware
 * rate index, hence the non-monotonic OFDM ordering.
 * 5/6 GHz callers pass mt76_rates + 4 to skip the four CCK entries.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175
/* SAR frequency ranges (MHz) advertised to cfg80211; together they
 * span the 2.4, 5 and 6 GHz bands so userspace can set a per-range
 * power limit.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
191 .type = NL80211_SAR_TYPE_POWER,
192 .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 .freq_ranges = &mt76_sar_freq_ranges[0],
194 };
195
mt76_led_init(struct mt76_phy * phy)196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 struct mt76_dev *dev = phy->dev;
199 struct ieee80211_hw *hw = phy->hw;
200 struct device_node *np = dev->dev->of_node;
201
202 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 return 0;
204
205 np = of_get_child_by_name(np, "led");
206 if (np) {
207 if (!of_device_is_available(np)) {
208 of_node_put(np);
209 dev_info(dev->dev,
210 "led registration was explicitly disabled by dts\n");
211 return 0;
212 }
213
214 if (phy == &dev->phy) {
215 int led_pin;
216
217 if (!of_property_read_u32(np, "led-sources", &led_pin))
218 phy->leds.pin = led_pin;
219
220 phy->leds.al =
221 of_property_read_bool(np, "led-active-low");
222 }
223
224 of_node_put(np);
225 }
226
227 snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 wiphy_name(hw->wiphy));
229
230 phy->leds.cdev.name = phy->leds.name;
231 phy->leds.cdev.default_trigger =
232 ieee80211_create_tpt_led_trigger(hw,
233 IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 mt76_tpt_blink,
235 ARRAY_SIZE(mt76_tpt_blink));
236
237 dev_info(dev->dev,
238 "registering led '%s'\n", phy->leds.name);
239
240 return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242
mt76_led_cleanup(struct mt76_phy * phy)243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 return;
247
248 led_classdev_unregister(&phy->leds.cdev);
249 }
250
/* Derive HT (and optionally VHT) spatial-stream capabilities from the
 * phy's antenna mask: TX STBC requires more than one chain, and the
 * per-stream MCS maps advertise only as many streams as chains.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	int n_chains = hweight8(phy->antenna_mask);
	u16 mcs_map = 0;
	int i;

	/* TX STBC needs at least two spatial streams */
	if (n_chains > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one full MCS octet (MCS 0-7) per available chain */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < n_chains ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (n_chains > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* two bits per stream: MCS 0-9 where a chain exists */
	for (i = 0; i < 8; i++) {
		u16 mcs = i < n_chains ? IEEE80211_VHT_MCS_SUPPORT_0_9
				       : IEEE80211_VHT_MCS_NOT_SUPPORTED;

		mcs_map |= mcs << (i * 2);
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292
/* Refresh stream capabilities on every band the phy supports.
 * 2.4 GHz never advertises VHT; 5/6 GHz follow the caller's choice.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	struct {
		bool present;
		struct ieee80211_supported_band *sband;
		bool vht;
	} bands[] = {
		{ phy->cap.has_2ghz, &phy->sband_2g.sband, false },
		{ phy->cap.has_5ghz, &phy->sband_5g.sband, vht },
		{ phy->cap.has_6ghz, &phy->sband_6g.sband, vht },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(bands); i++) {
		if (bands[i].present)
			mt76_init_stream_cap(phy, bands[i].sband,
					     bands[i].vht);
	}
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303
304 static int
mt76_init_sband(struct mt76_phy * phy,struct mt76_sband * msband,const struct ieee80211_channel * chan,int n_chan,struct ieee80211_rate * rates,int n_rates,bool ht,bool vht)305 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
306 const struct ieee80211_channel *chan, int n_chan,
307 struct ieee80211_rate *rates, int n_rates,
308 bool ht, bool vht)
309 {
310 struct ieee80211_supported_band *sband = &msband->sband;
311 struct ieee80211_sta_vht_cap *vht_cap;
312 struct ieee80211_sta_ht_cap *ht_cap;
313 struct mt76_dev *dev = phy->dev;
314 void *chanlist;
315 int size;
316
317 size = n_chan * sizeof(*chan);
318 chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
319 if (!chanlist)
320 return -ENOMEM;
321
322 msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
323 GFP_KERNEL);
324 if (!msband->chan)
325 return -ENOMEM;
326
327 sband->channels = chanlist;
328 sband->n_channels = n_chan;
329 sband->bitrates = rates;
330 sband->n_bitrates = n_rates;
331
332 if (!ht)
333 return 0;
334
335 ht_cap = &sband->ht_cap;
336 ht_cap->ht_supported = true;
337 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
338 IEEE80211_HT_CAP_GRN_FLD |
339 IEEE80211_HT_CAP_SGI_20 |
340 IEEE80211_HT_CAP_SGI_40 |
341 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
342
343 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
344 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
345
346 mt76_init_stream_cap(phy, sband, vht);
347
348 if (!vht)
349 return 0;
350
351 vht_cap = &sband->vht_cap;
352 vht_cap->vht_supported = true;
353 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
354 IEEE80211_VHT_CAP_RXSTBC_1 |
355 IEEE80211_VHT_CAP_SHORT_GI_80 |
356 (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
357
358 return 0;
359 }
360
361 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 int n_rates)
364 {
365 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366
367 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 ARRAY_SIZE(mt76_channels_2ghz), rates,
369 n_rates, true, false);
370 }
371
372 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates, bool vht)
375 {
376 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377
378 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 ARRAY_SIZE(mt76_channels_5ghz), rates,
380 n_rates, true, vht);
381 }
382
383 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 int n_rates)
386 {
387 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388
389 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 ARRAY_SIZE(mt76_channels_6ghz), rates,
391 n_rates, false, false);
392 }
393
394 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 enum nl80211_band band)
397 {
398 struct ieee80211_supported_band *sband = &msband->sband;
399 bool found = false;
400 int i;
401
402 if (!sband)
403 return;
404
405 for (i = 0; i < sband->n_channels; i++) {
406 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 continue;
408
409 found = true;
410 break;
411 }
412
413 if (found) {
414 phy->chandef.chan = &sband->channels[0];
415 phy->chan_state = &msband->chan[0];
416 return;
417 }
418
419 sband->n_channels = 0;
420 phy->hw->wiphy->bands[band] = NULL;
421 }
422
/* Common wiphy/hw setup shared by the main phy and extra phys: feature
 * flags, antenna info, SAR capability and hardware capability bits.
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* per-phy SAR power limit storage, one entry per SAR freq range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	/* default fragment budget if the driver did not set one */
	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* software A-MSDU only when the hw does not offload it and more
	 * than one tx fragment is available
	 */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
482
/* Allocate an extra (non-primary) phy together with its mac80211 hw.
 *
 * The ieee80211 private area is laid out as:
 *   [struct mt76_phy (8-byte aligned)][driver private data of @size]
 * so phy->priv points just past the mt76_phy itself.
 * Returns the new phy or NULL on allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* driver private data follows the aligned mt76_phy */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
516
/* Register an extra phy with mac80211: initialize common caps, set up
 * the supported bands, LEDs, apply DT frequency limits and finally
 * register the hw. Returns 0 on success or a negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* rates + 4 skips the four CCK entries (no CCK above 2.4 GHz) */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* apply DT frequency limits, then drop bands left without any
	 * usable channel
	 */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
565
/* Tear down an extra phy previously set up by mt76_register_phy() */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	/* nothing to do if registration never completed */
	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* flush pending tx status reports before unregistering from mac80211 */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
580
/* Create a page pool for an rx (or WED tx-free) queue.
 *
 * Queues that are neither rx nor wed_tx_free get no pool (returns 0).
 * Returns 0 on success or the page_pool_create() error.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* rx queue index within dev->q_rx, or -1 for non-rx queues */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* data queues get a larger pool than control/mcu queues */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* don't leave an ERR_PTR behind for later users */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
629
/* Allocate the mt76 device together with its primary phy and mac80211
 * hw, and initialize all common locks, lists and queues.
 * Returns the new device or NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* DMA device defaults to the platform device; drivers may override */
	dev->dma_dev = pdev;

	/* the primary phy lives embedded in the device (band 0) */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	/* MCU message/response infrastructure */
	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	/* tx/rx token bookkeeping */
	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
706
/* Register the primary phy with mac80211: initialize caps and bands,
 * LEDs, DT frequency limits, then register the hw and start the tx
 * worker. Returns 0 on success or a negative errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* rates + 4 skips the four CCK entries (no CCK above 2.4 GHz) */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* apply DT frequency limits, then drop bands left without any
	 * usable channel
	 */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	/* run the tx worker with (low) FIFO realtime scheduling */
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
760
/* Tear down the primary phy registered by mt76_register_device() */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	/* nothing to do if registration never completed */
	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* flush pending tx status reports before unregistering from mac80211 */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
775
mt76_free_device(struct mt76_dev * dev)776 void mt76_free_device(struct mt76_dev *dev)
777 {
778 mt76_worker_teardown(&dev->tx_worker);
779 if (dev->wq) {
780 destroy_workqueue(dev->wq);
781 dev->wq = NULL;
782 }
783 ieee80211_free_hw(dev->hw);
784 }
785 EXPORT_SYMBOL_GPL(mt76_free_device);
786
/* Finalize a buffered A-MSDU burst for queue @q: validate its first
 * subframe and either drop it or hand it to the rx skb queue.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	/* reset buffering state; the burst is consumed below */
	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted with IV still in place: skip the
			 * 8-byte crypto header as well (presumably CCMP
			 * -- verify for other ciphers)
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP header where the DA should be: malformed, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
822
/* Collect rx frames into per-queue A-MSDU bursts.
 *
 * Subframes of one A-MSDU (same sequence number) are chained via the
 * head skb's frag_list; a non-A-MSDU frame, a new first subframe or a
 * sequence number change flushes the pending burst first.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* flush a pending burst that this frame does not belong to */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new burst; tail points at the frag_list slot */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append to the current burst's chain */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* single frames and final subframes complete the burst */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
845
/* Driver entry point for received frames: drop frames while the phy is
 * not running, account testmode statistics, then feed the frame into
 * the A-MSDU burst collector.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* per-queue packet/FCS-error counters while testmode rx is active */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
867
mt76_has_tx_pending(struct mt76_phy * phy)868 bool mt76_has_tx_pending(struct mt76_phy *phy)
869 {
870 struct mt76_queue *q;
871 int i;
872
873 for (i = 0; i < __MT_TXQ_MAX; i++) {
874 q = phy->q_tx[i];
875 if (q && q->queued)
876 return true;
877 }
878
879 return false;
880 }
881 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
882
883 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)884 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
885 {
886 struct mt76_sband *msband;
887 int idx;
888
889 if (c->band == NL80211_BAND_2GHZ)
890 msband = &phy->sband_2g;
891 else if (c->band == NL80211_BAND_6GHZ)
892 msband = &phy->sband_6g;
893 else
894 msband = &phy->sband_5g;
895
896 idx = c - &msband->sband.channels[0];
897 return &msband->chan[idx];
898 }
899
/* Accumulate on-channel active time (in microseconds) since the last
 * survey update and advance the survey timestamp.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;
	ktime_t delta = ktime_sub(time, phy->survey_time);

	state->cc_active += ktime_to_us(delta);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
909
mt76_update_survey(struct mt76_phy * phy)910 void mt76_update_survey(struct mt76_phy *phy)
911 {
912 struct mt76_dev *dev = phy->dev;
913 ktime_t cur_time;
914
915 if (dev->drv->update_survey)
916 dev->drv->update_survey(phy);
917
918 cur_time = ktime_get_boottime();
919 mt76_update_survey_active_time(phy, cur_time);
920
921 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
922 struct mt76_channel_state *state = phy->chan_state;
923
924 spin_lock_bh(&dev->cc_lock);
925 state->cc_bss_rx += dev->cur_cc_bss_rx;
926 dev->cur_cc_bss_rx = 0;
927 spin_unlock_bh(&dev->cc_lock);
928 }
929 }
930 EXPORT_SYMBOL_GPL(mt76_update_survey);
931
/* Switch the phy to the channel currently configured in hw->conf:
 * drain tx, close out survey accounting, update DFS/channel state and
 * track the operating (non-offchannel) channel.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	/* give pending tx up to ~200ms to drain before switching */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	/* finish survey accounting for the old channel */
	mt76_update_survey(phy);

	/* a frequency or width change invalidates the DFS state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	/* remember the operating channel unless this is an off-channel visit */
	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* reset counters when tuned away from the operating channel */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
957
/* mac80211 get_survey callback.
 *
 * @idx enumerates channels across all bands in 2G -> 5G -> 6G order;
 * returns -ENOENT once idx runs past the last channel. Channel state
 * counters are stored in microseconds and converted for the report
 * (div by 1000).
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh hardware counters once, at the start of the dump */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* map the flat index onto the right band */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		/* BSS rx time is only tracked for the operating channel */
		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated under cc_lock from the datapath */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1022
/* Initialize software PN replay checking for a station entry.
 *
 * Replay checking is only enabled for CCMP keys; for those, the
 * current rx packet numbers for all TIDs plus robust management frames
 * are snapshotted from mac80211.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	/* after the loop i == IEEE80211_NUM_TIDS; the management-frame PN
	 * uses this extra trailing slot (rx_key_pn presumably sized
	 * IEEE80211_NUM_TIDS + 1 -- verify against mt76.h)
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1051
/* Combine per-chain signal strengths (dBm) into a single value.
 *
 * Starts from the strongest valid chain and adds a small bonus per
 * additional chain depending on how close it is to the running total
 * (+3 dB for equal, +2 within 2 dB, +1 within 6 dB). Chains not in the
 * mask or with positive (invalid) values are skipped; returns -128
 * when no chain contributed.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int combined = -128;
	u8 mask;

	for (mask = chain_mask; mask; mask >>= 1, chain_signal++) {
		int val = *chain_signal;
		int delta;

		if (!(mask & BIT(0)) || val > 0)
			continue;

		/* keep the stronger value in 'combined' */
		if (val > combined) {
			int tmp = combined;

			combined = val;
			val = tmp;
		}

		delta = combined - val;
		if (delta == 0)
			combined += 3;
		else if (delta <= 2)
			combined += 2;
		else if (delta <= 6)
			combined += 1;
	}

	return combined;
}
EXPORT_SYMBOL(mt76_rx_signal);
1080
1081 static void
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1082 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1083 struct ieee80211_hw **hw,
1084 struct ieee80211_sta **sta)
1085 {
1086 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1087 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1088 struct mt76_rx_status mstat;
1089
1090 mstat = *((struct mt76_rx_status *)skb->cb);
1091 memset(status, 0, sizeof(*status));
1092
1093 status->flag = mstat.flag;
1094 status->freq = mstat.freq;
1095 status->enc_flags = mstat.enc_flags;
1096 status->encoding = mstat.encoding;
1097 status->bw = mstat.bw;
1098 if (status->encoding == RX_ENC_EHT) {
1099 status->eht.ru = mstat.eht.ru;
1100 status->eht.gi = mstat.eht.gi;
1101 } else {
1102 status->he_ru = mstat.he_ru;
1103 status->he_gi = mstat.he_gi;
1104 status->he_dcm = mstat.he_dcm;
1105 }
1106 status->rate_idx = mstat.rate_idx;
1107 status->nss = mstat.nss;
1108 status->band = mstat.band;
1109 status->signal = mstat.signal;
1110 status->chains = mstat.chains;
1111 status->ampdu_reference = mstat.ampdu_ref;
1112 status->device_timestamp = mstat.timestamp;
1113 status->mactime = mstat.timestamp;
1114 status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1115 if (status->signal <= -128)
1116 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1117
1118 if (ieee80211_is_beacon(hdr->frame_control) ||
1119 ieee80211_is_probe_resp(hdr->frame_control))
1120 status->boottime_ns = ktime_get_boottime_ns();
1121
1122 BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1123 BUILD_BUG_ON(sizeof(status->chain_signal) !=
1124 sizeof(mstat.chain_signal));
1125 memcpy(status->chain_signal, mstat.chain_signal,
1126 sizeof(mstat.chain_signal));
1127
1128 *sta = wcid_to_sta(mstat.wcid);
1129 *hw = mt76_phy_hw(dev, mstat.phy_idx);
1130 }
1131
/* Check the CCMP packet number (PN) of a hardware-decrypted frame against
 * the last PN recorded for the station's replay counter, and flag replayed
 * frames for monitor-only delivery.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	/* replay detection only applies to frames decrypted by hardware */
	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	/* frame is already demoted to monitor-only delivery */
	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* data frames use one replay counter per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		/* PN did not increase: likely replay; deliver the frame
		 * only to monitor interfaces instead of the normal rx path
		 */
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	/* remember the highest PN seen for this replay counter */
	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1189
1190 static void
mt76_airtime_report(struct mt76_dev * dev,struct mt76_rx_status * status,int len)1191 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1192 int len)
1193 {
1194 struct mt76_wcid *wcid = status->wcid;
1195 struct ieee80211_rx_status info = {
1196 .enc_flags = status->enc_flags,
1197 .rate_idx = status->rate_idx,
1198 .encoding = status->encoding,
1199 .band = status->band,
1200 .nss = status->nss,
1201 .bw = status->bw,
1202 };
1203 struct ieee80211_sta *sta;
1204 u32 airtime;
1205 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1206
1207 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1208 spin_lock(&dev->cc_lock);
1209 dev->cur_cc_bss_rx += airtime;
1210 spin_unlock(&dev->cc_lock);
1211
1212 if (!wcid || !wcid->sta)
1213 return;
1214
1215 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1216 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1217 }
1218
1219 static void
mt76_airtime_flush_ampdu(struct mt76_dev * dev)1220 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1221 {
1222 struct mt76_wcid *wcid;
1223 int wcid_idx;
1224
1225 if (!dev->rx_ampdu_len)
1226 return;
1227
1228 wcid_idx = dev->rx_ampdu_status.wcid_idx;
1229 if (wcid_idx < ARRAY_SIZE(dev->wcid))
1230 wcid = rcu_dereference(dev->wcid[wcid_idx]);
1231 else
1232 wcid = NULL;
1233 dev->rx_ampdu_status.wcid = wcid;
1234
1235 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1236
1237 dev->rx_ampdu_len = 0;
1238 dev->rx_ampdu_ref = 0;
1239 }
1240
/* Software rx airtime accounting: frames belonging to the same A-MPDU
 * (matched by ampdu_ref) are accumulated and reported as one batch when
 * the next A-MPDU or a standalone frame arrives.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	/* only drivers without hardware airtime reporting use this path */
	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* frames without a known station are only counted when
		 * addressed to our own MAC address
		 */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* flush the previous batch when this frame is not part of it */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		/* first frame of a new A-MPDU: snapshot its rx status */
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	/* standalone frame: report immediately */
	mt76_airtime_report(dev, status, skb->len);
}
1280
/* Per-frame station bookkeeping on rx: resolve the wcid for PS-poll
 * frames, update RSSI/activity state and track powersave transitions.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-poll may arrive without a resolved wcid; look the station up
	 * by its transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* track average RSSI; only non-positive values are valid readings */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a meaningful PM bit here */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no change in powersave state: nothing more to do */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* NOTE(review): the flag is set before the driver hook when entering
	 * PS and cleared after it when leaving — the asymmetry appears
	 * intentional (tx paths see PS set during the transition); keep it.
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1348
/* Deliver a batch of prepared rx frames to mac80211. Frames chained on
 * an skb's frag_list are A-MSDU subframes and are handed up individually.
 * With a NAPI context the frames go through GRO; without one they are
 * delivered via netif_receive_skb_list().
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* PN replay check uses skb->cb, so it must run before
		 * mt76_rx_convert() rewrites the control buffer
		 */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1388
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1389 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1390 struct napi_struct *napi)
1391 {
1392 struct sk_buff_head frames;
1393 struct sk_buff *skb;
1394
1395 __skb_queue_head_init(&frames);
1396
1397 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1398 mt76_check_sta(dev, skb);
1399 if (mtk_wed_device_active(&dev->mmio.wed))
1400 __skb_queue_tail(&frames, skb);
1401 else
1402 mt76_rx_aggr_reorder(skb, &frames);
1403 }
1404
1405 mt76_rx_complete(dev, &frames, napi);
1406 }
1407 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1408
/* Set up driver and software state for a new station and publish its
 * wcid entry for rx lookups. Returns a negative errno if the driver's
 * sta_add hook fails.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	/* let the driver allocate/program the hardware entry first */
	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* point each of the station's mac80211 txqs at its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->phy_idx = phy->band_idx;
	/* publish the wcid so the rx path can look it up */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_wcid_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1446
__mt76_sta_remove(struct mt76_dev * dev,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1447 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1448 struct ieee80211_sta *sta)
1449 {
1450 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1451 int i, idx = wcid->idx;
1452
1453 for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1454 mt76_rx_aggr_stop(dev, wcid, i);
1455
1456 if (dev->drv->sta_remove)
1457 dev->drv->sta_remove(dev, vif, sta);
1458
1459 mt76_wcid_cleanup(dev, wcid);
1460
1461 mt76_wcid_mask_clear(dev->wcid_mask, idx);
1462 mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1463 }
1464 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1465
/* Remove a station with the device mutex held */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}
1474
mt76_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1475 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1476 struct ieee80211_sta *sta,
1477 enum ieee80211_sta_state old_state,
1478 enum ieee80211_sta_state new_state)
1479 {
1480 struct mt76_phy *phy = hw->priv;
1481 struct mt76_dev *dev = phy->dev;
1482
1483 if (old_state == IEEE80211_STA_NOTEXIST &&
1484 new_state == IEEE80211_STA_NONE)
1485 return mt76_sta_add(phy, vif, sta);
1486
1487 if (old_state == IEEE80211_STA_AUTH &&
1488 new_state == IEEE80211_STA_ASSOC &&
1489 dev->drv->sta_assoc)
1490 dev->drv->sta_assoc(dev, vif, sta);
1491
1492 if (old_state == IEEE80211_STA_NONE &&
1493 new_state == IEEE80211_STA_NOTEXIST)
1494 mt76_sta_remove(dev, vif, sta);
1495
1496 return 0;
1497 }
1498 EXPORT_SYMBOL_GPL(mt76_sta_state);
1499
/* mac80211 callback invoked before the station becomes unreachable via
 * RCU: unpublish the wcid so no new lookups can find it.
 * NOTE(review): status_lock presumably serializes against concurrent
 * tx-status wcid lookups — confirm before changing the locking here.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1514
/* Initialize the software tx bookkeeping embedded in a wcid entry */
void mt76_wcid_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);

	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1524
/* Flush and free all tx state still attached to a wcid: pending
 * tx-status skbs, the packet-id IDR, the phy tx scheduling link and any
 * frames still queued for transmission.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = dev->phys[wcid->phy_idx];
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect all pending tx-status skbs for this wcid (-1 = any pktid);
	 * NOTE(review): `list` appears to be initialized inside
	 * mt76_tx_status_lock() — verify before reordering
	 */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	/* unlink from the phy's tx scheduling list */
	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* move not-yet-queued frames onto the same local list; note the
	 * nested lock order: phy->tx_lock outside tx_pending.lock
	 */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* release everything collected above */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1555
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int * dbm)1556 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1557 int *dbm)
1558 {
1559 struct mt76_phy *phy = hw->priv;
1560 int n_chains = hweight16(phy->chainmask);
1561 int delta = mt76_tx_power_nss_delta(n_chains);
1562
1563 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1564
1565 return 0;
1566 }
1567 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1568
mt76_init_sar_power(struct ieee80211_hw * hw,const struct cfg80211_sar_specs * sar)1569 int mt76_init_sar_power(struct ieee80211_hw *hw,
1570 const struct cfg80211_sar_specs *sar)
1571 {
1572 struct mt76_phy *phy = hw->priv;
1573 const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1574 int i;
1575
1576 if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1577 return -EINVAL;
1578
1579 for (i = 0; i < sar->num_sub_specs; i++) {
1580 u32 index = sar->sub_specs[i].freq_range_index;
1581 /* SAR specifies power limitaton in 0.25dbm */
1582 s32 power = sar->sub_specs[i].power >> 1;
1583
1584 if (power > 127 || power < -127)
1585 power = 127;
1586
1587 phy->frp[index].range = &capa->freq_ranges[index];
1588 phy->frp[index].power = power;
1589 }
1590
1591 return 0;
1592 }
1593 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1594
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1595 int mt76_get_sar_power(struct mt76_phy *phy,
1596 struct ieee80211_channel *chan,
1597 int power)
1598 {
1599 const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1600 int freq, i;
1601
1602 if (!capa || !phy->frp)
1603 return power;
1604
1605 if (power > 127 || power < -127)
1606 power = 127;
1607
1608 freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1609 for (i = 0 ; i < capa->num_freq_ranges; i++) {
1610 if (phy->frp[i].range &&
1611 freq >= phy->frp[i].range->start_freq &&
1612 freq < phy->frp[i].range->end_freq) {
1613 power = min_t(int, phy->frp[i].power, power);
1614 break;
1615 }
1616 }
1617
1618 return power;
1619 }
1620 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1621
/* Per-interface iterator: complete the channel switch on interfaces
 * whose beacon countdown has finished.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
		ieee80211_csa_finish(vif, 0);
}
1628
/* Finalize a pending channel switch across all active interfaces once
 * dev->csa_complete has been raised (see mt76_csa_check()).
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
						   IEEE80211_IFACE_ITER_RESUME_ALL,
						   __mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1641
/* Per-interface iterator: record in dev->csa_complete whether any
 * interface with an active channel switch has finished its countdown.
 */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
1652
/* Poll all active interfaces for a finished CSA beacon countdown and
 * latch the result into dev->csa_complete.
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
						   IEEE80211_IFACE_ITER_RESUME_ALL,
						   __mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1660
/* mac80211 set_tim callback: intentionally a no-op returning success —
 * presumably the TIM element is maintained elsewhere (driver/firmware);
 * the stub only advertises support for the operation.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1667
/* Rebuild and re-insert the 8-byte CCMP header after the 802.11 header,
 * using the PN saved in the rx status, and clear RX_FLAG_IV_STRIPPED so
 * mac80211 parses the IV from the frame again.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* make 8 bytes of room and slide the 802.11 header forward */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0, PN1, reserved, key-id | ext-IV (0x20),
	 * PN2..PN5; status->iv stores the PN with pn[0] most significant
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1690
/* Map a hardware rate index to its position in the given rate table.
 * Returns 0 when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, start = 0;

	if (cck) {
		/* CCK rates only exist in the 2 GHz table */
		if (sband != &dev->phy.sband_2g.sband)
			return 0;

		idx &= ~BIT(2); /* strip the short preamble flag */
	} else if (sband == &dev->phy.sband_2g.sband) {
		/* skip the first four (CCK) entries on 2 GHz */
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++)
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1714
/* mac80211 sw_scan_start callback: flag the phy as scanning */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1723
/* mac80211 sw_scan_complete callback: clear the scanning flag */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1731
/* Report the configured antenna mask; tx and rx share the same mask */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1745
1746 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,void * wed,u32 flags)1747 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1748 int ring_base, void *wed, u32 flags)
1749 {
1750 struct mt76_queue *hwq;
1751 int err;
1752
1753 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1754 if (!hwq)
1755 return ERR_PTR(-ENOMEM);
1756
1757 hwq->flags = flags;
1758 hwq->wed = wed;
1759
1760 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1761 if (err < 0)
1762 return ERR_PTR(err);
1763
1764 return hwq;
1765 }
1766 EXPORT_SYMBOL_GPL(mt76_init_queue);
1767
mt76_calculate_default_rate(struct mt76_phy * phy,struct ieee80211_vif * vif,int rateidx)1768 u16 mt76_calculate_default_rate(struct mt76_phy *phy,
1769 struct ieee80211_vif *vif, int rateidx)
1770 {
1771 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1772 struct cfg80211_chan_def *chandef = mvif->ctx ?
1773 &mvif->ctx->def :
1774 &phy->chandef;
1775 int offset = 0;
1776
1777 if (chandef->chan->band != NL80211_BAND_2GHZ)
1778 offset = 4;
1779
1780 /* pick the lowest rate for hidden nodes */
1781 if (rateidx < 0)
1782 rateidx = 0;
1783
1784 rateidx += offset;
1785 if (rateidx >= ARRAY_SIZE(mt76_rates))
1786 rateidx = offset;
1787
1788 return mt76_rates[rateidx].hw_value;
1789 }
1790 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1791
/* Accumulate one station's tx statistics into the ethtool data array.
 * NOTE(review): the order in which ei advances must match the stat
 * string table defined in the per-driver ethtool code — keep in sync.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	/* EHT tx modes are only reported for EHT-capable drivers */
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* the last bandwidth bucket is skipped for non-EHT */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* MCS 0-13 for EHT, 0-11 otherwise */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* tell the caller how many slots this worker consumed */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1827
/* Aggregate page-pool statistics from all rx queues into the ethtool
 * data array and advance *index past them. Compiles to a no-op when
 * CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1842
mt76_phy_dfs_state(struct mt76_phy * phy)1843 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1844 {
1845 struct ieee80211_hw *hw = phy->hw;
1846 struct mt76_dev *dev = phy->dev;
1847
1848 if (dev->region == NL80211_DFS_UNSET ||
1849 test_bit(MT76_SCANNING, &phy->state))
1850 return MT_DFS_STATE_DISABLED;
1851
1852 if (!hw->conf.radar_enabled) {
1853 if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1854 (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1855 return MT_DFS_STATE_ACTIVE;
1856
1857 return MT_DFS_STATE_DISABLED;
1858 }
1859
1860 if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1861 return MT_DFS_STATE_CAC;
1862
1863 return MT_DFS_STATE_ACTIVE;
1864 }
1865 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1866