1 /* $OpenBSD: if_mwx.c,v 1.5 2024/05/22 16:24:59 martijn Exp $ */
2 /*
3 * Copyright (c) 2022 Claudio Jeker <claudio@openbsd.org>
4 * Copyright (c) 2021 MediaTek Inc.
5 * Copyright (c) 2021 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
6 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
7 * Copyright (c) 2016 Felix Fietkau <nbd@nbd.name>
8 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
9 *
10 * Permission to use, copy, modify, and/or distribute this software for any
11 * purpose with or without fee is hereby granted, provided that the above
12 * copyright notice and this permission notice appear in all copies.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
15 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
17 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 */
22
23 #include "bpfilter.h"
24
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/atomic.h>
28 #include <sys/sockio.h>
29
30 #include <machine/bus.h>
31 #include <machine/intr.h>
32
33 #include <dev/pci/pcireg.h>
34 #include <dev/pci/pcivar.h>
35 #include <dev/pci/pcidevs.h>
36
37 #if NBPFILTER > 0
38 #include <net/bpf.h>
39 #endif
40 #include <net/if.h>
41 #include <net/if_dl.h>
42 #include <net/if_media.h>
43
44 #include <netinet/in.h>
45 #include <netinet/if_ether.h>
46
47 #include <net80211/ieee80211_var.h>
48 #include <net80211/ieee80211_radiotap.h>
49
50 #include <dev/pci/if_mwxreg.h>
51
/* PCI vendor/product IDs of the supported MediaTek devices. */
static const struct pci_matchid mwx_devices[] = {
	{ PCI_VENDOR_MEDIATEK, PCI_PRODUCT_MEDIATEK_MT7921 },
	{ PCI_VENDOR_MEDIATEK, PCI_PRODUCT_MEDIATEK_MT7921K },
	{ PCI_VENDOR_MEDIATEK, PCI_PRODUCT_MEDIATEK_MT7922 },
};

#define MWX_DEBUG 1

/* firmware(4) image names, selected by sc_hwtype in mt7921_load_firmware() */
#define MT7921_ROM_PATCH "mwx-mt7961_patch_mcu_1_2_hdr"
#define MT7921_FIRMWARE_WM "mwx-mt7961_ram_code_1"
#define MT7922_ROM_PATCH "mwx-mt7922_patch_mcu_1_1_hdr"
#define MT7922_FIRMWARE_WM "mwx-mt7922_ram_code_1"
64
#if NBPFILTER > 0
/*
 * Radiotap header prepended to received frames handed to bpf(4).
 * Field order and types must match the MWX_RX_RADIOTAP_PRESENT bitmap.
 */
struct mwx_rx_radiotap_header {
	struct ieee80211_radiotap_header wr_ihdr;
	uint64_t wr_tsft;
	uint8_t wr_flags;
	uint8_t wr_rate;
	uint16_t wr_chan_freq;
	uint16_t wr_chan_flags;
	int8_t wr_dbm_antsignal;
	int8_t wr_dbm_antnoise;
} __packed;

/* Bitmap of fields present in mwx_rx_radiotap_header. */
#define MWX_RX_RADIOTAP_PRESENT \
	((1 << IEEE80211_RADIOTAP_TSFT) | \
	 (1 << IEEE80211_RADIOTAP_FLAGS) | \
	 (1 << IEEE80211_RADIOTAP_RATE) | \
	 (1 << IEEE80211_RADIOTAP_CHANNEL) | \
	 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
	 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))

/* Radiotap header prepended to transmitted frames handed to bpf(4). */
struct mwx_tx_radiotap_header {
	struct ieee80211_radiotap_header wt_ihdr;
	uint8_t wt_flags;
	uint8_t wt_rate;
	uint16_t wt_chan_freq;
	uint16_t wt_chan_flags;
} __packed;

/* Bitmap of fields present in mwx_tx_radiotap_header. */
#define MWX_TX_RADIOTAP_PRESENT \
	((1 << IEEE80211_RADIOTAP_FLAGS) | \
	 (1 << IEEE80211_RADIOTAP_RATE) | \
	 (1 << IEEE80211_RADIOTAP_CHANNEL))

#endif
99
/*
 * One TX buffer: pairs a hardware TXWI descriptor with the mbuf being
 * transmitted and the DMA map used for it.
 */
struct mwx_txwi {
	struct mt76_txwi	*mt_desc;	/* hardware TXWI descriptor */
	struct mbuf		*mt_mbuf;	/* mbuf in flight, if any */
	bus_dmamap_t		mt_map;		/* DMA map for mt_mbuf */
	LIST_ENTRY(mwx_txwi)	mt_entry;	/* free-list linkage */
	u_int32_t		mt_addr;	/* bus address of mt_desc */
	u_int			mt_idx;		/* index into the txwi pool */
};

/*
 * The pool of all TXWI descriptors: one contiguous DMA allocation
 * (mt_map/mt_seg) carved up into mt_count mwx_txwi entries, with unused
 * entries kept on mt_freelist.
 */
struct mwx_txwi_desc {
	struct mt76_txwi	*mt_desc;	/* descriptor array (DMA mem) */
	struct mwx_txwi		*mt_data;	/* per-descriptor bookkeeping */

	u_int			mt_count;
	bus_dmamap_t		mt_map;
	bus_dma_segment_t	mt_seg;
	LIST_HEAD(, mwx_txwi)	mt_freelist;
};
118
/* Per-slot bookkeeping for a DMA ring entry. */
struct mwx_queue_data {
	struct mbuf		*md_mbuf;	/* buffer attached to the slot */
	struct mwx_txwi		*md_txwi;	/* txwi, for TX data queues */
	bus_dmamap_t		md_map;
};

/*
 * A TX or RX DMA ring shared with the chip.  mq_regbase is the base of
 * the ring's hardware registers; mq_prod/mq_cons track the software
 * view of the producer and consumer indices.
 */
struct mwx_queue {
	uint32_t		mq_regbase;
	u_int			mq_count;	/* number of ring slots */
	u_int			mq_prod;
	u_int			mq_cons;

	struct mt76_desc	*mq_desc;	/* descriptor ring (DMA mem) */
	struct mwx_queue_data	*mq_data;	/* per-slot software state */

	bus_dmamap_t		mq_map;
	bus_dma_segment_t	mq_seg;
	int			mq_wakeme;	/* wakeup(9) wanted on done */
};
138
/* Band and antenna capabilities reported by the firmware/EEPROM. */
struct mwx_hw_capa {
	int8_t			has_2ghz;
	int8_t			has_5ghz;
	int8_t			has_6ghz;
	uint8_t			antenna_mask;
	uint8_t			num_streams;
};

/*
 * Driver-private node; casts to/from ieee80211_node require ni to be
 * the first member.
 */
struct mwx_node {
	struct ieee80211_node	ni;
	uint16_t		wcid;		/* hardware WTBL index */
	uint8_t			hw_key_idx;	/* encryption key index */
	uint8_t			hw_key_idx2;
};

/* Virtual interface indices handed to the MCU (see uni_add_dev). */
struct mwx_vif {
	uint8_t			idx;
	uint8_t			omac_idx;
	uint8_t			band_idx;
	uint8_t			wmm_idx;
	uint8_t			scan_seq_num;
};

/* Supported chip generations, selects ROM patch/firmware names. */
enum mwx_hw_type {
	MWX_HW_MT7921,
	MWX_HW_MT7922,
};
166
/*
 * Per-device softc: net80211 state, the DMA rings shared with the
 * chip, bus resources and cached MCU/PHY configuration.
 */
struct mwx_softc {
	struct device		sc_dev;
	struct ieee80211com	sc_ic;

	enum mwx_hw_type	sc_hwtype;

	/* TX rings: data, MCU commands, firmware download */
	struct mwx_queue	sc_txq;
	struct mwx_queue	sc_txmcuq;
	struct mwx_queue	sc_txfwdlq;

	/* RX rings: data, MCU events, firmware download events */
	struct mwx_queue	sc_rxq;
	struct mwx_queue	sc_rxmcuq;
	struct mwx_queue	sc_rxfwdlq;

	struct mwx_txwi_desc	sc_txwi;	/* TXWI descriptor pool */

	/* PCI bus glue */
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;
	bus_dma_tag_t		sc_dmat;
	pcitag_t		sc_tag;
	pci_chipset_tag_t	sc_pc;
	void			*sc_ih;

	/* net80211 newstate handler we override in mwx_newstate() */
	int			(*sc_newstate)(struct ieee80211com *,
				    enum ieee80211_state, int);

	struct task		sc_scan_task;
	struct task		sc_reset_task;
	u_int			sc_flags;
#define MWX_FLAG_SCANNING	0x01
#define MWX_FLAG_BGSCAN		0x02
	int8_t			sc_resetting;
	int8_t			sc_fw_loaded;	/* firmware uploaded once */

#if NBPFILTER > 0
	caddr_t			sc_drvbpf;
	union {
		struct mwx_rx_radiotap_header th;
		uint8_t	pad[IEEE80211_RADIOTAP_HDRLEN];
	} sc_rxtapu;
	int			sc_rxtap_len;
	union {
		struct mwx_tx_radiotap_header th;
		uint8_t	pad[IEEE80211_RADIOTAP_HDRLEN];
	} sc_txtapu;
	int			sc_txtap_len;
#define sc_rxtap	sc_rxtapu.th
#define sc_txtap	sc_txtapu.th
#endif

	struct mwx_vif		sc_vif;		/* the single vif we use */

	/* mcu */
	uint32_t		sc_mcu_seq;	/* command sequence counter */
	/* responses to in-flight MCU commands, presumably indexed by
	 * sequence number — confirm against mwx_mcu_wait_resp_*() */
	struct {
		struct mbuf	*mcu_m;
		uint32_t	mcu_cmd;
		uint32_t	mcu_int;
	} sc_mcu_wait[16];
	uint8_t			sc_scan_seq_num;

	/* phy / hw */
	struct mwx_hw_capa	sc_capa;
	uint8_t			sc_lladdr[ETHER_ADDR_LEN];
	char			sc_alpha2[4];	/* regulatory-domain */

	int16_t			sc_coverage_class;
	uint8_t			sc_slottime;

	/* mac specific */
	uint32_t		sc_rxfilter;	/* cached MT_WF_RFCR value */

};
241
/* 2 GHz channel numbers advertised when sc_capa.has_2ghz is set. */
const uint8_t mwx_channels_2ghz[] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

#define MWX_NUM_2GHZ_CHANNELS nitems(mwx_channels_2ghz)

/* 5 GHz channel numbers advertised when sc_capa.has_5ghz is set. */
const uint8_t mwx_channels_5ghz[] = {
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173
};

#define MWX_NUM_5GHZ_CHANNELS nitems(mwx_channels_5ghz)

/* 6 GHz channel numbers (not wired up yet, see mwx_preinit()). */
const uint8_t mwx_channels_6ghz[] = {
	/* UNII-5 */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57,
	61, 65, 69, 73, 77, 81, 85, 89, 93,
	/* UNII-6 */
	97, 101, 105, 109, 113, 117,
	/* UNII-7 */
	121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 169,
	173, 177, 181, 185,
	/* UNII-8 */
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

/*
 * Legacy rate table: .11b/.11g rates (in 500 kb/s units) and their
 * hardware encoding (PHY type in the high byte, rate index below).
 */
const struct mwx_rate {
	uint16_t rate;
	uint16_t hw_value;
} mt76_rates[] = {
	{ 2, (MT_PHY_TYPE_CCK << 8) | 0 },
	{ 4, (MT_PHY_TYPE_CCK << 8) | 1 },
	{ 11, (MT_PHY_TYPE_CCK << 8) | 2 },
	{ 22, (MT_PHY_TYPE_CCK << 8) | 3 },
	{ 12, (MT_PHY_TYPE_OFDM << 8) | 11 },
	{ 18, (MT_PHY_TYPE_OFDM << 8) | 15 },
	{ 24, (MT_PHY_TYPE_OFDM << 8) | 10 },
	{ 36, (MT_PHY_TYPE_OFDM << 8) | 14 },
	{ 48, (MT_PHY_TYPE_OFDM << 8) | 9 },
	{ 72, (MT_PHY_TYPE_OFDM << 8) | 13 },
	{ 96, (MT_PHY_TYPE_OFDM << 8) | 8 },
	{ 108, (MT_PHY_TYPE_OFDM << 8) | 12 },
};


#define MWX_NUM_6GHZ_CHANNELS nitems(mwx_channels_6ghz)

#define DEVNAME(s) ((s)->sc_dev.dv_xname)
#define DEVDEBUG(x) ((x)->sc_ic.ic_if.if_flags & IFF_DEBUG)

/* MWX_DEBUG is unconditionally defined above, so DPRINTF is active. */
#ifdef MWX_DEBUG
#define DPRINTF(x...) printf(x)
#else
#define DPRINTF(x...)
#endif
298
299 static void
pkt_hex_dump(struct mbuf * m)300 pkt_hex_dump(struct mbuf *m)
301 {
302 int len, rowsize = 16;
303 int i, l, linelen;
304 uint8_t *data;
305
306 printf("Packet hex dump:\n");
307 data = mtod(m, uint8_t *);
308 len = m->m_len;
309
310 for (i = 0; i < len; i += linelen) {
311 printf("%04x\t", i);
312 linelen = len - i;
313 if (len - i > rowsize)
314 linelen = rowsize;
315 for (l = 0; l < linelen; l++)
316 printf("%02X ", (uint32_t)data[l]);
317 data += linelen;
318 printf("\n");
319 }
320 }
321
322 int mwx_init(struct ifnet *);
323 void mwx_stop(struct ifnet *);
324 void mwx_watchdog(struct ifnet *);
325 void mwx_start(struct ifnet *);
326 int mwx_ioctl(struct ifnet *, u_long, caddr_t);
327
328 struct ieee80211_node *mwx_node_alloc(struct ieee80211com *);
329 int mwx_media_change(struct ifnet *);
330 #if NBPFILTER > 0
331 void mwx_radiotap_attach(struct mwx_softc *);
332 #endif
333
334 int mwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
335 void mwx_newstate_task(void *);
336
337 int mwx_tx(struct mwx_softc *, struct mbuf *, struct ieee80211_node *);
338 void mwx_rx(struct mwx_softc *, struct mbuf *, struct mbuf_list *);
339 int mwx_intr(void *);
340 int mwx_preinit(struct mwx_softc *);
341 void mwx_attach_hook(struct device *);
342 int mwx_match(struct device *, void *, void *);
343 void mwx_attach(struct device *, struct device *, void *);
344 int mwx_activate(struct device *, int);
345
346 void mwx_reset(struct mwx_softc *);
347 void mwx_reset_task(void *);
348 int mwx_txwi_alloc(struct mwx_softc *, int);
349 void mwx_txwi_free(struct mwx_softc *);
350 struct mwx_txwi *mwx_txwi_get(struct mwx_softc *);
351 void mwx_txwi_put(struct mwx_softc *, struct mwx_txwi *);
352 int mwx_txwi_enqueue(struct mwx_softc *, struct mwx_txwi *, struct mbuf *);
353 int mwx_queue_alloc(struct mwx_softc *, struct mwx_queue *, int, uint32_t);
354 void mwx_queue_free(struct mwx_softc *, struct mwx_queue *);
355 void mwx_queue_reset(struct mwx_softc *, struct mwx_queue *);
356 int mwx_buf_fill(struct mwx_softc *, struct mwx_queue_data *,
357 struct mt76_desc *);
358 int mwx_queue_fill(struct mwx_softc *, struct mwx_queue *);
359 int mwx_dma_alloc(struct mwx_softc *);
360 int mwx_dma_reset(struct mwx_softc *, int);
361 void mwx_dma_free(struct mwx_softc *);
362 int mwx_dma_tx_enqueue(struct mwx_softc *, struct mwx_queue *,
363 struct mbuf *);
364 int mwx_dma_txwi_enqueue(struct mwx_softc *, struct mwx_queue *,
365 struct mwx_txwi *);
366 void mwx_dma_tx_cleanup(struct mwx_softc *, struct mwx_queue *);
367 void mwx_dma_tx_done(struct mwx_softc *);
368 void mwx_dma_rx_process(struct mwx_softc *, struct mbuf_list *);
369 void mwx_dma_rx_dequeue(struct mwx_softc *, struct mwx_queue *,
370 struct mbuf_list *);
371 void mwx_dma_rx_done(struct mwx_softc *, struct mwx_queue *);
372
373 struct mbuf *mwx_mcu_alloc_msg(size_t);
374 void mwx_mcu_set_len(struct mbuf *, void *);
375 int mwx_mcu_send_mbuf(struct mwx_softc *, uint32_t, struct mbuf *, int *);
376 int mwx_mcu_send_msg(struct mwx_softc *, uint32_t, void *, size_t, int *);
377 int mwx_mcu_send_wait(struct mwx_softc *, uint32_t, void *, size_t);
378 int mwx_mcu_send_mbuf_wait(struct mwx_softc *, uint32_t, struct mbuf *);
379 void mwx_mcu_rx_event(struct mwx_softc *, struct mbuf *);
380 int mwx_mcu_wait_resp_int(struct mwx_softc *, uint32_t, int, uint32_t *);
381 int mwx_mcu_wait_resp_msg(struct mwx_softc *, uint32_t, int,
382 struct mbuf **);
383
384 int mt7921_dma_disable(struct mwx_softc *sc, int force);
385 void mt7921_dma_enable(struct mwx_softc *sc);
386 int mt7921_e_mcu_fw_pmctrl(struct mwx_softc *);
387 int mt7921_e_mcu_drv_pmctrl(struct mwx_softc *);
388 int mt7921_wfsys_reset(struct mwx_softc *sc);
389 uint32_t mt7921_reg_addr(struct mwx_softc *, uint32_t);
390 int mt7921_init_hardware(struct mwx_softc *);
391 int mt7921_mcu_init(struct mwx_softc *);
392 int mt7921_load_firmware(struct mwx_softc *);
393 int mt7921_mac_wtbl_update(struct mwx_softc *, int);
394 void mt7921_mac_init_band(struct mwx_softc *sc, uint32_t);
395 int mt7921_mac_init(struct mwx_softc *);
396 int mt7921_mcu_patch_sem_ctrl(struct mwx_softc *, int);
397 int mt7921_mcu_init_download(struct mwx_softc *, uint32_t,
398 uint32_t, uint32_t);
399 int mt7921_mcu_send_firmware(struct mwx_softc *, int,
400 u_char *, size_t, size_t);
401 int mt7921_mcu_start_patch(struct mwx_softc *);
402 int mt7921_mcu_start_firmware(struct mwx_softc *, uint32_t,
403 uint32_t);
404 int mt7921_mcu_get_nic_capability(struct mwx_softc *);
405 int mt7921_mcu_fw_log_2_host(struct mwx_softc *, uint8_t);
406 int mt7921_mcu_set_eeprom(struct mwx_softc *);
407 int mt7921_mcu_set_rts_thresh(struct mwx_softc *, uint32_t,
408 uint8_t);
409 int mt7921_mcu_set_deep_sleep(struct mwx_softc *, int);
410 void mt7921_mcu_low_power_event(struct mwx_softc *, struct mbuf *);
411 void mt7921_mcu_tx_done_event(struct mwx_softc *, struct mbuf *);
412 void mwx_end_scan_task(void *);
413 void mt7921_mcu_scan_event(struct mwx_softc *, struct mbuf *);
414 int mt7921_mcu_hw_scan(struct mwx_softc *, int);
415 int mt7921_mcu_hw_scan_cancel(struct mwx_softc *);
416 int mt7921_mcu_set_mac_enable(struct mwx_softc *, int, int);
417 int mt7921_mcu_set_channel_domain(struct mwx_softc *);
418 uint8_t mt7921_mcu_chan_bw(struct ieee80211_channel *channel);
419 int mt7921_mcu_set_chan_info(struct mwx_softc *, int);
420 void mt7921_mcu_build_sku(struct mwx_softc *, int, int8_t *);
421 int mt7921_mcu_rate_txpower_band(struct mwx_softc *, int,
422 const uint8_t *, int, int);
423 int mt7921_mcu_set_rate_txpower(struct mwx_softc *);
424 void mt7921_mac_reset_counters(struct mwx_softc *);
425 void mt7921_mac_set_timing(struct mwx_softc *);
426 int mt7921_mcu_uni_add_dev(struct mwx_softc *, struct mwx_vif *,
427 struct mwx_node *, int);
428 int mt7921_mcu_set_sniffer(struct mwx_softc *, int);
429 int mt7921_mcu_set_beacon_filter(struct mwx_softc *, int);
430 int mt7921_mcu_set_bss_pm(struct mwx_softc *, int);
431 int mt7921_mcu_set_tx(struct mwx_softc *, struct mwx_vif *);
432 int mt7921_mac_fill_rx(struct mwx_softc *, struct mbuf *,
433 struct ieee80211_rxinfo *);
434 uint32_t mt7921_mac_tx_rate_val(struct mwx_softc *);
435 void mt7921_mac_write_txwi_80211(struct mwx_softc *, struct mbuf *,
436 struct ieee80211_node *, struct mt76_txwi *);
437 void mt7921_mac_write_txwi(struct mwx_softc *, struct mbuf *,
438 struct ieee80211_node *, struct mt76_txwi *);
439 void mt7921_mac_tx_free(struct mwx_softc *, struct mbuf *);
440 int mt7921_set_channel(struct mwx_softc *);
441
442 uint8_t mt7921_get_phy_mode_v2(struct mwx_softc *,
443 struct ieee80211_node *);
444 struct mbuf *mt7921_alloc_sta_tlv(int);
445 void *mt7921_append_tlv(struct mbuf *, uint16_t *, int, int);
446 void mt7921_mcu_add_basic_tlv(struct mbuf *, uint16_t *,
447 struct mwx_softc *, struct ieee80211_node *, int, int);
448 void mt7921_mcu_add_sta_tlv(struct mbuf *, uint16_t *,
449 struct mwx_softc *, struct ieee80211_node *, int, int);
450 int mt7921_mcu_wtbl_generic_tlv(struct mbuf *, uint16_t *,
451 struct mwx_softc *, struct ieee80211_node *);
452 int mt7921_mcu_wtbl_hdr_trans_tlv(struct mbuf *, uint16_t *,
453 struct mwx_softc *, struct ieee80211_node *);
454 int mt7921_mcu_wtbl_ht_tlv(struct mbuf *, uint16_t *,
455 struct mwx_softc *, struct ieee80211_node *);
456 int mt7921_mac_sta_update(struct mwx_softc *,
457 struct ieee80211_node *, int, int);
458
459 static inline uint32_t
mwx_read(struct mwx_softc * sc,uint32_t reg)460 mwx_read(struct mwx_softc *sc, uint32_t reg)
461 {
462 reg = mt7921_reg_addr(sc, reg);
463 return bus_space_read_4(sc->sc_st, sc->sc_memh, reg);
464 }
465
466 static inline void
mwx_write(struct mwx_softc * sc,uint32_t reg,uint32_t val)467 mwx_write(struct mwx_softc *sc, uint32_t reg, uint32_t val)
468 {
469 reg = mt7921_reg_addr(sc, reg);
470 bus_space_write_4(sc->sc_st, sc->sc_memh, reg, val);
471 }
472
/*
 * Issue a full read/write bus_space barrier over the whole register
 * window, ordering register accesses around remap updates.
 */
static inline void
mwx_barrier(struct mwx_softc *sc)
{
	bus_space_barrier(sc->sc_st, sc->sc_memh, 0, sc->sc_mems,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
479
480 static inline uint32_t
mwx_rmw(struct mwx_softc * sc,uint32_t reg,uint32_t val,uint32_t mask)481 mwx_rmw(struct mwx_softc *sc, uint32_t reg, uint32_t val, uint32_t mask)
482 {
483 reg = mt7921_reg_addr(sc, reg);
484 val |= bus_space_read_4(sc->sc_st, sc->sc_memh, reg) & ~mask;
485 bus_space_write_4(sc->sc_st, sc->sc_memh, reg, val);
486 return val;
487 }
488
/* Set bits in a register; an empty clear mask leaves other bits alone. */
static inline uint32_t
mwx_set(struct mwx_softc *sc, uint32_t reg, uint32_t bits)
{
	uint32_t ret;

	ret = mwx_rmw(sc, reg, bits, 0);
	return ret;
}
494
/* Clear bits in a register; nothing new is set. */
static inline uint32_t
mwx_clear(struct mwx_softc *sc, uint32_t reg, uint32_t bits)
{
	uint32_t ret;

	ret = mwx_rmw(sc, reg, 0, bits);
	return ret;
}
500
501 static inline uint32_t
mwx_map_reg_l1(struct mwx_softc * sc,uint32_t reg)502 mwx_map_reg_l1(struct mwx_softc *sc, uint32_t reg)
503 {
504 uint32_t offset = MT_HIF_REMAP_L1_GET_OFFSET(reg);
505 uint32_t base = MT_HIF_REMAP_L1_GET_BASE(reg);
506
507 mwx_rmw(sc, MT_HIF_REMAP_L1, base, MT_HIF_REMAP_L1_MASK);
508 mwx_barrier(sc);
509
510 return MT_HIF_REMAP_BASE_L1 + offset;
511 }
512
/*
 * Poll for timeout milliseconds or until the register reg, with mask
 * applied to the value read, reads the value val. Returns 0 on success,
 * ETIMEDOUT on failure.
 */
518 int
mwx_poll(struct mwx_softc * sc,uint32_t reg,uint32_t val,uint32_t mask,int timeout)519 mwx_poll(struct mwx_softc *sc, uint32_t reg, uint32_t val, uint32_t mask,
520 int timeout)
521 {
522 uint32_t cur;
523
524 reg = mt7921_reg_addr(sc, reg);
525 timeout *= 100;
526 do {
527 cur = bus_space_read_4(sc->sc_st, sc->sc_memh, reg) & mask;
528 if (cur == val)
529 return 0;
530 delay(10);
531 } while (timeout-- > 0);
532
533 DPRINTF("%s: poll timeout reg %x val %x mask %x cur %x\n",
534 DEVNAME(sc), reg, val, mask, cur);
535 return ETIMEDOUT;
536 }
537
538 /*
539 * ifp specific functions
540 */
/*
 * Bring the interface up: load firmware on first use, enable the MAC,
 * program regulatory and tx-power state, then either enter RUN state
 * directly (monitor mode) or start an initial scan.
 */
int
mwx_init(struct ifnet *ifp)
{
	struct mwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwx_node *mn;
	int rv;

	/* firmware is loaded lazily on the first ifconfig up */
	if (!sc->sc_fw_loaded) {
		rv = mwx_preinit(sc);
		if (rv)
			return rv;
	}

	DPRINTF("%s: init\n", DEVNAME(sc));
	/* keep the chip awake while we configure it */
	mt7921_mcu_set_deep_sleep(sc, 0);

	rv = mt7921_mcu_set_mac_enable(sc, 0, 1);
	if (rv)
		return rv;

	rv = mt7921_mcu_set_channel_domain(sc);
	if (rv)
		return rv;

#if 0
	/* XXX no channel available yet */
	rv = mt7921_mcu_set_chan_info(sc, MCU_EXT_CMD_SET_RX_PATH);
	if (rv)
		return rv;
#endif

	rv = mt7921_mcu_set_rate_txpower(sc);
	if (rv)
		return rv;

	mt7921_mac_reset_counters(sc);

	mn = (void *)ic->ic_bss;

	/* register our vif and the BSS node's WCID with the MCU */
	rv = mt7921_mcu_uni_add_dev(sc, &sc->sc_vif, mn, 1);
	if (rv)
		return rv;

	rv = mt7921_mcu_set_tx(sc, &sc->sc_vif);
	if (rv)
		return rv;

	mt7921_mac_wtbl_update(sc, mn->wcid);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		rv = mt7921_mcu_set_chan_info(sc, MCU_EXT_CMD_SET_RX_PATH);
		if (rv)
			return rv;
		rv = mt7921_set_channel(sc);
		if (rv)
			return rv;

		/* monitor mode: sniff everything, no RX filtering */
		mt7921_mcu_set_sniffer(sc, 1);
		mt7921_mcu_set_beacon_filter(sc, 0);
		sc->sc_rxfilter = 0;
		mwx_set(sc, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN);
	} else {
		mt7921_mcu_set_sniffer(sc, 0);
		sc->sc_rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
		mwx_clear(sc, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN);
	}
	mwx_write(sc, MT_WF_RFCR(0), sc->sc_rxfilter);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
		return 0;
	}

	ieee80211_begin_scan(ifp);

	/*
	 * ieee80211_begin_scan() ends up scheduling mwx_newstate_task().
	 * Wait until the transition to SCAN state has completed.
	 */

	return 0;
}
628
/*
 * Bring the interface down: cancel pending tasks, mark the interface
 * stopped, drop net80211 to INIT state and disable the MAC.
 */
void
mwx_stop(struct ifnet *ifp)
{
	struct mwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF("%s: stop\n", DEVNAME(sc));

	//XXX sc->sc_flags |= MWX_FLAG_SHUTDOWN;
	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->sc_reset_task);
	task_del(systq, &sc->sc_scan_task);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */

	mt7921_mcu_set_mac_enable(sc, 0, 0);

	/* XXX anything more ??? */
	/* check out mt7921e_mac_reset, mt7921e_unregister_device and
	   mt7921_pci_suspend
	 */
}
655
/*
 * ifnet watchdog: if_timer is armed in mwx_start(); clear it and let
 * net80211 run its own watchdog processing (which may re-arm it).
 */
void
mwx_watchdog(struct ifnet *ifp)
{
	ifp->if_timer = 0;
	ieee80211_watchdog(ifp);
}
662
/*
 * ifnet start routine: drain management frames from ic_mgtq and, while
 * in RUN state, data frames from the send queue, handing each to
 * mwx_tx().
 */
void
mwx_start(struct ifnet *ifp)
{
	struct mwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* XXX TODO handle oactive
		ifq_set_oactive(&ifp->if_snd);
		*/

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* management frames carry their node in ph_cookie */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* make sure the Ethernet header is contiguous */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* encapsulate into 802.11, also resolves the dest node */
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (mwx_tx(sc, m, ni) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		if (ifp->if_flags & IFF_UP)
			ifp->if_timer = 1;
	}
}
723
724 int
mwx_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)725 mwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
726 {
727 int s, err = 0;
728
729 s = splnet();
730 switch (cmd) {
731 case SIOCSIFADDR:
732 ifp->if_flags |= IFF_UP;
733 /* FALLTHROUGH */
734 case SIOCSIFFLAGS:
735 if (ifp->if_flags & IFF_UP) {
736 if (!(ifp->if_flags & IFF_RUNNING)) {
737 mwx_stop(ifp);
738 err = mwx_init(ifp);
739 }
740 } else {
741 if (ifp->if_flags & IFF_RUNNING)
742 mwx_stop(ifp);
743 }
744 break;
745 default:
746 err = ieee80211_ioctl(ifp, cmd, data);
747 break;
748 }
749
750 if (err == ENETRESET) {
751 err = 0;
752 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
753 (IFF_UP | IFF_RUNNING)) {
754 mwx_stop(ifp);
755 err = mwx_init(ifp);
756 }
757 }
758 splx(s);
759 return err;
760 }
761
762 int
mwx_media_change(struct ifnet * ifp)763 mwx_media_change(struct ifnet *ifp)
764 {
765 struct mwx_softc *sc = ifp->if_softc;
766 struct ieee80211com *ic = &sc->sc_ic;
767 int err;
768
769 err = ieee80211_media_change(ifp);
770 if (err != ENETRESET)
771 return err;
772
773 /* TODO lot more handling here */
774 if (ic->ic_fixed_mcs != -1) {
775 ;
776 } else if (ic->ic_fixed_rate != -1) {
777 ;
778 }
779 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
780 (IFF_UP | IFF_RUNNING)) {
781 /* XXX could be a bit harsh */
782 mwx_stop(ifp);
783 err = mwx_init(ifp);
784 }
785 return err;
786 }
787
788 /*
789 * net80211 specific functions.
790 */
791
/*
 * net80211 node allocation hook: allocate a mwx_node and assign it the
 * next WCID (hardware station table slot).
 */
struct ieee80211_node *
mwx_node_alloc(struct ieee80211com *ic)
{
	/* XXX this is just wrong */
	/* NOTE(review): wcid only ever grows and is never reclaimed when
	 * nodes are freed, so it will eventually exceed the hardware
	 * station table size — needs a proper allocator. */
	static int wcid = 1;
	struct mwx_softc *sc = ic->ic_softc;
	struct mwx_node *mn;

	mn = malloc(sizeof(struct mwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mn == NULL)
		return NULL;
	mn->wcid = wcid++;

	/* init WCID table entry */
	mt7921_mac_wtbl_update(sc, mn->wcid);

	return &mn->ni;
}
810
811 void
mwx_newassoc(struct ieee80211com * ic,struct ieee80211_node * ni,int isnew)812 mwx_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
813 {
814 struct mwx_softc *sc = ic->ic_softc;
815 struct mwx_node *mn = (void *)ni;
816 uint16_t wcid = 0;
817
818 if (isnew && ni->ni_associd != 0) {
819 /* only interested in true associations */
820 wcid = IEEE80211_AID(ni->ni_associd);
821
822 }
823 printf("%s: new assoc isnew=%d addr=%s WCID=%d\n", DEVNAME(sc),
824 isnew, ether_sprintf(ni->ni_macaddr), mn->wcid);
825
826 /* XXX TODO rate handling here */
827 }
828
829 #ifndef IEEE80211_STA_ONLY
/*
 * Hostap node-leave hook.  Placeholder: the node's WCID entry is not
 * yet released in hardware.
 */
void
mwx_node_leave(struct ieee80211com *ic, struct ieee80211_node *ni)
{
#if 0
	struct mwx_softc *sc = ic->ic_softc;
	struct mwx_node *mn = (void *)ni;
	uint16_t wcid = mn->wcid;

	/* TODO clear WCID */
#endif
}
841 #endif
842
/*
 * Start a hardware scan.  A running background scan is cancelled
 * first.  On success this mirrors the bookkeeping net80211 does for a
 * transition to SCAN state (link down, node cleanup, ic_state).
 */
int
mwx_scan(struct mwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int rv;

	if (sc->sc_flags & MWX_FLAG_BGSCAN) {
		rv = mt7921_mcu_hw_scan_cancel(sc);
		if (rv) {
			printf("%s: could not abort background scan\n",
			    DEVNAME(sc));
			return rv;
		}
	}

	rv = mt7921_mcu_hw_scan(sc, 0);
	if (rv) {
		printf("%s: could not initiate scan\n", DEVNAME(sc));
		return rv;
	}

	/*
	 * The current mode might have been fixed during association.
	 * Ensure all channels get scanned.
	 */
	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) != IFM_AUTO)
		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);

	sc->sc_flags |= MWX_FLAG_SCANNING;
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: %s -> %s\n", ifp->if_xname,
		    ieee80211_state_name[ic->ic_state],
		    ieee80211_state_name[IEEE80211_S_SCAN]);
	/* a foreground scan drops the link and the current BSS */
	if ((sc->sc_flags & MWX_FLAG_BGSCAN) == 0) {
		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
		ieee80211_node_cleanup(ic, ic->ic_bss);
	}
	ic->ic_state = IEEE80211_S_SCAN;

	return 0;
}
885
/*
 * net80211 state machine hook.  Adjusts firmware deep-sleep and channel
 * state around the transition, then chains to the stock handler saved
 * in sc_newstate.  Transitions to SCAN are handled by mwx_scan() and do
 * not reach the stock handler.
 */
int
mwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct mwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate;
	int rv;

	ostate = ic->ic_state;


	switch (ostate) {
	case IEEE80211_S_RUN:
		/* leaving RUN: let the chip deep-sleep again */
		if (nstate != ostate)
			mt7921_mcu_set_deep_sleep(sc, 1);
		break;
	case IEEE80211_S_SCAN:
		if (nstate == ostate) {
			/* a scan is already in progress, nothing to do */
			if (sc->sc_flags & MWX_FLAG_SCANNING)
				return 0;
		}
		break;
	default:
		break;
	}

	printf("%s: %s %d -> %d\n", DEVNAME(sc), __func__, ostate, nstate);

	/* XXX TODO */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;
	case IEEE80211_S_SCAN:
		rv = mwx_scan(sc);
		if (rv)
			/* XXX error handling */
			return rv;
		/* mwx_scan() already did the SCAN state bookkeeping */
		return 0;
	case IEEE80211_S_AUTH:
		rv = mt7921_set_channel(sc);
		if (rv)
			return rv;
		mt7921_mcu_set_deep_sleep(sc, 0);
		mt7921_mac_sta_update(sc, sc->sc_ic.ic_bss, 1, 1);
		break;
	case IEEE80211_S_ASSOC:
		mt7921_mcu_set_deep_sleep(sc, 1);
		break;
	case IEEE80211_S_RUN:
		mt7921_mcu_hw_scan_cancel(sc); /* XXX */
		mt7921_mcu_set_deep_sleep(sc, 0);
		break;
	}

	return sc->sc_newstate(ic, nstate, arg);
}
941
942 #if NBPFILTER > 0
/*
 * Attach the radiotap bpf interface and pre-fill the static parts of
 * the RX and TX radiotap headers (length and present bitmaps).
 */
void
mwx_radiotap_attach(struct mwx_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(MWX_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(MWX_TX_RADIOTAP_PRESENT);
}
957 #endif
958
959 int
mwx_tx(struct mwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)960 mwx_tx(struct mwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
961 {
962 struct mwx_node *mn = (void *)ni;
963 struct mwx_txwi *mt;
964 struct mt76_txwi *txp;
965 int rv;
966
967 if ((mt = mwx_txwi_get(sc)) == NULL)
968 return ENOBUFS;
969 /* XXX DMA memory access without BUS_DMASYNC_PREWRITE */
970 txp = mt->mt_desc;
971 memset(txp, 0, sizeof(*txp));
972 mt7921_mac_write_txwi(sc, m, ni, txp);
973
974 rv = mwx_txwi_enqueue(sc, mt, m);
975 if (rv != 0)
976 return rv;
977
978 printf("%s: TX WCID %08x id %d pid %d\n", DEVNAME(sc), mn->wcid, 0, mt->mt_idx);
979 printf("%s: TX twxi %08x %08x %08x %08x %08x %08x %08x %08x\n",
980 DEVNAME(sc), txp->txwi[0], txp->txwi[1],
981 txp->txwi[2], txp->txwi[3], txp->txwi[4], txp->txwi[5],
982 txp->txwi[6], txp->txwi[7]);
983 printf("%s: TX hw txp %d %d %d %d %04x %04x %04x %04x\n", DEVNAME(sc),
984 txp->msdu_id[0], txp->msdu_id[1], txp->msdu_id[2], txp->msdu_id[3],
985 txp->ptr[0].len0, txp->ptr[0].len1, txp->ptr[1].len0, txp->ptr[1].len1);
986
987 return mwx_dma_txwi_enqueue(sc, &sc->sc_txq, mt);
988 }
989
990 void
mwx_rx(struct mwx_softc * sc,struct mbuf * m,struct mbuf_list * ml)991 mwx_rx(struct mwx_softc *sc, struct mbuf *m, struct mbuf_list *ml)
992 {
993 struct ieee80211com *ic = &sc->sc_ic;
994 struct ifnet *ifp = &ic->ic_if;
995 struct ieee80211_node *ni;
996 struct ieee80211_frame *wh;
997 struct ieee80211_rxinfo rxi = { 0 };
998
999
1000 if (mt7921_mac_fill_rx(sc, m, &rxi) == -1) {
1001 ifp->if_ierrors++;
1002 m_freem(m);
1003 return;
1004 }
1005
1006 wh = mtod(m, struct ieee80211_frame *);
1007
1008 #if NBPFILTER > 0
1009 if (__predict_false(sc->sc_drvbpf != NULL)) {
1010 struct mwx_rx_radiotap_header *tap = &sc->sc_rxtap;
1011 uint32_t tsf_lo, tsf_hi;
1012 /* get timestamp (low and high 32 bits) */
1013 tsf_hi = 0;
1014 tsf_lo = 0;
1015 tap->wr_tsft = htole64(((uint64_t)tsf_hi << 32) | tsf_lo);
1016 tap->wr_flags = 0;
1017 tap->wr_rate = 2; /* XXX */
1018 tap->wr_chan_freq =
1019 htole16(ic->ic_channels[rxi.rxi_chan].ic_freq);
1020 tap->wr_chan_flags =
1021 ic->ic_channels[rxi.rxi_chan].ic_flags;
1022 tap->wr_dbm_antsignal = 0;
1023 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m,
1024 BPF_DIRECTION_IN);
1025 }
1026 #endif
1027
1028 /* grab a reference to the source node */
1029 ni = ieee80211_find_rxnode(ic, wh);
1030
1031 /* send the frame to the 802.11 layer */
1032 /* TODO MAYBE rxi.rxi_rssi = rssi; */
1033 ieee80211_inputm(ifp, m, ni, &rxi, ml);
1034
1035 /* node is no longer needed */
1036 ieee80211_release_node(ic, ni);
1037 }
1038
1039 /*
1040 * Driver specific functions.
1041 */
/*
 * PCI interrupt handler.  Interrupts are masked for the duration of the
 * handler, acknowledged in MT_WFDMA0_HOST_INT_STA, and dispatched to the
 * TX/RX completion routines.  Returns 1 if the interrupt was ours.
 */
int
mwx_intr(void *arg)
{
	struct mwx_softc *sc = arg;
	uint32_t intr, intr_sw;
	uint32_t mask = MT_INT_RX_DONE_ALL|MT_INT_TX_DONE_ALL|MT_INT_MCU_CMD;

	/* mask all interrupts while we process this one */
	mwx_write(sc, MT_WFDMA0_HOST_INT_ENA, 0);
	intr = mwx_read(sc, MT_WFDMA0_HOST_INT_STA);
	if (intr == 0) {
		/* not ours: re-enable and pass on */
		mwx_write(sc, MT_WFDMA0_HOST_INT_ENA, mask);
		return 0;
	}

	/* TODO power management */
	// mt76_connac_pm_ref(&dev->mphy, &dev->pm);

	if (intr & ~mask)
		printf("%s: unhandled interrupt %08x\n", DEVNAME(sc),
		    intr & ~mask);
	/* ack interrupts */
	intr &= mask;
	mwx_write(sc, MT_WFDMA0_HOST_INT_STA, intr);

	if (intr & MT_INT_MCU_CMD) {
		intr_sw = mwx_read(sc, MT_MCU_CMD);
		/* ack MCU2HOST_SW_INT_STA */
		mwx_write(sc, MT_MCU_CMD, intr_sw);
		/* a wake request from the MCU implies pending RX data */
		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE)
			intr |= MT_INT_RX_DONE_DATA;
	}

	if (intr & MT_INT_TX_DONE_ALL)
		mwx_dma_tx_done(sc);

	if (intr & MT_INT_RX_DONE_WM)
		mwx_dma_rx_done(sc, &sc->sc_rxfwdlq);
	if (intr & MT_INT_RX_DONE_WM2)
		mwx_dma_rx_done(sc, &sc->sc_rxmcuq);
	if (intr & MT_INT_RX_DONE_DATA)
		mwx_dma_rx_done(sc, &sc->sc_rxq);

	/* TODO power management */
	// mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	/* re-enable interrupts */
	mwx_write(sc, MT_WFDMA0_HOST_INT_ENA, mask);

	return 1;
}
1091
/*
 * Deferred initialization, run once the firmware can be loaded from
 * disk (see mwx_attach_hook): bring up the hardware, publish supported
 * rates and channels to net80211, set the MAC address and re-run media
 * init now that the channel list is known.
 * Returns 0 on success or an errno from the hardware setup steps.
 */
int
mwx_preinit(struct mwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int rv, i;
	uint8_t chan;

	DPRINTF("%s: init\n", DEVNAME(sc));

	if ((rv = mt7921_init_hardware(sc)) != 0)
		return rv;

	if ((rv = mt7921_mcu_set_deep_sleep(sc, 1)) != 0)
		return rv;

	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	if (sc->sc_capa.has_2ghz) {
		/* set supported .11b/.11g channels */
		for (i = 0; i < MWX_NUM_2GHZ_CHANNELS; i++) {
			chan = mwx_channels_2ghz[i];
			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
			ic->ic_channels[chan].ic_flags =
			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
			/* TODO 11n and 11ac flags */
		}

	}
	if (sc->sc_capa.has_5ghz) {
		ic->ic_sup_rates[IEEE80211_MODE_11A] =
		    ieee80211_std_rateset_11a;
		/* set supported .11a channels */
		for (i = 0; i < MWX_NUM_5GHZ_CHANNELS; i++) {
			chan = mwx_channels_5ghz[i];
			ic->ic_channels[chan].ic_freq =
			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
			ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
			/* TODO 11n and 11ac flags */
		}
	}
#ifdef NOTYET
	/* TODO support for 6GHz */
	if (sc->sc_capa.has_6ghz) {
		for (i = 0; i < MWX_NUM_6GHZ_CHANNELS; i++) {
		}
	}
#endif

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);

	/* no address configured yet: use the one read from the device */
	if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
		IEEE80211_ADDR_COPY(ic->ic_myaddr, sc->sc_lladdr);

	/* Configure MAC address. */
	rv = if_setlladdr(ifp, ic->ic_myaddr);
	if (rv)
		printf("%s: could not set MAC address (error %d)\n",
		    DEVNAME(sc), rv);

	/* re-init media now that channels are known (first run in attach) */
	ieee80211_media_init(ifp, mwx_media_change, ieee80211_media_status);

	sc->sc_fw_loaded = 1;
	return 0;
}
1160
/*
 * Mountroot hook: the firmware lives on disk, so the real device
 * bring-up has to wait until the root filesystem is available.
 */
void
mwx_attach_hook(struct device *self)
{
	struct mwx_softc *sc = (struct mwx_softc *)self;

	mwx_preinit(sc);
}
1168
1169 int
mwx_match(struct device * parent,void * match __unused,void * aux)1170 mwx_match(struct device *parent, void *match __unused, void *aux)
1171 {
1172 struct pci_attach_args *pa = aux;
1173
1174 return pci_matchbyid(pa, mwx_devices, nitems(mwx_devices));
1175 }
1176
1177 void
mwx_attach(struct device * parent,struct device * self,void * aux)1178 mwx_attach(struct device *parent, struct device *self, void *aux)
1179 {
1180 struct mwx_softc *sc = (struct mwx_softc *)self;
1181 struct ieee80211com *ic = &sc->sc_ic;
1182 struct ifnet *ifp = &ic->ic_if;
1183 struct pci_attach_args *pa = aux;
1184 pci_intr_handle_t ih;
1185 pcireg_t memtype;
1186 uint32_t hwid, hwrev;
1187 int error;
1188
1189 sc->sc_pc = pa->pa_pc;
1190 sc->sc_tag = pa->pa_tag;
1191 sc->sc_dmat = pa->pa_dmat;
1192 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MEDIATEK_MT7922)
1193 sc->sc_hwtype = MWX_HW_MT7922;
1194 else
1195 sc->sc_hwtype = MWX_HW_MT7921;
1196
1197 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
1198 if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
1199 &sc->sc_st, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1200 printf("%s: can't map mem space\n", DEVNAME(sc));
1201 return;
1202 }
1203
1204 if (pci_intr_map_msix(pa, 0, &ih) &&
1205 pci_intr_map_msi(pa, &ih) &&
1206 pci_intr_map(pa, &ih)) {
1207 printf("%s: can't map interrupt\n", DEVNAME(sc));
1208 bus_space_unmap(sc->sc_st, sc->sc_memh, sc->sc_mems);
1209 return;
1210 }
1211
1212 hwid = mwx_read(sc, MT_HW_CHIPID) & 0xffff;
1213 hwrev = mwx_read(sc, MT_HW_REV) & 0xff;
1214
1215 printf(": %s, rev: %x.%x\n", pci_intr_string(pa->pa_pc, ih),
1216 hwid, hwrev);
1217
1218 mwx_write(sc, MT_WFDMA0_HOST_INT_ENA, 0);
1219 mwx_write(sc, MT_PCIE_MAC_INT_ENABLE, 0xff);
1220
1221 sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
1222 mwx_intr, sc, DEVNAME(sc));
1223
1224 if (mt7921_e_mcu_fw_pmctrl(sc) != 0 ||
1225 mt7921_e_mcu_drv_pmctrl(sc) != 0)
1226 goto fail;
1227
1228 if ((error = mwx_txwi_alloc(sc, MWX_TXWI_MAX)) != 0) {
1229 printf("%s: failed to allocate DMA resources %d\n",
1230 DEVNAME(sc), error);
1231 goto fail;
1232 }
1233
1234 if ((error = mwx_dma_alloc(sc)) != 0) {
1235 printf("%s: failed to allocate DMA resources %d\n",
1236 DEVNAME(sc), error);
1237 goto fail;
1238 }
1239
1240 /* set regulatory domain to '00' */
1241 sc->sc_alpha2[0] = '0';
1242 sc->sc_alpha2[1] = '0';
1243
1244 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
1245 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
1246 ic->ic_state = IEEE80211_S_INIT;
1247
1248 /* Set device capabilities. */
1249 ic->ic_caps =
1250 #if NOTYET
1251 IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
1252 IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
1253 #endif
1254 IEEE80211_C_WEP | /* WEP */
1255 IEEE80211_C_RSN | /* WPA/RSN */
1256 IEEE80211_C_SCANALL | /* device scans all channels at once */
1257 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
1258 IEEE80211_C_MONITOR | /* monitor mode supported */
1259 #ifndef IEEE80211_STA_ONLY
1260 IEEE80211_C_IBSS | /* IBSS mode supported */
1261 IEEE80211_C_HOSTAP | /* HostAP mode supported */
1262 IEEE80211_C_APPMGT | /* HostAP power management */
1263 #endif
1264 IEEE80211_C_SHSLOT | /* short slot time supported */
1265 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
1266
1267 #if NOTYET
1268 ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
1269 ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
1270 ic->ic_htcaps |=
1271 (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
1272 #endif
1273 ic->ic_htxcaps = 0;
1274 ic->ic_txbfcaps = 0;
1275 ic->ic_aselcaps = 0;
1276 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
1277
1278 #if NOTYET
1279 ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_11454 |
1280 (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
1281 IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
1282 (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
1283 IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
1284 IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
1285 #endif
1286
1287 /* IBSS channel undefined for now. */
1288 ic->ic_ibss_chan = &ic->ic_channels[1];
1289
1290 /* HW supports up to 288 STAs in HostAP and IBSS modes */
1291 ic->ic_max_aid = min(IEEE80211_AID_MAX, MWX_WCID_MAX);
1292
1293 //XXX TODO ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
1294
1295 ifp->if_softc = sc;
1296 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1297 ifp->if_ioctl = mwx_ioctl;
1298 ifp->if_start = mwx_start;
1299 ifp->if_watchdog = mwx_watchdog;
1300 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1301
1302 if_attach(ifp);
1303 ieee80211_ifattach(ifp);
1304 ieee80211_media_init(ifp, mwx_media_change, ieee80211_media_status);
1305
1306 #if NBPFILTER > 0
1307 mwx_radiotap_attach(sc);
1308 #endif
1309
1310 ic->ic_node_alloc = mwx_node_alloc;
1311 ic->ic_newassoc = mwx_newassoc;
1312 #ifndef IEEE80211_STA_ONLY
1313 ic->ic_node_leave = mwx_node_leave;
1314 #endif
1315 /* TODO XXX
1316 ic->ic_bgscan_start = mwx_bgscan;
1317 ic->ic_bgscan_done = mwx_bgscan_done;
1318 ic->ic_set_key = mwx_set_key;
1319 ic->ic_delete_key = mwx_delete_key;
1320 */
1321
1322 /* Override 802.11 state transition machine. */
1323 sc->sc_newstate = ic->ic_newstate;
1324 ic->ic_newstate = mwx_newstate;
1325
1326 task_set(&sc->sc_reset_task, mwx_reset_task, sc);
1327 task_set(&sc->sc_scan_task, mwx_end_scan_task, sc);
1328
1329 /*
1330 * We cannot read the MAC address without loading the
1331 * firmware from disk. Postpone until mountroot is done.
1332 */
1333 config_mountroot(self, mwx_attach_hook);
1334
1335 return;
1336
1337 fail:
1338 mwx_txwi_free(sc);
1339 mwx_dma_free(sc);
1340 pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
1341 bus_space_unmap(sc->sc_st, sc->sc_memh, sc->sc_mems);
1342 return;
1343 }
1344
/*
 * Power-state transitions (suspend/resume) are not implemented yet.
 */
int
mwx_activate(struct device *self, int act)
{
	/* XXX TODO */
	(void)self;
	(void)act;
	return 0;
}
1351
/* autoconf(9) driver definition: "mwx" units, network interface class */
struct cfdriver mwx_cd = {
	NULL, "mwx", DV_IFNET
};
1355
/* autoconf(9) attachment: softc size, match/attach; no detach yet */
struct cfattach mwx_ca = {
	sizeof(struct mwx_softc), mwx_match, mwx_attach,
	NULL, mwx_activate
};
1360
1361 void
mwx_reset(struct mwx_softc * sc)1362 mwx_reset(struct mwx_softc *sc)
1363 {
1364 if (sc->sc_resetting)
1365 return;
1366 sc->sc_resetting = 1;
1367 task_add(systq, &sc->sc_reset_task);
1368 }
1369
/*
 * Reset task, run from the system task queue: stop the interface and,
 * if it is still administratively up, bring it back to running.
 * NOTE(review): sc_resetting is not cleared here -- presumably
 * mwx_init()/mwx_stop() take care of that; confirm.
 */
void
mwx_reset_task(void *arg)
{
	struct mwx_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int fatal = 0;	/* never set yet; placeholder for fatal errors */

	if (ifp->if_flags & IFF_RUNNING)
		mwx_stop(ifp);

	/* restart only when up but no longer running, and not fatal */
	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		mwx_init(ifp);
}
1383
/*
 * Allocate the TXWI (TX descriptor) array: one DMA-able region with
 * count mt76_txwi descriptors, a per-slot data DMA map, and a freelist
 * of usable slots. Indices below MT_PACKET_ID_FIRST are reserved and
 * never enter the freelist.
 * Returns 0 on success or an errno; partial allocations are torn down
 * via mwx_txwi_free().
 */
int
mwx_txwi_alloc(struct mwx_softc *sc, int count)
{
	int error, nsegs, i;
	struct mwx_txwi_desc *q = &sc->sc_txwi;
	bus_size_t size = count * sizeof(*q->mt_desc);
	uint32_t addr;

	LIST_INIT(&q->mt_freelist);
	q->mt_count = count;

	/* single-segment map covering the whole descriptor array */
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &q->mt_map);
	if (error != 0) {
		printf("%s: could not create desc TWXI map\n", DEVNAME(sc));
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &q->mt_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate TWXI memory\n", DEVNAME(sc));
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &q->mt_seg, nsegs, size,
	    (caddr_t *)&q->mt_desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n", DEVNAME(sc));
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, q->mt_map, q->mt_desc,
	    size, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n", DEVNAME(sc));
		goto fail;
	}

	/* device-visible base address of the descriptor array */
	addr = q->mt_map->dm_segs[0].ds_addr;

	q->mt_data = mallocarray(count, sizeof(*q->mt_data),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q->mt_data == NULL) {
		printf("%s: could not allocate soft data\n", DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	/* per-slot soft state: index, descriptor, bus address, data map */
	for (i = 0; i < count; i++) {
		q->mt_data[i].mt_idx = i;
		q->mt_data[i].mt_desc = &q->mt_desc[i];
		q->mt_data[i].mt_addr = addr + i * sizeof(*q->mt_desc);
		error = bus_dmamap_create(sc->sc_dmat, MT_TXD_LEN_MASK,
		    MT_MAX_SCATTER, MT_TXD_LEN_MASK, 0, BUS_DMA_NOWAIT,
		    &q->mt_data[i].mt_map);
		if (error != 0) {
			printf("%s: could not create data DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	/* insert backwards so low indices are handed out first */
	for (i = count - 1; i >= MT_PACKET_ID_FIRST; i--)
		LIST_INSERT_HEAD(&q->mt_freelist, &q->mt_data[i], mt_entry);

	return 0;

fail:
	mwx_txwi_free(sc);
	return error;
}
1456
1457 void
mwx_txwi_free(struct mwx_softc * sc)1458 mwx_txwi_free(struct mwx_softc *sc)
1459 {
1460 struct mwx_txwi_desc *q = &sc->sc_txwi;
1461
1462 if (q->mt_data != NULL) {
1463 int i;
1464 for (i = 0; i < q->mt_count; i++) {
1465 struct mwx_txwi *mt = &q->mt_data[i];
1466 bus_dmamap_destroy(sc->sc_dmat, mt->mt_map);
1467 m_freem(mt->mt_mbuf);
1468 if (i >= MT_PACKET_ID_FIRST)
1469 LIST_REMOVE(mt, mt_entry);
1470 }
1471 free(q->mt_data, M_DEVBUF, q->mt_count * sizeof(*q->mt_data));
1472 }
1473
1474 if (q->mt_desc != NULL) {
1475 bus_dmamap_sync(sc->sc_dmat, q->mt_map, 0,
1476 q->mt_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1477 bus_dmamap_unload(sc->sc_dmat, q->mt_map);
1478 }
1479
1480 /*
1481 * XXX TODO this is probably not correct as a check, should use
1482 * some state variable bitfield to decide which steps need to be run.
1483 */
1484 if (q->mt_seg.ds_len != 0)
1485 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)q->mt_desc,
1486 q->mt_count * sizeof(*q->mt_desc));
1487 if (q->mt_map != NULL)
1488 bus_dmamem_free(sc->sc_dmat, &q->mt_seg, 1);
1489
1490 memset(q, 0, sizeof(*q));
1491 }
1492
1493 struct mwx_txwi *
mwx_txwi_get(struct mwx_softc * sc)1494 mwx_txwi_get(struct mwx_softc *sc)
1495 {
1496 struct mwx_txwi *mt;
1497
1498 mt = LIST_FIRST(&sc->sc_txwi.mt_freelist);
1499 if (mt == NULL)
1500 return NULL;
1501 LIST_REMOVE(mt, mt_entry);
1502 return mt;
1503 }
1504
1505 void
mwx_txwi_put(struct mwx_softc * sc,struct mwx_txwi * mt)1506 mwx_txwi_put(struct mwx_softc *sc, struct mwx_txwi *mt)
1507 {
1508 /* TODO more cleanup here probably */
1509
1510 if (mt->mt_idx < MT_PACKET_ID_FIRST)
1511 return;
1512 LIST_INSERT_HEAD(&sc->sc_txwi.mt_freelist, mt, mt_entry);
1513 }
1514
/*
 * Bind an mbuf to TXWI slot mt: DMA-map the chain (defragmenting once
 * on EFBIG) and record the scatter segments in the descriptor's txp
 * pointer array, two buffer/length pairs per entry. The final segment
 * is flagged with MT_TXD_LEN_LAST.
 * Returns 0 or the bus_dmamap_load_mbuf() error.
 */
int
mwx_txwi_enqueue(struct mwx_softc *sc, struct mwx_txwi *mt, struct mbuf *m)
{
	struct mwx_txwi_desc *q = &sc->sc_txwi;
	struct mt76_txwi *txp = mt->mt_desc;
	struct mt76_connac_txp_ptr *ptr = &txp->ptr[0];
	uint32_t addr;
	uint16_t len;
	int i, nsegs, rv;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, mt->mt_map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	/* too many segments: compact the chain and retry once */
	if (rv == EFBIG && m_defrag(m, M_DONTWAIT) == 0)
		rv = bus_dmamap_load_mbuf(sc->sc_dmat, mt->mt_map, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (rv != 0)
		return rv;

	nsegs = mt->mt_map->dm_nsegs;

	bus_dmamap_sync(sc->sc_dmat, mt->mt_map, 0, mt->mt_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* tag the descriptor with this slot's id for TX-free matching */
	txp->msdu_id[0] = htole16(mt->mt_idx | MT_MSDU_ID_VALID);
	mt->mt_mbuf = m;

	bus_dmamap_sync(sc->sc_dmat, q->mt_map, 0, q->mt_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	for (i = 0; i < nsegs; i++) {
		/* hardware only takes 32bit addresses and bounded lengths */
		KASSERT(mt->mt_map->dm_segs[i].ds_addr <= UINT32_MAX);
		KASSERT(mt->mt_map->dm_segs[i].ds_len <= MT_TXD_LEN_MASK);
		addr = mt->mt_map->dm_segs[i].ds_addr;
		len = mt->mt_map->dm_segs[i].ds_len;

		if (i == nsegs - 1)
			len |= MT_TXD_LEN_LAST;

		/* even segments fill buf0/len0, odd ones buf1/len1 */
		if ((i & 1) == 0) {
			ptr->buf0 = htole32(addr);
			ptr->len0 = htole16(len);
		} else {
			ptr->buf1 = htole32(addr);
			ptr->len1 = htole16(len);
			ptr++;
		}
	}
	bus_dmamap_sync(sc->sc_dmat, q->mt_map, 0, q->mt_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return 0;
}
1566
/*
 * Allocate one TX or RX DMA ring: count hardware descriptors in a
 * single DMA region, per-slot software state and data maps, and the
 * ring registers at regbase. Returns 0 on success or an errno;
 * partial allocations are torn down via mwx_queue_free().
 */
int
mwx_queue_alloc(struct mwx_softc *sc, struct mwx_queue *q, int count,
    uint32_t regbase)
{
	int error, nsegs, i;
	bus_size_t size = count * sizeof(*q->mq_desc);

	q->mq_regbase = regbase;
	q->mq_count = count;

	/* single-segment map covering the whole descriptor ring */
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &q->mq_map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n", DEVNAME(sc));
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &q->mq_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n", DEVNAME(sc));
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &q->mq_seg, nsegs, size,
	    (caddr_t *)&q->mq_desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n", DEVNAME(sc));
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, q->mq_map, q->mq_desc,
	    size, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n", DEVNAME(sc));
		goto fail;
	}

	q->mq_data = mallocarray(count, sizeof(*q->mq_data),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q->mq_data == NULL) {
		printf("%s: could not allocate soft data\n", DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	/* one data map per ring slot for the attached mbuf */
	for (i = 0; i < count; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MT_MAX_SIZE,
		    MT_MAX_SCATTER, MT_MAX_SIZE, 0, BUS_DMA_NOWAIT,
		    &q->mq_data[i].md_map);
		if (error != 0) {
			printf("%s: could not create data DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	/* clear all slots and program the ring into the hardware */
	mwx_queue_reset(sc, q);
	return 0;

fail:
	mwx_queue_free(sc, q);
	return error;
}
1631
1632 void
mwx_queue_free(struct mwx_softc * sc,struct mwx_queue * q)1633 mwx_queue_free(struct mwx_softc *sc, struct mwx_queue *q)
1634 {
1635 if (q->mq_data != NULL) {
1636 int i;
1637 for (i = 0; i < q->mq_count; i++) {
1638 struct mwx_queue_data *md = &q->mq_data[i];
1639 bus_dmamap_destroy(sc->sc_dmat, md->md_map);
1640 m_freem(md->md_mbuf);
1641 }
1642 free(q->mq_data, M_DEVBUF, q->mq_count * sizeof(*q->mq_data));
1643 }
1644
1645 if (q->mq_desc != NULL) {
1646 bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0,
1647 q->mq_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1648 bus_dmamap_unload(sc->sc_dmat, q->mq_map);
1649 }
1650
1651 /*
1652 * XXX TODO this is probably not correct as a check, should use
1653 * some state variable bitfield to decide which steps need to be run.
1654 */
1655 if (q->mq_seg.ds_len != 0)
1656 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)q->mq_desc,
1657 q->mq_count * sizeof(*q->mq_desc));
1658 if (q->mq_map != NULL)
1659 bus_dmamem_free(sc->sc_dmat, &q->mq_seg, 1);
1660
1661 memset(q, 0, sizeof(*q));
1662 }
1663
/*
 * Reset a ring to its empty state: mark every descriptor as done,
 * program base/size and zero the CPU and DMA indices into the
 * hardware, then release any mbufs still attached to the slots.
 */
void
mwx_queue_reset(struct mwx_softc *sc, struct mwx_queue *q)
{
	int i;
	uint32_t dmaaddr;
	struct mwx_queue_data *md;

	/* clear descriptors */
	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < q->mq_count; i++) {
		q->mq_desc[i].buf0 = 0;
		q->mq_desc[i].buf1 = 0;
		q->mq_desc[i].info = 0;
		/* DMA_DONE marks the slot as owned by software */
		q->mq_desc[i].ctrl = htole32(MT_DMA_CTL_DMA_DONE);
	}

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	/* reset DMA registers */
	KASSERT(q->mq_map->dm_nsegs == 1);
	KASSERT(q->mq_map->dm_segs[0].ds_addr <= UINT32_MAX);
	dmaaddr = q->mq_map->dm_segs[0].ds_addr;
	mwx_write(sc, q->mq_regbase + MT_DMA_DESC_BASE, dmaaddr);
	mwx_write(sc, q->mq_regbase + MT_DMA_RING_SIZE, q->mq_count);
	mwx_write(sc, q->mq_regbase + MT_DMA_CPU_IDX, 0);
	mwx_write(sc, q->mq_regbase + MT_DMA_DMA_IDX, 0);
	q->mq_cons = 0;
	q->mq_prod = 0;

	/* free buffers */
	for (i = 0; i < q->mq_count; i++) {
		md = &q->mq_data[i];
		if (md->md_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmat, md->md_map, 0,
			    md->md_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, md->md_map);
			m_freem(md->md_mbuf);
			md->md_mbuf = NULL;
		}
	}
}
1709
/*
 * Attach a fresh mbuf cluster to RX slot md and point the hardware
 * descriptor at it (single segment, buf0 only).
 * Returns 0, ENOMEM or a bus_dmamap_load_mbuf() error.
 */
int
mwx_buf_fill(struct mwx_softc *sc, struct mwx_queue_data *md,
    struct mt76_desc *desc)
{
	struct mbuf *m;
	uint32_t buf0, len0, ctrl;
	int rv;

	m = MCLGETL(NULL, M_DONTWAIT, MT_RX_BUF_SIZE);
	if (m == NULL)
		return (ENOMEM);

	/* expose the full cluster to the hardware */
	m->m_pkthdr.len = m->m_len = MT_RX_BUF_SIZE;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, md->md_map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (rv != 0) {
		printf("%s: could not load data, %d\n", DEVNAME(sc), rv);
		m_freem(m);
		return (rv);
	}

	bus_dmamap_sync(sc->sc_dmat, md->md_map, 0,
	    md->md_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	md->md_mbuf = m;

	/* a cluster maps as one segment; hardware addresses are 32bit */
	KASSERT(md->md_map->dm_nsegs == 1);
	KASSERT(md->md_map->dm_segs[0].ds_addr <= UINT32_MAX);
	buf0 = md->md_map->dm_segs[0].ds_addr;
	len0 = md->md_map->dm_segs[0].ds_len;
	ctrl = MT_DMA_CTL_SD_LEN0(len0);
	ctrl |= MT_DMA_CTL_LAST_SEC0;

	desc->buf0 = htole32(buf0);
	desc->buf1 = 0;
	desc->info = 0;
	desc->ctrl = htole32(ctrl);

	return 0;
}
1751
/*
 * Refill an RX ring: populate every empty slot from the producer index
 * up to one before the consumer index with fresh mbufs, then publish
 * the new producer index to the hardware.
 * Returns 0, or the mwx_buf_fill() error (ring left partially filled).
 */
int
mwx_queue_fill(struct mwx_softc *sc, struct mwx_queue *q)
{
	u_int idx, last;
	int rv;

	/* keep one slot unused to distinguish full from empty */
	last = (q->mq_count + q->mq_cons - 1) % q->mq_count;
	idx = q->mq_prod;

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	while (idx != last) {
		rv = mwx_buf_fill(sc, &q->mq_data[idx], &q->mq_desc[idx]);
		if (rv != 0) {
			printf("%s: could not fill data, slot %d err %d\n",
			    DEVNAME(sc), idx, rv);
			return rv;
		}
		idx = (idx + 1) % q->mq_count;
	}

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	q->mq_prod = idx;
	/* hand the new buffers to the hardware */
	mwx_write(sc, q->mq_regbase + MT_DMA_CPU_IDX, q->mq_prod);

	return 0;
}
1782
/*
 * One-time DMA bring-up: stop the DMA engine, reset the WiFi
 * subsystem, allocate the three TX rings (data, MCU, firmware
 * download) and three RX rings (pre-filled with buffers), then start
 * the DMA engine. Returns 0 or the first error encountered; callers
 * clean up with mwx_dma_free().
 */
int
mwx_dma_alloc(struct mwx_softc *sc)
{
	int rv;

	/* Stop DMA engine and reset wfsys */
	if ((rv = mt7921_dma_disable(sc, 1)) != 0)
		return rv;
	if ((rv = mt7921_wfsys_reset(sc)) != 0)
		return rv;

	/* TX queues */
	if ((rv = mwx_queue_alloc(sc, &sc->sc_txq, 256,
	    MT_TX_DATA_RING_BASE)) != 0)
		return rv;
	if ((rv = mwx_queue_alloc(sc, &sc->sc_txmcuq, 16 /* XXX */,
	    MT_TX_MCU_RING_BASE)) != 0)
		return rv;
	if ((rv = mwx_queue_alloc(sc, &sc->sc_txfwdlq, 16 /* XXX */,
	    MT_TX_FWDL_RING_BASE)) != 0)
		return rv;

	/* RX queues */
	if ((rv = mwx_queue_alloc(sc, &sc->sc_rxq, 256,
	    MT_RX_DATA_RING_BASE)) != 0 ||
	    (rv = mwx_queue_fill(sc, &sc->sc_rxq)) != 0)
		return rv;
	if ((rv = mwx_queue_alloc(sc, &sc->sc_rxmcuq, 16 /* XXX */,
	    MT_RX_MCU_RING_BASE)) != 0 ||
	    (rv = mwx_queue_fill(sc, &sc->sc_rxmcuq)) != 0)
		return rv;
	if ((rv = mwx_queue_alloc(sc, &sc->sc_rxfwdlq, 16 /* XXX */,
	    MT_RX_FWDL_RING_BASE)) != 0 ||
	    (rv = mwx_queue_fill(sc, &sc->sc_rxfwdlq)) != 0)
		return rv;

	/* enable DMA engine */
	mt7921_dma_enable(sc);

	return 0;
}
1824
/*
 * Reset all DMA state without reallocating: stop the engine (plus a
 * WiFi subsystem reset when fullreset is set), reinitialize every
 * ring, refill the RX rings and restart the engine.
 * Returns 0 or the first error encountered.
 */
int
mwx_dma_reset(struct mwx_softc *sc, int fullreset)
{
	int rv;

	DPRINTF("%s: DMA reset\n", DEVNAME(sc));

	if ((rv = mt7921_dma_disable(sc, fullreset)) != 0)
		return rv;
	if (fullreset)
		if ((rv = mt7921_wfsys_reset(sc)) != 0)
			return rv;

	/* TX queues */
	mwx_queue_reset(sc, &sc->sc_txq);
	mwx_queue_reset(sc, &sc->sc_txmcuq);
	mwx_queue_reset(sc, &sc->sc_txfwdlq);

	/* RX queues */
	mwx_queue_reset(sc, &sc->sc_rxq);
	mwx_queue_reset(sc, &sc->sc_rxmcuq);
	mwx_queue_reset(sc, &sc->sc_rxfwdlq);

	/* TDOD mt76_tx_status_check */

	/* refill RX queues */
	if ((rv = mwx_queue_fill(sc, &sc->sc_rxq)) != 0 ||
	    (rv = mwx_queue_fill(sc, &sc->sc_rxmcuq)) != 0 ||
	    (rv = mwx_queue_fill(sc, &sc->sc_rxfwdlq)) != 0)
		return rv;

	/* enable DMA engine */
	mt7921_dma_enable(sc);

	return 0;
}
1861
1862 void
mwx_dma_free(struct mwx_softc * sc)1863 mwx_dma_free(struct mwx_softc *sc)
1864 {
1865 /* TX queues */
1866 mwx_queue_free(sc, &sc->sc_txq);
1867 mwx_queue_free(sc, &sc->sc_txmcuq);
1868 mwx_queue_free(sc, &sc->sc_txfwdlq);
1869
1870 /* RX queues */
1871 mwx_queue_free(sc, &sc->sc_rxq);
1872 mwx_queue_free(sc, &sc->sc_rxmcuq);
1873 mwx_queue_free(sc, &sc->sc_rxfwdlq);
1874 }
1875
1876 static inline int
mwx_dma_free_slots(struct mwx_queue * q)1877 mwx_dma_free_slots(struct mwx_queue *q)
1878 {
1879 int free = q->mq_count - 1;
1880 free += q->mq_cons;
1881 free -= q->mq_prod;
1882 free %= q->mq_count;
1883 return free;
1884 }
1885
/*
 * Enqueue an mbuf directly onto a TX ring (used for MCU/firmware
 * traffic, not TXWI-based data frames): DMA-map the chain, pack two
 * segments per descriptor and advance the producer index.
 * Returns 0, EBUSY when the ring lacks space, or a bus_dma error.
 */
int
mwx_dma_tx_enqueue(struct mwx_softc *sc, struct mwx_queue *q, struct mbuf *m)
{
	struct mwx_queue_data *md;
	struct mt76_desc *desc;
	int i, nsegs, idx, rv;

	idx = q->mq_prod;
	md = &q->mq_data[idx];
	desc = &q->mq_desc[idx];

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, md->md_map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	/* too many segments: compact the chain and retry once */
	if (rv == EFBIG && m_defrag(m, M_DONTWAIT) == 0)
		rv = bus_dmamap_load_mbuf(sc->sc_dmat, md->md_map, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (rv != 0)
		return rv;

	nsegs = md->md_map->dm_nsegs;

	/* check if there is enough space */
	if ((nsegs + 1)/2 > mwx_dma_free_slots(q)) {
		bus_dmamap_unload(sc->sc_dmat, md->md_map);
		return EBUSY;
	}

	bus_dmamap_sync(sc->sc_dmat, md->md_map, 0, md->md_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* the mbuf is owned by the first slot of the chain */
	md->md_mbuf = m;
	md->md_txwi = NULL;

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* two segments per hardware descriptor */
	for (i = 0; i < nsegs; i += 2) {
		uint32_t buf0, buf1 = 0;
		uint32_t len0, len1 = 0, ctrl;

		KASSERT(md->md_map->dm_segs[i].ds_addr <= UINT32_MAX);
		buf0 = md->md_map->dm_segs[i].ds_addr;
		len0 = md->md_map->dm_segs[i].ds_len;
		ctrl = MT_DMA_CTL_SD_LEN0(len0);

		if (i < nsegs - 1) {
			KASSERT(md->md_map->dm_segs[i + 1].ds_addr <=
			    UINT32_MAX);
			buf1 = md->md_map->dm_segs[i + 1].ds_addr;
			len1 = md->md_map->dm_segs[i + 1].ds_len;
			ctrl |= MT_DMA_CTL_SD_LEN1(len1);
		}

		/* mark whichever buffer holds the final segment */
		if (i == nsegs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nsegs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		desc->buf0 = htole32(buf0);
		desc->buf1 = htole32(buf1);
		desc->info = 0;
		desc->ctrl = htole32(ctrl);

		idx = (idx + 1) % q->mq_count;
		KASSERT(idx != q->mq_cons);
		md = &q->mq_data[idx];
		desc = &q->mq_desc[idx];
	}
	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	q->mq_prod = idx;

	/* publish the new producer index to the hardware */
	mwx_write(sc, q->mq_regbase + MT_DMA_CPU_IDX, q->mq_prod);

	return 0;
}
1961
1962 int
mwx_dma_txwi_enqueue(struct mwx_softc * sc,struct mwx_queue * q,struct mwx_txwi * mt)1963 mwx_dma_txwi_enqueue(struct mwx_softc *sc, struct mwx_queue *q,
1964 struct mwx_txwi *mt)
1965 {
1966 struct mwx_queue_data *md;
1967 struct mt76_desc *desc;
1968 uint32_t buf0, len0, ctrl;
1969 int idx;
1970
1971 idx = q->mq_prod;
1972 md = &q->mq_data[idx];
1973 desc = &q->mq_desc[idx];
1974
1975 /* check if there is enough space */
1976 if (1 > mwx_dma_free_slots(q)) {
1977 bus_dmamap_unload(sc->sc_dmat, md->md_map);
1978 return EBUSY;
1979 }
1980
1981 md->md_txwi = mt;
1982 md->md_mbuf = NULL;
1983
1984 bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
1985 BUS_DMASYNC_PREWRITE);
1986
1987 buf0 = mt->mt_addr;
1988 len0 = sizeof(mt->mt_desc);
1989 ctrl = MT_DMA_CTL_SD_LEN0(len0);
1990 ctrl |= MT_DMA_CTL_LAST_SEC0;
1991
1992 desc->buf0 = htole32(buf0);
1993 desc->buf1 = 0;
1994 desc->info = 0;
1995 desc->ctrl = htole32(ctrl);
1996
1997 idx = (idx + 1) % q->mq_count;
1998 KASSERT(idx != q->mq_cons);
1999
2000 bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
2001 BUS_DMASYNC_POSTWRITE);
2002
2003 q->mq_prod = idx;
2004
2005 mwx_write(sc, q->mq_regbase + MT_DMA_CPU_IDX, q->mq_prod);
2006
2007 return 0;
2008 }
2009
/*
 * Reclaim all TX descriptors the hardware has finished with (from the
 * consumer index up to the hardware's DMA index): unload and free
 * mbufs, clear the descriptors, advance the consumer index and wake a
 * waiter that blocked on ring space. TXWI-based slots are only marked;
 * their resources are released via mt7921_mac_tx_free().
 */
void
mwx_dma_tx_cleanup(struct mwx_softc *sc, struct mwx_queue *q)
{
	struct mwx_queue_data *md;
	struct mt76_desc *desc;
	int idx, last;

	idx = q->mq_cons;
	last = mwx_read(sc, q->mq_regbase + MT_DMA_DMA_IDX);

	if (idx == last)
		return;

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	while (idx != last) {
		md = &q->mq_data[idx];
		desc = &q->mq_desc[idx];

		if (md->md_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmat, md->md_map, 0,
			    md->md_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, md->md_map);
			m_freem(md->md_mbuf);
			md->md_mbuf = NULL;
		}
		if (md->md_txwi != NULL) {
			/* nothing here, cleanup via mt7921_mac_tx_free() */
			md->md_txwi = NULL;
			printf("%s: %s txwi acked, idx %d\n", DEVNAME(sc), __func__, idx);
		}

		/* clear DMA desc just to be sure */
		desc->buf0 = 0;
		desc->buf1 = 0;
		desc->info = 0;
		desc->ctrl = htole32(MT_DMA_CTL_DMA_DONE);

		idx = (idx + 1) % q->mq_count;

		/* check if more data made it in */
		/* XXX should we actually do that? */
		if (idx == last)
			last = mwx_read(sc, q->mq_regbase + MT_DMA_DMA_IDX);
	}

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	q->mq_cons = idx;
	/* wake a thread sleeping on ring space */
	if (q->mq_wakeme) {
		q->mq_wakeme = 0;
		wakeup(q);
	}
}
2066
2067 void
mwx_dma_tx_done(struct mwx_softc * sc)2068 mwx_dma_tx_done(struct mwx_softc *sc)
2069 {
2070 mwx_dma_tx_cleanup(sc, &sc->sc_txq);
2071 mwx_dma_tx_cleanup(sc, &sc->sc_txmcuq);
2072 mwx_dma_tx_cleanup(sc, &sc->sc_txfwdlq);
2073
2074 /* XXX do we need to wakeup someone */
2075 }
2076
/* XXX wrong place */
/*
 * Dispatch received frames by the packet type encoded in the first
 * RX descriptor word: MCU events, TX-free notifications and normal
 * data frames. Data frames are collected via mwx_rx() and handed to
 * the network stack in one batch at the end.
 */
void
mwx_dma_rx_process(struct mwx_softc *sc, struct mbuf_list *ml)
{
	struct mbuf_list mlout = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t *data, rxd, type, flag;

	while ((m = ml_dequeue(ml)) != NULL) {
		data = mtod(m, uint32_t *);
		rxd = le32toh(data[0]);

		type = MT_RXD0_PKT_TYPE_GET(rxd);
		flag = (rxd & MT_RXD0_PKT_FLAG_MASK) >> MT_RXD0_PKT_FLAG_SHIFT;

		/* events with flag 0x1 carry a normal frame from the MCU */
		if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
			type = PKT_TYPE_NORMAL_MCU;

		switch (type) {
		case PKT_TYPE_RX_EVENT:
			mwx_mcu_rx_event(sc, m);
			break;
		case PKT_TYPE_TXRX_NOTIFY:
			mt7921_mac_tx_free(sc, m);
			break;
#if TODO
		case PKT_TYPE_TXS:
			for (rxd += 2; rxd + 8 <= end; rxd += 8)
				mt7921_mac_add_txs(dev, rxd);
			m_freem(m);
			break;
#endif
		case PKT_TYPE_NORMAL_MCU:
		case PKT_TYPE_NORMAL:
			mwx_rx(sc, m, &mlout);
			break;
		default:
			if (DEVDEBUG(sc))
				printf("%s: received unknown pkt type %d\n",
				    DEVNAME(sc), type);
			m_freem(m);
			break;
		}
	}

	/* pass all accepted data frames to the stack at once */
	if_input(&sc->sc_ic.ic_if, &mlout);
}
2124
/*
 * Pull all completed RX buffers off a ring (up to the hardware's DMA
 * index), reassembling multi-segment frames into mbuf chains: each
 * slot contributes one mbuf, and MT_DMA_CTL_LAST_SEC0 marks the end of
 * a frame. Complete frames are appended to ml; the consumer index is
 * advanced but the ring is NOT refilled here (see mwx_dma_rx_done()).
 */
void
mwx_dma_rx_dequeue(struct mwx_softc *sc, struct mwx_queue *q,
    struct mbuf_list *ml)
{
	struct mwx_queue_data *md;
	struct mt76_desc *desc;
	struct mbuf *m, *m0 = NULL, *mtail = NULL;
	int idx, last;

	idx = q->mq_cons;
	last = mwx_read(sc, q->mq_regbase + MT_DMA_DMA_IDX);

	if (idx == last)
		return;

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	while (idx != last) {
		uint32_t ctrl;

		md = &q->mq_data[idx];
		desc = &q->mq_desc[idx];

		bus_dmamap_sync(sc->sc_dmat, md->md_map, 0,
		    md->md_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, md->md_map);

		/* dequeue mbuf */
		m = md->md_mbuf;
		md->md_mbuf = NULL;

		/* only buf0 is used on RX rings */
		ctrl = le32toh(desc->ctrl);
		m->m_len = MT_DNA_CTL_SD_GET_LEN0(ctrl);

		/* start a new chain or append to the one in progress */
		if (m0 == NULL) {
			m0 = mtail = m;
			m0->m_pkthdr.len = m->m_len;
		} else {
			mtail->m_next = m;
			mtail = m;
			m0->m_pkthdr.len += m->m_len;
		}

		/* TODO handle desc->info */

		/* check if this was the last mbuf of the chain */
		if (ctrl & MT_DMA_CTL_LAST_SEC0) {
			ml_enqueue(ml, m0);
			m0 = NULL;
			mtail = NULL;
		}

		idx = (idx + 1) % q->mq_count;

		/* check if more data made it in */
		/* XXX should we actually do that? */
		if (idx == last)
			last = mwx_read(sc, q->mq_regbase + MT_DMA_DMA_IDX);
	}

	/* XXX make sure we don't have half processed data */
	KASSERT(m0 == NULL);

	bus_dmamap_sync(sc->sc_dmat, q->mq_map, 0, q->mq_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	q->mq_cons = idx;
}
2195
2196 void
mwx_dma_rx_done(struct mwx_softc * sc,struct mwx_queue * q)2197 mwx_dma_rx_done(struct mwx_softc *sc, struct mwx_queue *q)
2198 {
2199 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2200
2201 mwx_dma_rx_dequeue(sc, q, &ml);
2202
2203 if (ml_empty(&ml))
2204 return;
2205
2206 mwx_queue_fill(sc, q); /* TODO what if it fails, run timer? */
2207
2208 mwx_dma_rx_process(sc, &ml);
2209 }
2210
2211 struct mbuf *
mwx_mcu_alloc_msg(size_t len)2212 mwx_mcu_alloc_msg(size_t len)
2213 {
2214 const int headspace = sizeof(struct mt7921_mcu_txd);
2215 struct mbuf *m;
2216
2217 /* Allocate mbuf with enough space */
2218 m = m_gethdr(MT_DATA, M_DONTWAIT);
2219 if (m == NULL)
2220 return NULL;
2221 if (len + headspace > MHLEN) {
2222 m_clget(m, M_DONTWAIT, len + headspace);
2223 if (!ISSET(m->m_flags, M_EXT)) {
2224 m_freem(m);
2225 return NULL;
2226 }
2227 }
2228
2229 m_align(m, len + headspace);
2230 m->m_pkthdr.len = m->m_len = len + headspace;
2231 m_adj(m, headspace);
2232
2233 return m;
2234 }
2235
2236 void
mwx_mcu_set_len(struct mbuf * m,void * end)2237 mwx_mcu_set_len(struct mbuf *m, void *end)
2238 {
2239 void *start = mtod(m, void *);
2240 int len = m->m_pkthdr.len, adj;
2241
2242 KASSERT(start <= end && end - start <= len);
2243 adj = len - (end - start);
2244 m_adj(m, -adj);
2245 }
2246
2247 int
mwx_mcu_send_mbuf(struct mwx_softc * sc,uint32_t cmd,struct mbuf * m,int * seqp)2248 mwx_mcu_send_mbuf(struct mwx_softc *sc, uint32_t cmd, struct mbuf *m, int *seqp)
2249 {
2250 struct mt7921_uni_txd *uni_txd;
2251 struct mt7921_mcu_txd *mcu_txd;
2252 struct mwx_queue *q;
2253 uint32_t *txd, val;
2254 int s, rv, txd_len, mcu_cmd = cmd & MCU_CMD_FIELD_ID_MASK;
2255 int len = m->m_pkthdr.len;
2256 uint8_t seq;
2257
2258 if (cmd == MCU_CMD_FW_SCATTER) {
2259 q = &sc->sc_txfwdlq;
2260 goto enqueue;
2261 }
2262
2263 seq = ++sc->sc_mcu_seq & 0x0f;
2264 if (seq == 0)
2265 seq = ++sc->sc_mcu_seq & 0x0f;
2266
2267 txd_len = cmd & MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
2268 KASSERT(m_leadingspace(m) >= txd_len);
2269 m = m_prepend(m, txd_len, M_DONTWAIT);
2270 txd = mtod(m, uint32_t *);
2271 memset(txd, 0, txd_len);
2272
2273 val = (m->m_len & MT_TXD0_TX_BYTES_MASK) |
2274 MT_TX_TYPE_CMD | MT_TXD0_Q_IDX(MT_TX_MCU_PORT_RX_Q0);
2275 txd[0] = htole32(val);
2276
2277 val = MT_TXD1_LONG_FORMAT | MT_HDR_FORMAT_CMD;
2278 txd[1] = htole32(val);
2279
2280 if (cmd & MCU_CMD_FIELD_UNI) {
2281 uni_txd = (struct mt7921_uni_txd *)txd;
2282 uni_txd->len = htole16(len);
2283 uni_txd->option = MCU_CMD_UNI_EXT_ACK;
2284 uni_txd->cid = htole16(mcu_cmd);
2285 uni_txd->s2d_index = CMD_S2D_IDX_H2N;
2286 uni_txd->pkt_type = MCU_PKT_ID;
2287 uni_txd->seq = seq;
2288 } else {
2289 mcu_txd = (struct mt7921_mcu_txd *)txd;
2290 mcu_txd->len = htole16(len);
2291 mcu_txd->pq_id = htole16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU,
2292 MT_TX_MCU_PORT_RX_Q0));
2293 mcu_txd->pkt_type = MCU_PKT_ID;
2294 mcu_txd->seq = seq;
2295 mcu_txd->cid = mcu_cmd;
2296 mcu_txd->s2d_index = CMD_S2D_IDX_H2N;
2297 mcu_txd->ext_cid = MCU_GET_EXT_CMD(cmd);
2298
2299 if (mcu_txd->ext_cid || (cmd & MCU_CMD_FIELD_CE)) {
2300 if (cmd & MCU_CMD_FIELD_QUERY)
2301 mcu_txd->set_query = MCU_Q_QUERY;
2302 else
2303 mcu_txd->set_query = MCU_Q_SET;
2304 mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
2305 } else {
2306 mcu_txd->set_query = MCU_Q_NA;
2307 }
2308 }
2309
2310 if (seqp != NULL)
2311 *seqp = seq;
2312 q = &sc->sc_txmcuq;
2313 enqueue:
2314
2315 if (cmd != MCU_CMD_FW_SCATTER) {
2316 printf("%s: %s: cmd %08x\n", DEVNAME(sc), __func__, cmd);
2317 pkt_hex_dump(m);
2318 }
2319
2320 s = splnet();
2321 while (1) {
2322 rv = mwx_dma_tx_enqueue(sc, q, m);
2323 if (rv != EBUSY)
2324 break;
2325 q->mq_wakeme = 1;
2326 tsleep_nsec(q, 0, "mwxq", MSEC_TO_NSEC(100));
2327 }
2328 splx(s);
2329 return rv;
2330 }
2331
2332 int
mwx_mcu_send_msg(struct mwx_softc * sc,uint32_t cmd,void * data,size_t len,int * seqp)2333 mwx_mcu_send_msg(struct mwx_softc *sc, uint32_t cmd, void *data, size_t len,
2334 int *seqp)
2335 {
2336 struct mbuf *m;
2337
2338 m = mwx_mcu_alloc_msg(len);
2339 if (m == NULL)
2340 return ENOMEM;
2341
2342 if (len != 0)
2343 memcpy(mtod(m, caddr_t), data, len);
2344
2345 return mwx_mcu_send_mbuf(sc, cmd, m, seqp);
2346 }
2347
2348 int
mwx_mcu_send_wait(struct mwx_softc * sc,uint32_t cmd,void * data,size_t len)2349 mwx_mcu_send_wait(struct mwx_softc *sc, uint32_t cmd, void *data, size_t len)
2350 {
2351 int rv, seq;
2352
2353 rv = mwx_mcu_send_msg(sc, cmd, data, len, &seq);
2354 if (rv != 0)
2355 return rv;
2356 return mwx_mcu_wait_resp_int(sc, cmd, seq, NULL);
2357 }
2358
2359 int
mwx_mcu_send_mbuf_wait(struct mwx_softc * sc,uint32_t cmd,struct mbuf * m)2360 mwx_mcu_send_mbuf_wait(struct mwx_softc *sc, uint32_t cmd, struct mbuf *m)
2361 {
2362 int rv, seq;
2363
2364 rv = mwx_mcu_send_mbuf(sc, cmd, m, &seq);
2365 if (rv != 0)
2366 return rv;
2367 return mwx_mcu_wait_resp_int(sc, cmd, seq, NULL);
2368 }
2369
/*
 * Handle an event/response message from the MCU.  Unsolicited events
 * are dispatched to their handlers; command responses are decoded into
 * an integer status (per-command format) and handed to the thread
 * sleeping in mwx_mcu_wait_resp_*() via the sc_mcu_wait slot indexed
 * by the message's sequence number.  Consumes m.
 */
void
mwx_mcu_rx_event(struct mwx_softc *sc, struct mbuf *m)
{
	struct mt7921_mcu_rxd *rxd;
	uint32_t cmd, mcu_int = 0;
	int len;

	if ((m = m_pullup(m, sizeof(*rxd))) == NULL)
		return;
	rxd = mtod(m, struct mt7921_mcu_rxd *);

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT) {
		printf("%s: MCU_EXT_EVENT_RATE_REPORT COMMAND\n", DEVNAME(sc));
		m_freem(m);
		return;
	}

	len = sizeof(*rxd) - sizeof(rxd->rxd) + le16toh(rxd->len);
	/* make sure all the data is in one mbuf */
	if ((m = m_pullup(m, len)) == NULL) {
		printf("%s: mwx_mcu_rx_event m_pullup failed\n", DEVNAME(sc));
		return;
	}
	/* refetch after pullup */
	rxd = mtod(m, struct mt7921_mcu_rxd *);
	/*
	 * NOTE: m_adj() only advances m_data, so rxd keeps pointing at
	 * the (still valid) header storage inside this mbuf below.
	 */
	m_adj(m, sizeof(*rxd));

	switch (rxd->eid) {
	case MCU_EVENT_SCHED_SCAN_DONE:
	case MCU_EVENT_SCAN_DONE:
		mt7921_mcu_scan_event(sc, m);
		break;
#if 0
	case MCU_EVENT_BSS_BEACON_LOSS:
		mt7921_mcu_connection_loss_event(dev, skb);
		break;
	case MCU_EVENT_BSS_ABSENCE:
		mt7921_mcu_bss_event(dev, skb);
		break;
	case MCU_EVENT_DBG_MSG:
		mt7921_mcu_debug_msg_event(dev, skb);
		break;
#endif
	case MCU_EVENT_COREDUMP:
		/* it makes little sense to write the coredump down */
		if (!sc->sc_resetting)
			printf("%s: coredump event\n", DEVNAME(sc));
		mwx_reset(sc);
		break;
	case MCU_EVENT_LP_INFO:
		mt7921_mcu_low_power_event(sc, m);
		break;
	case MCU_EVENT_TX_DONE:
		mt7921_mcu_tx_done_event(sc, m);
		break;
	case 0x6:
		/* XXX unidentified event id — fix magic number */
		printf("%s: MAGIC COMMAND\n", DEVNAME(sc));
		/* FALLTHROUGH */
	default:
		/* anything else is treated as a command response */
		if (rxd->seq == 0 || rxd->seq >= nitems(sc->sc_mcu_wait)) {
			printf("%s: mcu rx bad seq %x\n", DEVNAME(sc),
			    rxd->seq);
			break;
		}

		cmd = sc->sc_mcu_wait[rxd->seq].mcu_cmd;

		if (cmd == MCU_CMD_PATCH_SEM_CONTROL ||
		    cmd == MCU_CMD_PATCH_FINISH_REQ) {
			/*
			 * XXX this is a terrible abuse: the status byte
			 * lives in the just-stripped rxd header, so
			 * "un-adjust" the mbuf to read it back.  Relies
			 * on m_prepend() reusing the leading space
			 * (checked by the KASSERT).
			 */
			KASSERT(m_leadingspace(m) >= sizeof(uint32_t));
			m = m_prepend(m, sizeof(uint32_t), M_DONTWAIT);
			mcu_int = *mtod(m, uint8_t *);
		} else if (cmd == MCU_EXT_CMD_THERMAL_CTRL) {
			/* second LE32 word carries the temperature */
			if (m->m_len < sizeof(uint32_t) * 2)
				break;
			mcu_int = le32toh(mtod(m, uint32_t *)[1]);
		} else if (cmd == MCU_EXT_CMD_EFUSE_ACCESS) {
			//ret = mt7921_mcu_parse_eeprom(sc, m);
			printf("%s: mcu resp no handled yet\n", DEVNAME(sc));
		} else if (cmd == MCU_UNI_CMD_DEV_INFO_UPDATE ||
		    cmd == MCU_UNI_CMD_BSS_INFO_UPDATE ||
		    cmd == MCU_UNI_CMD_STA_REC_UPDATE ||
		    cmd == MCU_UNI_CMD_HIF_CTRL ||
		    cmd == MCU_UNI_CMD_OFFLOAD ||
		    cmd == MCU_UNI_CMD_SUSPEND) {
			struct mt7921_mcu_uni_event *event;

			if (m->m_len < sizeof(*event))
				break;
			event = mtod(m, struct mt7921_mcu_uni_event *);
			mcu_int = le32toh(event->status);
		} else if (cmd == MCU_CE_QUERY_REG_READ) {
			struct mt7921_mcu_reg_event *event;

			if (m->m_len < sizeof(*event))
				break;
			event = mtod(m, struct mt7921_mcu_reg_event *);
			mcu_int = le32toh(event->val);
		}

		/* hand result and mbuf over to the waiter, do not free m */
		sc->sc_mcu_wait[rxd->seq].mcu_int = mcu_int;
		sc->sc_mcu_wait[rxd->seq].mcu_m = m;
		wakeup(&sc->sc_mcu_wait[rxd->seq]);
		return;
	}

	m_freem(m);
}
2478
/*
 * Wait (up to 3s) for the MCU response to the command sent with
 * sequence number seq and return its integer status via val.
 * Frees any response mbuf; use mwx_mcu_wait_resp_msg() to keep it.
 * A timeout triggers a full device reset and returns the tsleep error.
 *
 * NOTE(review): the wait slot is armed here, after the command was
 * already sent — presumably the response interrupt cannot be processed
 * before the caller sleeps, otherwise the wakeup would be lost; verify
 * against the interrupt path.
 */
int
mwx_mcu_wait_resp_int(struct mwx_softc *sc, uint32_t cmd, int seq,
    uint32_t *val)
{
	int rv;

	KASSERT(seq < nitems(sc->sc_mcu_wait));

	/* arm the wait slot; mcu_cmd tells the rx path how to decode */
	memset(&sc->sc_mcu_wait[seq], 0, sizeof(sc->sc_mcu_wait[0]));
	sc->sc_mcu_wait[seq].mcu_cmd = cmd;

	rv = tsleep_nsec(&sc->sc_mcu_wait[seq], 0, "mwxwait", SEC_TO_NSEC(3));
	if (rv != 0) {
		printf("%s: command %x timeout\n", DEVNAME(sc), cmd);
		mwx_reset(sc);
		return rv;
	}

	if (sc->sc_mcu_wait[seq].mcu_m != NULL) {
		m_freem(sc->sc_mcu_wait[seq].mcu_m);
		sc->sc_mcu_wait[seq].mcu_m = NULL;
	}
	if (val != NULL)
		*val = sc->sc_mcu_wait[seq].mcu_int;
	return 0;
}
2505
/*
 * Like mwx_mcu_wait_resp_int() but hands the raw response mbuf to the
 * caller via mp (ownership transfers; caller must m_freem it).  If mp
 * is NULL the response is freed.  Returns ENOENT when the MCU answered
 * without attaching a message, or the tsleep error on timeout (after
 * resetting the device).
 */
int
mwx_mcu_wait_resp_msg(struct mwx_softc *sc, uint32_t cmd, int seq,
    struct mbuf **mp)
{
	int rv;

	KASSERT(seq < nitems(sc->sc_mcu_wait));

	/* arm the wait slot; mcu_cmd tells the rx path how to decode */
	memset(&sc->sc_mcu_wait[seq], 0, sizeof(sc->sc_mcu_wait[0]));
	sc->sc_mcu_wait[seq].mcu_cmd = cmd;

	rv = tsleep_nsec(&sc->sc_mcu_wait[seq], 0, "mwxwait", SEC_TO_NSEC(3));
	if (rv != 0) {
		printf("%s: command %x timeout\n", DEVNAME(sc), cmd);
		mwx_reset(sc);
		return rv;
	}
	if (sc->sc_mcu_wait[seq].mcu_m == NULL) {
		printf("%s: command response missing\n", DEVNAME(sc));
		return ENOENT;
	}
	if (mp != NULL)
		*mp = sc->sc_mcu_wait[seq].mcu_m;
	else
		m_freem(sc->sc_mcu_wait[seq].mcu_m);
	sc->sc_mcu_wait[seq].mcu_m = NULL;
	return 0;
}
2534
/*
 * Stop the WFDMA0 engine.  With force set the DMA logic and scheduler
 * are additionally pulsed through reset.  Returns non-zero if the
 * engine does not report idle within the poll timeout.
 */
int
mt7921_dma_disable(struct mwx_softc *sc, int force)
{
	/* disable WFDMA0 */
	mwx_clear(sc, MT_WFDMA0_GLO_CFG,
	    MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
	    MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
	    MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
	    MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
	    MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (force) {
		/* reset: clear then set pulses the reset lines */
		mwx_clear(sc, MT_WFDMA0_RST, MT_WFDMA0_RST_DMASHDL_ALL_RST |
		    MT_WFDMA0_RST_LOGIC_RST);
		mwx_set(sc, MT_WFDMA0_RST, MT_WFDMA0_RST_DMASHDL_ALL_RST |
		    MT_WFDMA0_RST_LOGIC_RST);
	}

	/* disable dmashdl */
	mwx_clear(sc, MT_WFDMA0_GLO_CFG_EXT0, MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mwx_set(sc, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	/* wait for both TX and RX DMA to go idle */
	return mwx_poll(sc, MT_WFDMA0_GLO_CFG, 0,
	    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | MT_WFDMA0_GLO_CFG_RX_DMA_BUSY,
	    1000);
}
2562
/*
 * Bring the WFDMA0 engine up: program per-ring prefetch windows,
 * reset the descriptor indices, enable the DMA engine and unmask the
 * TX/RX/MCU interrupts.
 */
void
mt7921_dma_enable(struct mwx_softc *sc)
{
#define PREFETCH(base, depth)	((base) << 16 | (depth))
	/* configure perfetch settings */
	mwx_write(sc, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
	mwx_write(sc, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
	mwx_write(sc, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
	mwx_write(sc, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
	mwx_write(sc, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));

	mwx_write(sc, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
	mwx_write(sc, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));

	/* reset dma idx */
	mwx_write(sc, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mwx_write(sc, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mwx_set(sc, MT_WFDMA0_GLO_CFG,
	    MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
	    MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
	    MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
	    MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
	    MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
	    MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* all config must be visible before the engine starts */
	mwx_barrier(sc);
	mwx_set(sc, MT_WFDMA0_GLO_CFG,
	    MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mwx_set(sc, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mwx_write(sc, MT_WFDMA0_HOST_INT_ENA, MT_INT_RX_DONE_ALL |
	    MT_INT_TX_DONE_ALL | MT_INT_MCU_CMD);
	mwx_set(sc, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);
	mwx_write(sc, MT_PCIE_MAC_INT_ENABLE, 0xff);
}
2610
2611 int
mt7921_e_mcu_fw_pmctrl(struct mwx_softc * sc)2612 mt7921_e_mcu_fw_pmctrl(struct mwx_softc *sc)
2613 {
2614 int i;
2615
2616 for (i = 0; i < MT7921_MCU_INIT_RETRY_COUNT; i++) {
2617 mwx_write(sc, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
2618 if (mwx_poll(sc, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_OWN_SYNC,
2619 4, 50) == 0)
2620 break;
2621 }
2622
2623 if (i == MT7921_MCU_INIT_RETRY_COUNT) {
2624 printf("%s: firmware own failed\n", DEVNAME(sc));
2625 return EIO;
2626 }
2627
2628 return 0;
2629 }
2630
2631 int
mt7921_e_mcu_drv_pmctrl(struct mwx_softc * sc)2632 mt7921_e_mcu_drv_pmctrl(struct mwx_softc *sc)
2633 {
2634 int i;
2635
2636 for (i = 0; i < MT7921_MCU_INIT_RETRY_COUNT; i++) {
2637 mwx_write(sc, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
2638 if (mwx_poll(sc, MT_CONN_ON_LPCTL, 0,
2639 PCIE_LPCR_HOST_OWN_SYNC, 50) == 0)
2640 break;
2641 }
2642
2643 if (i == MT7921_MCU_INIT_RETRY_COUNT) {
2644 printf("%s: driver own failed\n", DEVNAME(sc));
2645 return EIO;
2646 }
2647
2648 return 0;
2649 }
2650
/*
 * Pulse the WFSYS software reset line and wait for the subsystem to
 * signal that its re-initialization is done.  Returns non-zero on
 * poll timeout.
 */
int
mt7921_wfsys_reset(struct mwx_softc *sc)
{
	DPRINTF("%s: WFSYS reset\n", DEVNAME(sc));

	/* assert reset, hold for 50ms, deassert */
	mwx_clear(sc, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
	delay(50 * 1000);
	mwx_set(sc, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);

	return mwx_poll(sc, MT_WFSYS_SW_RST_B, WFSYS_SW_INIT_DONE,
	    WFSYS_SW_INIT_DONE, 500);
}
2663
2664 /*
2665 * To be honest this is ridiculous.
2666 */
2667 uint32_t
mt7921_reg_addr(struct mwx_softc * sc,uint32_t reg)2668 mt7921_reg_addr(struct mwx_softc *sc, uint32_t reg)
2669 {
2670 static const struct {
2671 uint32_t phys;
2672 uint32_t mapped;
2673 uint32_t size;
2674 } fixed_map[] = {
2675 { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
2676 { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
2677 { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
2678 { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
2679 { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
2680 { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
2681 { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
2682 { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
2683 { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
2684 { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (conf register) */
2685 { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
2686 { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
2687 { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
2688 { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
2689 { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
2690 { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
2691 { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
2692 { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
2693 { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
2694 { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
2695 { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
2696 { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
2697 { 0x820cc000, 0x0e000, 0x01000 }, /* WF_UMAC_TOP (PP) */
2698 { 0x820cd000, 0x0f000, 0x01000 }, /* WF_MDP_TOP */
2699 { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
2700 { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
2701 { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
2702 { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
2703 { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
2704 { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
2705 { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
2706 { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
2707 { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
2708 { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
2709 { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
2710 { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
2711 { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
2712 { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
2713 { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
2714 { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
2715 { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
2716 { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
2717 { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
2718 };
2719 int i;
2720
2721 if (reg < 0x100000)
2722 return reg;
2723
2724 for (i = 0; i < nitems(fixed_map); i++) {
2725 uint32_t ofs;
2726
2727 if (reg < fixed_map[i].phys)
2728 continue;
2729
2730 ofs = reg - fixed_map[i].phys;
2731 if (ofs > fixed_map[i].size)
2732 continue;
2733
2734 return fixed_map[i].mapped + ofs;
2735 }
2736
2737 if ((reg >= 0x18000000 && reg < 0x18c00000) ||
2738 (reg >= 0x70000000 && reg < 0x78000000) ||
2739 (reg >= 0x7c000000 && reg < 0x7c400000))
2740 return mwx_map_reg_l1(sc, reg);
2741
2742 panic("%s: Access to currently unsupported address %08x\n",
2743 DEVNAME(sc), reg);
2744 }
2745
/*
 * One-time hardware bring-up: reset DMA, force the firmware into
 * normal operation mode, load firmware and initialize the MCU and MAC.
 * On any MCU/MAC init failure the DMA is reset again and EAGAIN is
 * returned so the caller may retry.
 */
int
mt7921_init_hardware(struct mwx_softc *sc)
{
	int rv;

	/* reset dma */
	rv = mwx_dma_reset(sc, 1);
	if (rv != 0)
		return rv;

	/*
	 * force firmware operation mode into normal state,
	 * which should be set before firmware download stage.
	 */
	mwx_write(sc, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
	mwx_barrier(sc);

	rv = mt7921_mcu_init(sc);
	if (rv != 0)
		goto fail;
	/* TODO override eeprom for systems with FDT */
	rv = mt7921_mcu_set_eeprom(sc);
	if (rv != 0)
		goto fail;
	rv = mt7921_mac_init(sc);
	if (rv != 0)
		goto fail;

	/* MAYBE alloc beacon and mgmt frame wcid 0 here */

	return 0;

fail:
	/* reset dma; the original error is deliberately replaced by EAGAIN */
	rv = mwx_dma_reset(sc, 1);
	if (rv != 0)
		return rv;
	return EAGAIN;
}
2785
/*
 * Initialize the MCU: obtain "driver own", load the firmware and query
 * the basic device capabilities.  Returns 0 or an errno.
 */
int
mt7921_mcu_init(struct mwx_softc *sc)
{
	int rv;

	/* this read is needed to make interrupts work */
	(void) mwx_read(sc, MT_TOP_LPCR_HOST_BAND0);
	mwx_write(sc, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN);
	/* wait for the firmware to release ownership of the chip */
	if (mwx_poll(sc, MT_TOP_LPCR_HOST_BAND0, 0, MT_TOP_LPCR_HOST_FW_OWN,
	    5000) != 0) {
		printf("%s: timeout for driver own\n", DEVNAME(sc));
		return EIO;
	}

	/* disable PCIe L0s power saving */
	mwx_set(sc, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS);

	if ((rv = mt7921_load_firmware(sc)) != 0)
		return rv;

	if ((rv = mt7921_mcu_get_nic_capability(sc)) != 0)
		return rv;
	if ((rv = mt7921_mcu_fw_log_2_host(sc, 1)) != 0)
		return rv;

	/* TODO mark MCU running */

	return 0;
}
2814
2815 static inline uint32_t
mt7921_get_data_mode(struct mwx_softc * sc,uint32_t info)2816 mt7921_get_data_mode(struct mwx_softc *sc, uint32_t info)
2817 {
2818 uint32_t mode = DL_MODE_NEED_RSP;
2819
2820 if (info == PATCH_SEC_NOT_SUPPORT)
2821 return mode;
2822 switch (info & PATCH_SEC_ENC_TYPE_MASK) {
2823 case PATCH_SEC_ENC_TYPE_PLAIN:
2824 break;
2825 case PATCH_SEC_ENC_TYPE_AES:
2826 mode |= DL_MODE_ENCRYPT;
2827 mode |= (info << DL_MODE_KEY_IDX_SHIFT) & DL_MODE_KEY_IDX_MASK;
2828 mode |= DL_MODE_RESET_SEC_IV;
2829 break;
2830 case PATCH_SEC_ENC_TYPE_SCRAMBLE:
2831 mode |= DL_MODE_ENCRYPT;
2832 mode |= DL_CONFIG_ENCRY_MODE_SEL;
2833 mode |= DL_MODE_RESET_SEC_IV;
2834 break;
2835 default:
2836 printf("%s: encryption type not supported\n", DEVNAME(sc));
2837 }
2838 return mode;
2839 }
2840
2841 static inline uint32_t
mt7921_mcu_gen_dl_mode(uint8_t feature_set)2842 mt7921_mcu_gen_dl_mode(uint8_t feature_set)
2843 {
2844 uint32_t ret = DL_MODE_NEED_RSP;
2845
2846 if (feature_set & FW_FEATURE_SET_ENCRYPT)
2847 ret |= (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV);
2848 if (feature_set & FW_FEATURE_ENCRY_MODE)
2849 ret |= DL_CONFIG_ENCRY_MODE_SEL;
2850
2851 /* FW_FEATURE_SET_KEY_IDX_MASK == DL_MODE_KEY_IDX_MASK */
2852 ret |= feature_set & FW_FEATURE_SET_KEY_IDX_MASK;
2853
2854 return ret;
2855 }
2856
2857
2858 int
mt7921_load_firmware(struct mwx_softc * sc)2859 mt7921_load_firmware(struct mwx_softc *sc)
2860 {
2861 struct mt7921_patch_hdr *hdr;
2862 struct mt7921_fw_trailer *fwhdr;
2863 const char *rompatch, *fw;
2864 u_char *buf, *fwbuf, *dl;
2865 size_t buflen, fwlen, offset = 0;
2866 uint32_t reg, override = 0, option = 0;
2867 int i, rv, sem;
2868
2869 reg = mwx_read(sc, MT_CONN_ON_MISC) & MT_TOP_MISC2_FW_N9_RDY;
2870 if (reg != 0) {
2871 DPRINTF("%s: firmware already downloaded\n", DEVNAME(sc));
2872 return 0;
2873 }
2874
2875 switch (sc->sc_hwtype) {
2876 case MWX_HW_MT7921:
2877 rompatch = MT7921_ROM_PATCH;
2878 fw = MT7921_FIRMWARE_WM;
2879 break;
2880 case MWX_HW_MT7922:
2881 rompatch = MT7922_ROM_PATCH;
2882 fw = MT7922_FIRMWARE_WM;
2883 break;
2884 }
2885 if ((rv = loadfirmware(rompatch, &buf, &buflen)) != 0 ||
2886 (rv= loadfirmware(fw, &fwbuf, &fwlen)) != 0) {
2887 printf("%s: loadfirmware error %d\n", DEVNAME(sc), rv);
2888 return rv;
2889 }
2890
2891 rv = mt7921_mcu_patch_sem_ctrl(sc, 1);
2892 if (rv != 0)
2893 return rv;
2894
2895 if (buflen < sizeof(*hdr)) {
2896 DPRINTF("%s: invalid firmware\n", DEVNAME(sc));
2897 rv = EINVAL;
2898 goto out;
2899 }
2900 hdr = (struct mt7921_patch_hdr *)buf;
2901 printf("%s: HW/SW version: 0x%x, build time: %.15s\n",
2902 DEVNAME(sc), be32toh(hdr->hw_sw_ver), hdr->build_date);
2903
2904 for (i = 0; i < be32toh(hdr->desc.n_region); i++) {
2905 struct mt7921_patch_sec *sec;
2906 uint32_t len, addr, mode, sec_info;
2907
2908 sec = (struct mt7921_patch_sec *)(buf + sizeof(*hdr) +
2909 i * sizeof(*sec));
2910 if ((be32toh(sec->type) & PATCH_SEC_TYPE_MASK) !=
2911 PATCH_SEC_TYPE_INFO) {
2912 DPRINTF("%s: invalid firmware sector\n", DEVNAME(sc));
2913 rv = EINVAL;
2914 goto out;
2915 }
2916
2917 addr = be32toh(sec->info.addr);
2918 len = be32toh(sec->info.len);
2919 dl = buf + be32toh(sec->offs);
2920 sec_info = be32toh(sec->info.sec_key_idx);
2921 mode = mt7921_get_data_mode(sc, sec_info);
2922
2923 rv = mt7921_mcu_init_download(sc, addr, len, mode);
2924 if (rv != 0) {
2925 DPRINTF("%s: download request failed\n", DEVNAME(sc));
2926 goto out;
2927 }
2928 rv = mt7921_mcu_send_firmware(sc, MCU_CMD_FW_SCATTER,
2929 dl, len, 4096);
2930 if (rv != 0) {
2931 DPRINTF("%s: failed to send patch\n", DEVNAME(sc));
2932 goto out;
2933 }
2934 }
2935
2936 rv = mt7921_mcu_start_patch(sc);
2937 if (rv != 0) {
2938 printf("%s: patch start failed\n", DEVNAME(sc));
2939 goto fail;
2940 }
2941
2942 out:
2943 sem = mt7921_mcu_patch_sem_ctrl(sc, 0);
2944 if (sem != 0)
2945 rv = sem;
2946 if (rv != 0)
2947 goto fail;
2948
2949 fwhdr = (struct mt7921_fw_trailer *)(fwbuf + fwlen - sizeof(*fwhdr));
2950 printf("%s: WM firmware version: %.10s, build time: %.15s\n",
2951 DEVNAME(sc), fwhdr->fw_ver, fwhdr->build_date);
2952
2953 for (i = 0; i < fwhdr->n_region; i++) {
2954 struct mt7921_fw_region *region;
2955 uint32_t len, addr, mode;
2956
2957 region = (struct mt7921_fw_region *)((u_char *)fwhdr -
2958 (fwhdr->n_region - i) * sizeof(*region));
2959
2960 addr = le32toh(region->addr);
2961 len = le32toh(region->len);
2962 mode = mt7921_mcu_gen_dl_mode(region->feature_set);
2963
2964 if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
2965 override = addr;
2966
2967 rv = mt7921_mcu_init_download(sc, addr, len, mode);
2968 if (rv != 0) {
2969 DPRINTF("%s: download request failed\n", DEVNAME(sc));
2970 goto fail;
2971 }
2972
2973 rv = mt7921_mcu_send_firmware(sc, MCU_CMD_FW_SCATTER,
2974 fwbuf + offset, len, 4096);
2975 if (rv != 0) {
2976 DPRINTF("%s: failed to send firmware\n", DEVNAME(sc));
2977 goto fail;
2978 }
2979 offset += len;
2980 }
2981
2982 if (override != 0)
2983 option |= FW_START_OVERRIDE;
2984
2985 rv = mt7921_mcu_start_firmware(sc, override, option);
2986 if (rv != 0) {
2987 DPRINTF("%s: firmware start failed\n", DEVNAME(sc));
2988 goto fail;
2989 }
2990
2991 /* XXX should not busy poll here */
2992 if (mwx_poll(sc, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
2993 MT_TOP_MISC2_FW_N9_RDY, 1500) != 0) {
2994 DPRINTF("%s: Timeout initializing firmware\n", DEVNAME(sc));
2995 return EIO;
2996 }
2997
2998 DPRINTF("%s: firmware loaded\n", DEVNAME(sc));
2999 rv = 0;
3000
3001 fail:
3002 free(buf, M_DEVBUF, buflen);
3003 free(fwbuf, M_DEVBUF, fwlen);
3004 return rv;
3005 }
3006
3007 int
mt7921_mac_wtbl_update(struct mwx_softc * sc,int idx)3008 mt7921_mac_wtbl_update(struct mwx_softc *sc, int idx)
3009 {
3010 mwx_rmw(sc, MT_WTBL_UPDATE,
3011 (idx & MT_WTBL_UPDATE_WLAN_IDX) | MT_WTBL_UPDATE_ADM_COUNT_CLEAR,
3012 MT_WTBL_UPDATE_WLAN_IDX);
3013
3014 return mwx_poll(sc, MT_WTBL_UPDATE, 0, MT_WTBL_UPDATE_BUSY, 5000);
3015 }
3016
/*
 * Per-band MAC initialization: TX deadline limits, RX airtime
 * accounting, MIB duration counters and the maximum RX frame length.
 */
void
mt7921_mac_init_band(struct mwx_softc *sc, uint32_t band)
{
	mwx_rmw(sc, MT_TMAC_CTCR0(band), 0x3f, MT_TMAC_CTCR0_INS_DDLMT_REFTIME);
	mwx_set(sc, MT_TMAC_CTCR0(band),
	    MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
	    MT_TMAC_CTCR0_INS_DDLMT_EN);

	mwx_set(sc, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
	mwx_set(sc, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);

	/* enable MIB tx-rx time reporting */
	mwx_set(sc, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN);
	mwx_set(sc, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN);

	/* cap received frames at 1536 bytes */
	mwx_rmw(sc, MT_DMA_DCR0(band),
	    1536 << MT_DMA_DCR0_MAX_RX_LEN_SHIFT, MT_DMA_DCR0_MAX_RX_LEN_MASK);
	/* disable rx rate report by default due to hw issues */
	mwx_clear(sc, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
3037
/*
 * Global MAC initialization: RX length limit, hardware de-aggregation,
 * a full WTBL sweep and per-band setup; finally program the RTS
 * threshold via the MCU.
 */
int
mt7921_mac_init(struct mwx_softc *sc)
{
	int i;

	mwx_rmw(sc, MT_MDP_DCR1, 1536 << MT_MDP_DCR1_MAX_RX_LEN_SHIFT,
	    MT_MDP_DCR1_MAX_RX_LEN_MASK);

	/* enable hardware de-agg */
	mwx_set(sc, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
#if 0
	/* not enabled since our stack does not handle 802.3 frames */
	/* enable hardware rx header translation */
	mwx_set(sc, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
#endif

	/* clear admission counters of every WTBL entry */
	for (i = 0; i < MT7921_WTBL_SIZE; i++)
		mt7921_mac_wtbl_update(sc, i);

	mt7921_mac_init_band(sc, 0);
	mt7921_mac_init_band(sc, 1);

	/* remember the power-on RX filter for later modification */
	sc->sc_rxfilter = mwx_read(sc, MT_WF_RFCR(0));
	return mt7921_mcu_set_rts_thresh(sc, 0x92b, 0);
}
3063
/*
 * Acquire (semget != 0) or release the MCU ROM patch semaphore.
 * Returns 0 on success, EAGAIN on an unexpected semaphore state and
 * -1 (a sentinel, not an errno) when the patch is already downloaded
 * so the caller can skip the patch upload.
 */
int
mt7921_mcu_patch_sem_ctrl(struct mwx_softc *sc, int semget)
{
#define PATCH_SEM_RELEASE	0
#define PATCH_SEM_GET		1
#define PATCH_NOT_DL_SEM_FAIL		0
#define PATCH_IS_DL			1
#define PATCH_NOT_DL_SEM_SUCCESS	2
#define PATCH_REL_SEM_SUCCESS		3

	uint32_t op = semget ? PATCH_SEM_GET : PATCH_SEM_RELEASE;
	struct {
		uint32_t op;
	} req = {
		.op = htole32(op),
	};
	int rv, seq, sem;

	rv = mwx_mcu_send_msg(sc, MCU_CMD_PATCH_SEM_CONTROL,
	    &req, sizeof(req), &seq);
	if (rv != 0)
		return rv;

	/* the semaphore state comes back as the response status */
	rv = mwx_mcu_wait_resp_int(sc, MCU_CMD_PATCH_SEM_CONTROL, seq, &sem);
	if (rv != 0)
		return rv;

	if (semget) {
		switch (sem) {
		case PATCH_IS_DL:
			return -1;
		case PATCH_NOT_DL_SEM_SUCCESS:
			return 0;
		default:
			DPRINTF("%s: failed to %s patch semaphore\n",
			    DEVNAME(sc), "get");
			return EAGAIN;
		}
	} else {
		switch (sem) {
		case PATCH_REL_SEM_SUCCESS:
			return 0;
		default:
			DPRINTF("%s: failed to %s patch semaphore\n",
			    DEVNAME(sc), "release");
			return EAGAIN;
		}
	}
}
3113
3114 int
mt7921_mcu_init_download(struct mwx_softc * sc,uint32_t addr,uint32_t len,uint32_t mode)3115 mt7921_mcu_init_download(struct mwx_softc *sc, uint32_t addr,
3116 uint32_t len, uint32_t mode)
3117 {
3118 struct {
3119 uint32_t addr;
3120 uint32_t len;
3121 uint32_t mode;
3122 } req = {
3123 .addr = htole32(addr),
3124 .len = htole32(len),
3125 .mode = htole32(mode),
3126 };
3127 int cmd;
3128
3129 if (addr == 0x200000 || addr == 0x900000)
3130 cmd = MCU_CMD_PATCH_START_REQ;
3131 else
3132 cmd = MCU_CMD_TARGET_ADDRESS_LEN_REQ;
3133
3134 return mwx_mcu_send_wait(sc, cmd, &req, sizeof(req));
3135 }
3136
3137 int
mt7921_mcu_send_firmware(struct mwx_softc * sc,int cmd,u_char * data,size_t len,size_t max_len)3138 mt7921_mcu_send_firmware(struct mwx_softc *sc, int cmd, u_char *data,
3139 size_t len, size_t max_len)
3140 {
3141 size_t cur_len;
3142 int rv;
3143
3144 while (len > 0) {
3145 cur_len = len;
3146 if (cur_len > max_len)
3147 cur_len = max_len;
3148
3149 rv = mwx_mcu_send_msg(sc, cmd, data, cur_len, NULL);
3150 if (rv != 0)
3151 return rv;
3152
3153 data += cur_len;
3154 len -= cur_len;
3155
3156 mwx_dma_tx_cleanup(sc, &sc->sc_txfwdlq);
3157 }
3158
3159 return 0;
3160 }
3161
3162 int
mt7921_mcu_start_patch(struct mwx_softc * sc)3163 mt7921_mcu_start_patch(struct mwx_softc *sc)
3164 {
3165 struct {
3166 uint8_t check_crc;
3167 uint8_t reserved[3];
3168 } req = {
3169 .check_crc = 0,
3170 };
3171
3172 return mwx_mcu_send_wait(sc, MCU_CMD_PATCH_FINISH_REQ, &req,
3173 sizeof(req));
3174 }
3175
3176 int
mt7921_mcu_start_firmware(struct mwx_softc * sc,uint32_t addr,uint32_t option)3177 mt7921_mcu_start_firmware(struct mwx_softc *sc, uint32_t addr, uint32_t option)
3178 {
3179 struct {
3180 uint32_t option;
3181 uint32_t addr;
3182 } req = {
3183 .option = htole32(option),
3184 .addr = htole32(addr),
3185 };
3186
3187 return mwx_mcu_send_wait(sc, MCU_CMD_FW_START_REQ, &req, sizeof(req));
3188 }
3189
/*
 * Query the firmware for device capabilities and record them in the
 * softc: supported bands, number of spatial streams and the permanent
 * MAC address.  The response is a 4-byte header carrying a TLV count,
 * followed by that many (type, len, payload) TLVs.
 * Returns 0 on success or a positive errno.
 */
int
mt7921_mcu_get_nic_capability(struct mwx_softc *sc)
{
	struct mt76_connac_cap_hdr {
		uint16_t n_elements;
		uint16_t pad;
	} __packed *hdr;
	struct tlv_hdr {
		uint32_t type;
		uint32_t len;
	} __packed *tlv;
	struct mt76_connac_phy_cap {
		uint8_t ht;
		uint8_t vht;
		uint8_t _5g;
		uint8_t max_bw;
		uint8_t nss;
		uint8_t dbdc;
		uint8_t tx_ldpc;
		uint8_t rx_ldpc;
		uint8_t tx_stbc;
		uint8_t rx_stbc;
		uint8_t hw_path;
		uint8_t he;
	} __packed *cap;
	struct mbuf *m;
	int rv, seq, count, i;

	rv = mwx_mcu_send_msg(sc, MCU_CE_CMD_GET_NIC_CAPAB, NULL, 0, &seq);
	if (rv != 0)
		return rv;

	rv = mwx_mcu_wait_resp_msg(sc, MCU_CE_CMD_GET_NIC_CAPAB, seq, &m);
	if (rv != 0)
		return rv;

	/* the message was already pulled up by mwx_mcu_rx_event() */
	if (m->m_len < sizeof(*hdr)) {
		printf("%s: GET_NIC_CAPAB response size error\n", DEVNAME(sc));
		m_freem(m);
		return EINVAL;
	}
	hdr = mtod(m, struct mt76_connac_cap_hdr *);
	count = le16toh(hdr->n_elements);
	/* strip the header so the mbuf now starts at the first TLV */
	m_adj(m, sizeof(*hdr));

	for (i = 0; i < count; i++) {
		uint32_t type, len;

		if (m->m_len < sizeof(*tlv)) {
			printf("%s: GET_NIC_CAPAB tlv size error\n",
			    DEVNAME(sc));
			m_freem(m);
			return EINVAL;
		}

		tlv = mtod(m, struct tlv_hdr *);
		type = le32toh(tlv->type);
		len = le32toh(tlv->len);
		m_adj(m, sizeof(*tlv));

		/* truncated payload: stop parsing, keep what we got so far */
		if (m->m_len < len)
			break;
		switch (type) {
		case MT_NIC_CAP_6G:
			/* TODO 6GHZ SUPPORT */
			sc->sc_capa.has_6ghz = /* XXX 1 */ 0;
			break;
		case MT_NIC_CAP_MAC_ADDR:
			if (len < ETHER_ADDR_LEN)
				break;
			memcpy(sc->sc_lladdr, mtod(m, caddr_t), ETHER_ADDR_LEN);
			break;
		case MT_NIC_CAP_PHY:
			if (len < sizeof(*cap))
				break;
			cap = mtod(m, struct mt76_connac_phy_cap *);

			/* hw_path bit 0 = 2GHz, bit 1 = 5GHz */
			sc->sc_capa.num_streams = cap->nss;
			sc->sc_capa.antenna_mask = (1U << cap->nss) - 1;
			sc->sc_capa.has_2ghz = cap->hw_path & 0x01;
			sc->sc_capa.has_5ghz = cap->hw_path & 0x02;
			break;
		case MT_NIC_CAP_TX_RESOURCE:
			/* unused on PCIe devices */
			break;
		}
		/* advance past this TLV's payload to the next one */
		m_adj(m, len);
	}

	printf("%s: address %s\n", DEVNAME(sc), ether_sprintf(sc->sc_lladdr));

	m_freem(m);
	return 0;
}
3285
3286 int
mt7921_mcu_fw_log_2_host(struct mwx_softc * sc,uint8_t ctrl)3287 mt7921_mcu_fw_log_2_host(struct mwx_softc *sc, uint8_t ctrl)
3288 {
3289 struct {
3290 uint8_t ctrl;
3291 uint8_t pad[3];
3292 } req = {
3293 .ctrl = ctrl,
3294 };
3295
3296 return mwx_mcu_send_msg(sc, MCU_CE_CMD_FWLOG_2_HOST, &req,
3297 sizeof(req), NULL);
3298 }
3299
3300 int
mt7921_mcu_set_eeprom(struct mwx_softc * sc)3301 mt7921_mcu_set_eeprom(struct mwx_softc *sc)
3302 {
3303 struct req_hdr {
3304 uint8_t buffer_mode;
3305 uint8_t format;
3306 uint8_t pad[2];
3307 } req = {
3308 .buffer_mode = EE_MODE_EFUSE,
3309 .format = EE_FORMAT_WHOLE,
3310 };
3311
3312 return mwx_mcu_send_wait(sc, MCU_EXT_CMD_EFUSE_BUFFER_MODE, &req,
3313 sizeof(req));
3314 }
3315
3316 int
mt7921_mcu_set_rts_thresh(struct mwx_softc * sc,uint32_t val,uint8_t band)3317 mt7921_mcu_set_rts_thresh(struct mwx_softc *sc, uint32_t val, uint8_t band)
3318 {
3319 struct {
3320 uint8_t prot_idx;
3321 uint8_t band;
3322 uint8_t rsv[2];
3323 uint32_t len_thresh;
3324 uint32_t pkt_thresh;
3325 } __packed req = {
3326 .prot_idx = 1,
3327 .band = band,
3328 .len_thresh = htole32(val),
3329 .pkt_thresh = htole32(0x2),
3330 };
3331
3332 return mwx_mcu_send_wait(sc, MCU_EXT_CMD_PROTECT_CTRL, &req,
3333 sizeof(req));
3334 }
3335
3336 int
mt7921_mcu_set_deep_sleep(struct mwx_softc * sc,int ena)3337 mt7921_mcu_set_deep_sleep(struct mwx_softc *sc, int ena)
3338 {
3339 struct mt76_connac_config req = {
3340 .resp_type = 0,
3341 };
3342
3343 DPRINTF("%s: %s deep sleep\n", DEVNAME(sc), ena ? "enable" : "disable");
3344 snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !ena);
3345 return mwx_mcu_send_msg(sc, MCU_CE_CMD_CHIP_CONFIG, &req,
3346 sizeof(req), NULL);
3347 }
3348
3349 void
mt7921_mcu_low_power_event(struct mwx_softc * sc,struct mbuf * m)3350 mt7921_mcu_low_power_event(struct mwx_softc *sc, struct mbuf *m)
3351 {
3352 struct mt7921_mcu_lp_event {
3353 uint8_t state;
3354 uint8_t reserved[3];
3355 } __packed *event;
3356
3357 if (m->m_len < sizeof(*event))
3358 return;
3359 event = mtod(m, struct mt7921_mcu_lp_event *);
3360 DPRINTF("%s: low power event state %d\n", DEVNAME(sc), event->state);
3361 }
3362
/*
 * Handle a TX-done event from the firmware.  The event layout mirrors
 * the firmware's wire format; only a size check is done today, the
 * embedded TX status (event->txs) is not yet consumed.
 */
void
mt7921_mcu_tx_done_event(struct mwx_softc *sc, struct mbuf *m)
{
	struct mt7921_mcu_tx_done_event {
		uint8_t pid;
		uint8_t status;
		uint16_t seq;
		uint8_t wlan_idx;
		uint8_t tx_cnt;
		uint16_t tx_rate;
		uint8_t flag;
		uint8_t tid;
		uint8_t rsp_rate;
		uint8_t mcs;
		uint8_t bw;
		uint8_t tx_pwr;
		uint8_t reason;
		uint8_t rsv0[1];
		uint32_t delay;
		uint32_t timestamp;
		uint32_t applied_flag;
		uint8_t txs[28];
		uint8_t rsv1[32];
	} __packed *event;

	/* ignore undersized events */
	if (m->m_len < sizeof(*event))
		return;
	event = mtod(m, struct mt7921_mcu_tx_done_event *);
	// TODO mt7921_mac_add_txs(dev, event->txs);
}
3393
/*
 * Start a hardware scan.  Builds an mt76_connac_hw_scan_req covering
 * all configured channels (up to 64: 32 in the base array plus 32 in
 * the extension array) and hands it to the firmware.  SSID-directed
 * and randomized-MAC scans are not wired up yet (NOTYET blocks), so
 * this is always a passive/broadcast probe scan.
 * Returns 0 on success or a positive errno; on failure the scan
 * flags are cleared again.
 */
int
mt7921_mcu_hw_scan(struct mwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	struct mt76_connac_hw_scan_req *req;
	struct mbuf *m;
	int n_ssids = 0;
	int rv;
	uint8_t nchan;

	m = mwx_mcu_alloc_msg(sizeof(*req));
	if (m == NULL)
		return ENOMEM;
	req = mtod(m, struct mt76_connac_hw_scan_req *);

	sc->sc_flags |= MWX_FLAG_SCANNING;
	/* sequence number cycles through 0..127 to match scan events */
	sc->sc_scan_seq_num = (sc->sc_scan_seq_num + 1) & 0x7f;

	req->seq_num = sc->sc_scan_seq_num /* | sc->sc_band_idx << 7 */;
	req->bss_idx = /* mvif->idx */ 0;
	req->scan_type = /* sreq->n_ssids ? 1 : */ 0;
	req->probe_req_num = /* sreq->n_ssids ? 2 : */ 0;
	req->version = 1;

#ifdef NOTYET
	for (i = 0; i < sreq->n_ssids; i++) {
		if (!sreq->ssids[i].ssid_len)
			continue;
		req->ssids[i].ssid_len = htole32(sreq->ssids[i].ssid_len);
		memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
		    sreq->ssids[i].ssid_len);
		n_ssids++;
	}
#endif

	/* with n_ssids == 0 this selects wildcard SSID scanning */
	req->ssid_type = n_ssids ? 0x4 : 0x1;
	req->ssid_type_ext = n_ssids ? 1 : 0;
	req->ssids_num = n_ssids;

	/* index 0 of ic_channels is unused, start at 1 */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < 64; c++) {
		struct mt76_connac_mcu_scan_channel *chan;
		uint8_t channel_num;

		/* skip channels not configured for this regulatory domain */
		if (c->ic_flags == 0)
			continue;

		/* first 32 channels in the base array, rest in the ext one */
		if (nchan < 32)
			chan = &req->channels[nchan];
		else
			chan = &req->ext_channels[nchan - 32];

		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		/* TODO IEEE80211_IS_CHAN_6GHZ -> chan->band = 3*/
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			chan->band = 1;
		} else {
			chan->band = 2;
		}
		chan->channel_num = channel_num;
		nchan++;
	}

	if (nchan <= 32) {
		req->channels_num = nchan;
	} else {
		req->channels_num = 32;
		req->ext_channels_num = nchan - 32;
	}

	/* channel_type 4: scan the explicit list built above */
	req->channel_type = nchan ? 4 : 0;
	/* 120ms dwell per channel; overall timeout scales with count */
	req->timeout_value = htole16(nchan * 120);
	req->channel_min_dwell_time = htole16(120);
	req->channel_dwell_time = htole16(120);


#ifdef NOTYET
	if (sreq->ie_len > 0) {
		memcpy(req->ies, sreq->ie, sreq->ie_len);
		req->ies_len = htole16(sreq->ie_len);
	}
#endif

	req->scan_func |= SCAN_FUNC_SPLIT_SCAN;

	/* wildcard BSSID */
	memset(req->bssid, 0xff, ETHER_ADDR_LEN);
#ifdef NOTYET
	if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		get_random_mask_addr(req->random_mac, sreq->mac_addr,
		    sreq->mac_addr_mask);
		req->scan_func |= SCAN_FUNC_RANDOM_MAC;
	}
#endif

	rv = mwx_mcu_send_mbuf(sc, MCU_CE_CMD_START_HW_SCAN, m, NULL);
	if (rv != 0)
		sc->sc_flags &= ~(MWX_FLAG_SCANNING | MWX_FLAG_BGSCAN);

	return rv;
}
3497
3498 int
mt7921_mcu_hw_scan_cancel(struct mwx_softc * sc)3499 mt7921_mcu_hw_scan_cancel(struct mwx_softc *sc)
3500 {
3501 struct {
3502 uint8_t seq_num;
3503 uint8_t is_ext_channel;
3504 uint8_t rsv[2];
3505 } __packed req = {
3506 .seq_num = sc->sc_scan_seq_num,
3507 };
3508 int rv;
3509
3510 rv = mwx_mcu_send_msg(sc, MCU_CE_CMD_CANCEL_HW_SCAN, &req,
3511 sizeof(req), NULL);
3512 if (rv == 0)
3513 sc->sc_flags &= ~(MWX_FLAG_SCANNING | MWX_FLAG_BGSCAN);
3514 return rv;
3515 }
3516
3517 void
mwx_end_scan_task(void * arg)3518 mwx_end_scan_task(void *arg)
3519 {
3520 struct mwx_softc *sc = arg;
3521 struct ieee80211com *ic = &sc->sc_ic;
3522 int s;
3523
3524 s = splnet();
3525 ieee80211_end_scan(&ic->ic_if);
3526 splx(s);
3527 }
3528
3529 void
mt7921_mcu_scan_event(struct mwx_softc * sc,struct mbuf * m)3530 mt7921_mcu_scan_event(struct mwx_softc *sc, struct mbuf *m)
3531 {
3532 if (mt7921_mcu_hw_scan_cancel(sc) != 0)
3533 return;
3534 task_add(systq, &sc->sc_scan_task);
3535 }
3536
3537 int
mt7921_mcu_set_mac_enable(struct mwx_softc * sc,int band,int enable)3538 mt7921_mcu_set_mac_enable(struct mwx_softc *sc, int band, int enable)
3539 {
3540 struct {
3541 uint8_t enable;
3542 uint8_t band;
3543 uint8_t rsv[2];
3544 } __packed req = {
3545 .enable = enable,
3546 .band = band,
3547 };
3548
3549 return mwx_mcu_send_wait(sc, MCU_EXT_CMD_MAC_INIT_CTRL, &req,
3550 sizeof(req));
3551 }
3552
3553 int
mt7921_mcu_set_channel_domain(struct mwx_softc * sc)3554 mt7921_mcu_set_channel_domain(struct mwx_softc *sc)
3555 {
3556 struct {
3557 uint8_t alpha2[4]; /* regulatory_request.alpha2 */
3558 uint8_t bw_2g; /* BW_20_40M 0
3559 * BW_20M 1
3560 * BW_20_40_80M 2
3561 * BW_20_40_80_160M 3
3562 * BW_20_40_80_8080M 4
3563 */
3564 uint8_t bw_5g;
3565 uint8_t bw_6g;
3566 uint8_t pad;
3567 uint8_t n_2ch;
3568 uint8_t n_5ch;
3569 uint8_t n_6ch;
3570 uint8_t pad2;
3571 } __packed *hdr;
3572 struct {
3573 uint16_t hw_value;
3574 uint16_t pad;
3575 uint32_t flags;
3576 } __packed *channel;
3577 struct ieee80211com *ic = &sc->sc_ic;
3578 struct ieee80211_channel *chan;
3579 struct mbuf *m;
3580 int i, len, rv;
3581 int n_2ch = 0, n_5ch = 0, n_6ch = 0;
3582
3583 len = sizeof(*hdr) + IEEE80211_CHAN_MAX * sizeof(channel);
3584 m = mwx_mcu_alloc_msg(len);
3585 if (m == NULL)
3586 return ENOMEM;
3587 hdr = mtod(m, void *);
3588
3589 hdr->alpha2[0] = '0';
3590 hdr->alpha2[1] = '0';
3591
3592 channel = (void *)(hdr + 1);
3593
3594 hdr->bw_2g = 0; /* BW_20_40M */
3595 for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
3596 chan = &ic->ic_channels[i];
3597 if (!IEEE80211_IS_CHAN_2GHZ(chan))
3598 continue;
3599
3600 channel->hw_value = htole16(ieee80211_chan2ieee(ic, chan));
3601 channel->flags = htole32(0); /* XXX */
3602
3603 channel++;
3604 n_2ch++;
3605 }
3606 hdr->bw_5g = 3; /* BW_20_40_80_160M */
3607 for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
3608 chan = &ic->ic_channels[i];
3609 if (!IEEE80211_IS_CHAN_5GHZ(chan))
3610 continue;
3611
3612 channel->hw_value = htole16(ieee80211_chan2ieee(ic, chan));
3613 channel->flags = htole32(0); /* XXX */
3614
3615 channel++;
3616 n_5ch++;
3617 }
3618 #ifdef NOTYET
3619 /* 6GHz handling */
3620 hdr->bw_6g = 3; /* BW_20_40_80_160M */
3621 for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
3622 chan = &ic->ic_channels[i];
3623 if (!IEEE80211_IS_CHAN_6GHZ(chan))
3624 continue;
3625
3626 channel->hw_value = htole16(ieee80211_chan2ieee(ic, chan));
3627 channel->flags = htole32(0); /* XXX */
3628
3629 channel++;
3630 n_6ch++;
3631 }
3632 #endif
3633
3634 memcpy(hdr->alpha2, sc->sc_alpha2, sizeof(sc->sc_alpha2));
3635 hdr->n_2ch = n_2ch;
3636 hdr->n_5ch = n_5ch;
3637 hdr->n_6ch = n_6ch;
3638
3639 mwx_mcu_set_len(m, channel);
3640 rv = mwx_mcu_send_mbuf(sc, MCU_CE_CMD_SET_CHAN_DOMAIN, m, NULL);
3641 return rv;
3642 }
3643
3644 uint8_t
mt7921_mcu_chan_bw(struct ieee80211_channel * channel)3645 mt7921_mcu_chan_bw(struct ieee80211_channel *channel)
3646 {
3647 /*
3648 * following modes are not yet supported
3649 * CMD_CBW_5MHZ, CMD_CBW_10MHZ, CMD_CBW_8080MHZ
3650 */
3651 if (channel->ic_xflags & IEEE80211_CHANX_160MHZ)
3652 return CMD_CBW_160MHZ;
3653 if (channel->ic_xflags & IEEE80211_CHANX_80MHZ)
3654 return CMD_CBW_80MHZ;
3655 if (channel->ic_flags & IEEE80211_CHAN_40MHZ)
3656 return CMD_CBW_40MHZ;
3657 return CMD_CBW_20MHZ;
3658 }
3659
/*
 * Program the operating channel into the firmware.  cmd selects the
 * flavour (e.g. MCU_EXT_CMD_CHANNEL_SWITCH); the channel is taken
 * from the current BSS in STA mode, otherwise from ic_ibss_chan.
 * Returns 0 on success or a positive errno.
 */
int
mt7921_mcu_set_chan_info(struct mwx_softc *sc, int cmd)
{
	struct ieee80211_channel *channel;
	struct {
		uint8_t control_ch;
		uint8_t center_ch;
		uint8_t bw;
		uint8_t tx_streams_num;
		uint8_t rx_streams;	/* mask or num */
		uint8_t switch_reason;
		uint8_t band_idx;
		uint8_t center_ch2;	/* for 80+80 only */
		uint16_t cac_case;
		uint8_t channel_band;
		uint8_t rsv0;
		uint32_t outband_freq;
		uint8_t txpower_drop;
		uint8_t ap_bw;
		uint8_t ap_center_ch;
		uint8_t rsv1[57];
	} __packed req = {
		.tx_streams_num = sc->sc_capa.num_streams,
		.rx_streams = sc->sc_capa.antenna_mask,
		.band_idx = 0,		/* XXX 0 or 1 */
	};

	if (sc->sc_ic.ic_opmode == IEEE80211_M_STA && sc->sc_ic.ic_bss != NULL)
		channel = sc->sc_ic.ic_bss->ni_chan;
	else
		channel = sc->sc_ic.ic_ibss_chan;

	/*
	 * Control and center channel are set identically; wide-channel
	 * center offsets are not computed yet.
	 */
	req.control_ch = ieee80211_mhz2ieee(channel->ic_freq,
	    channel->ic_flags);
	req.center_ch = ieee80211_mhz2ieee(channel->ic_freq,
	    channel->ic_flags);
	req.bw = mt7921_mcu_chan_bw(channel);

	/* channel_band: 0 = 2GHz, 1 = 5GHz, 2 = 6GHz (not yet) */
#ifdef NOTYET
	if (channel->ic_flags & IEEE80211_CHAN_6GHZ)
		req.channel_band = 2;
	else
#endif
	if (channel->ic_flags & IEEE80211_CHAN_5GHZ)
		req.channel_band = 1;
	else
		req.channel_band = 0;

	/* off-channel and DFS switch reasons are not implemented yet */
#ifdef NOTYET
	if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
	    chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
		req.switch_reason = CH_SWITCH_DFS;
	else
#endif
		req.switch_reason = CH_SWITCH_NORMAL;

	/* CHANNEL_SWITCH passes a stream count, the others a mask */
	if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH)
		req.rx_streams = sc->sc_capa.num_streams;

#ifdef NOTYET
	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;
		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}
#endif

	return mwx_mcu_send_wait(sc, cmd, &req, sizeof(req));
}
3730
3731 /* hardcoded version of what linux does */
3732 void
mt7921_mcu_build_sku(struct mwx_softc * sc,int band,int8_t * sku)3733 mt7921_mcu_build_sku(struct mwx_softc *sc, int band, int8_t *sku)
3734 {
3735 int max_power = 127;
3736 int i, offset = 4;
3737
3738 memset(sku, max_power, MT_SKU_POWER_LIMIT);
3739
3740 if (band == MT_TX_PWR_BAND_2GHZ) {
3741 /* cck */
3742 memset(sku, 0x28, 4);
3743 }
3744
3745 /* ofdm */
3746 memset(sku + offset, 0x28, 8);
3747 offset += 8;
3748
3749 /* ht */
3750 for (i = 0; i < 2; i++) {
3751 memset(sku + offset, 0x28, 8);
3752 offset += 8;
3753 }
3754 sku[offset++] = 0x28;
3755
3756 /* vht */
3757 for (i = 0; i < 4; i++) {
3758 /* this only sets 10 out of 12 bytes on purpose */
3759 memset(sku + offset, 0x28, 10);
3760 offset += 12;
3761 }
3762
3763 /* he */
3764 for (i = 0; i < 7; i++) {
3765 memset(sku + offset, 0x28, 12);
3766 offset += 12;
3767 }
3768 }
3769
3770 int
mt7921_mcu_rate_txpower_band(struct mwx_softc * sc,int band,const uint8_t * chans,int n_chans,int is_last)3771 mt7921_mcu_rate_txpower_band(struct mwx_softc *sc, int band,
3772 const uint8_t *chans, int n_chans, int is_last)
3773 {
3774 struct mt76_connac_sku_tlv *sku_tlv;
3775 struct mt76_connac_tx_power_limit_tlv *tx_power_tlv;
3776 struct mbuf *m;
3777 int batch_size = 8;
3778 const int len = sizeof(*tx_power_tlv) + batch_size * sizeof(*sku_tlv);
3779 int rv = 0, idx, j;
3780
3781 for (idx = 0; idx < n_chans; ) {
3782 int num_ch = batch_size;
3783
3784 m = mwx_mcu_alloc_msg(len);
3785 if (m == NULL)
3786 return ENOMEM;
3787 tx_power_tlv = mtod(m, struct mt76_connac_tx_power_limit_tlv *);
3788 tx_power_tlv->n_chan = num_ch;
3789 tx_power_tlv->band = band;
3790 memcpy(tx_power_tlv->alpha2, sc->sc_alpha2,
3791 sizeof(sc->sc_alpha2));
3792
3793 if (n_chans - idx < batch_size) {
3794 num_ch = n_chans - idx;
3795 if (is_last)
3796 tx_power_tlv->last_msg = 1;
3797 }
3798
3799 sku_tlv = (struct mt76_connac_sku_tlv *)(tx_power_tlv + 1);
3800
3801 for (j = 0; j < num_ch; j++, idx++) {
3802 sku_tlv->channel = chans[idx];
3803 mt7921_mcu_build_sku(sc, band, sku_tlv->pwr_limit);
3804 sku_tlv++;
3805 }
3806
3807 mwx_mcu_set_len(m, sku_tlv);
3808 rv = mwx_mcu_send_mbuf(sc, MCU_CE_CMD_SET_RATE_TX_POWER, m,
3809 NULL);
3810 if (rv != 0)
3811 break;
3812 }
3813
3814 return rv;
3815 }
3816
/*
 * Push TX power limits for every supported band to the firmware.
 * The channel lists are the full sets the hardware can use; the
 * last band uploaded is flagged so the firmware can commit.
 * Returns 0 on success or the first failing band's errno.
 */
int
mt7921_mcu_set_rate_txpower(struct mwx_softc *sc)
{
	static const uint8_t chan_list_2ghz[] = {
		  1,   2,   3,   4,   5,   6,   7,
		  8,   9,  10,  11,  12,  13,  14
	};
	static const uint8_t chan_list_5ghz[] = {
		 36,  38,  40,  42,  44,  46,  48,
		 50,  52,  54,  56,  58,  60,  62,
		 64, 100, 102, 104, 106, 108, 110,
		112, 114, 116, 118, 120, 122, 124,
		126, 128, 132, 134, 136, 138, 140,
		142, 144, 149, 151, 153, 155, 157,
		159, 161, 165
	};
	static const uint8_t chan_list_6ghz[] = {
		  1,   3,   5,   7,   9,  11,  13,
		 15,  17,  19,  21,  23,  25,  27,
		 29,  33,  35,  37,  39,  41,  43,
		 45,  47,  49,  51,  53,  55,  57,
		 59,  61,  65,  67,  69,  71,  73,
		 75,  77,  79,  81,  83,  85,  87,
		 89,  91,  93,  97,  99, 101, 103,
		105, 107, 109, 111, 113, 115, 117,
		119, 121, 123, 125, 129, 131, 133,
		135, 137, 139, 141, 143, 145, 147,
		149, 151, 153, 155, 157, 161, 163,
		165, 167, 169, 171, 173, 175, 177,
		179, 181, 183, 185, 187, 189, 193,
		195, 197, 199, 201, 203, 205, 207,
		209, 211, 213, 215, 217, 219, 221,
		225, 227, 229, 233
	};
	int rv = 0;

	/* the last band present gets is_last = 1 */
	if (sc->sc_capa.has_2ghz)
		rv = mt7921_mcu_rate_txpower_band(sc, MT_TX_PWR_BAND_2GHZ,
		    chan_list_2ghz, nitems(chan_list_2ghz),
		    !(sc->sc_capa.has_5ghz || sc->sc_capa.has_6ghz));
	if (rv == 0 && sc->sc_capa.has_5ghz)
		rv = mt7921_mcu_rate_txpower_band(sc, MT_TX_PWR_BAND_5GHZ,
		    chan_list_5ghz, nitems(chan_list_5ghz),
		    !sc->sc_capa.has_6ghz);
	if (rv == 0 && sc->sc_capa.has_6ghz)
		rv = mt7921_mcu_rate_txpower_band(sc, MT_TX_PWR_BAND_6GHZ,
		    chan_list_6ghz, nitems(chan_list_6ghz), 1);
	return rv;
}
3866
/*
 * Reset the hardware statistics counters.  The aggregation and MIB
 * registers are read-to-clear, so reading them (and discarding the
 * value) is the reset; the airtime counters additionally need the
 * explicit clear bit set.
 */
void
mt7921_mac_reset_counters(struct mwx_softc *sc)
{
	int i;

	for (i = 0; i < 4; i++) {
		mwx_read(sc, MT_TX_AGG_CNT(0, i));
		mwx_read(sc, MT_TX_AGG_CNT2(0, i));
	}

	/* XXX TODO stats in softc */

	/* reset airtime counters */
	mwx_read(sc, MT_MIB_SDR9(0));
	mwx_read(sc, MT_MIB_SDR36(0));
	mwx_read(sc, MT_MIB_SDR37(0));
	mwx_set(sc, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mwx_set(sc, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}
3886
/*
 * Program MAC timing parameters: CCK/OFDM timeouts (stretched by the
 * coverage class), SIFS/slot time and the CF-End rate.  TX and RX are
 * disabled around the update so the MAC never runs with inconsistent
 * timing.  Several inputs are still hardcoded (see XXX notes).
 */
void
mt7921_mac_set_timing(struct mwx_softc *sc)
{
	uint16_t coverage_class = 0;	/* XXX */
	uint32_t val, reg_offset;
	uint32_t cck = MT_TIMEOUT_CCK_DEF_VAL;
	uint32_t ofdm = MT_TIMEOUT_OFDM_DEF_VAL;
	uint32_t offset;
	int is_2ghz = 1;	/* XXX get from ic_bss node */
	uint32_t sifs = is_2ghz ? 10 : 16;	/* usec, per band */
	uint32_t slottime = IEEE80211_DUR_DS_SHSLOT; /* XXX get from stack */

#ifdef NOTYET
	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;
#endif

	/* quiesce the MAC while timing registers change */
	mwx_set(sc, MT_ARB_SCR(0),
	    MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	delay(1);

	/* same coverage-class offset in both halves of the register */
	offset = 3 * coverage_class;
	reg_offset = offset | (offset << 16);

	mwx_write(sc, MT_TMAC_CDTR(0), cck + reg_offset);
	mwx_write(sc, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mwx_write(sc, MT_TMAC_ICR0(0),
	    MT_IFS_EIFS_DEF | MT_IFS_RIFS_DEF |
	    (sifs << MT_IFS_SIFS_SHIFT) |
	    (slottime << MT_IFS_SLOT_SHIFT));

	/* short slot or 5GHz: OFDM CF-End rate, otherwise 11b */
	if (slottime < 20 || !is_2ghz)
		val = MT7921_CFEND_RATE_DEFAULT;
	else
		val = MT7921_CFEND_RATE_11B;

	mwx_rmw(sc, MT_AGG_ACR0(0), val, MT_AGG_ACR_CFEND_RATE_MASK);
	mwx_clear(sc, MT_ARB_SCR(0),
	    MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
3927
3928 int
mt7921_mcu_uni_add_dev(struct mwx_softc * sc,struct mwx_vif * mvif,struct mwx_node * mn,int enable)3929 mt7921_mcu_uni_add_dev(struct mwx_softc *sc, struct mwx_vif *mvif,
3930 struct mwx_node *mn, int enable)
3931 {
3932 struct {
3933 struct {
3934 uint8_t omac_idx;
3935 uint8_t band_idx;
3936 uint16_t pad;
3937 } __packed hdr;
3938 struct req_tlv {
3939 uint16_t tag;
3940 uint16_t len;
3941 uint8_t active;
3942 uint8_t pad;
3943 uint8_t omac_addr[ETHER_ADDR_LEN];
3944 } __packed tlv;
3945 } dev_req = {
3946 .hdr = {
3947 .omac_idx = mvif->omac_idx,
3948 .band_idx = mvif->band_idx,
3949 },
3950 .tlv = {
3951 .tag = htole16(DEV_INFO_ACTIVE),
3952 .len = htole16(sizeof(struct req_tlv)),
3953 .active = enable,
3954 },
3955 };
3956 struct {
3957 struct {
3958 uint8_t bss_idx;
3959 uint8_t pad[3];
3960 } __packed hdr;
3961 struct mt76_connac_bss_basic_tlv basic;
3962 } basic_req = {
3963 .hdr = {
3964 .bss_idx = mvif->idx,
3965 },
3966 .basic = {
3967 .tag = htole16(UNI_BSS_INFO_BASIC),
3968 .len = htole16(
3969 sizeof(struct mt76_connac_bss_basic_tlv)),
3970 .omac_idx = mvif->omac_idx,
3971 .band_idx = mvif->band_idx,
3972 .wmm_idx = mvif->wmm_idx,
3973 .active = enable,
3974 .bmc_tx_wlan_idx = htole16(mn->wcid),
3975 .sta_idx = htole16(mn->wcid),
3976 .conn_state = 1,
3977 },
3978 };
3979 int rv, idx, cmd, len;
3980 void *data;
3981
3982 switch (sc->sc_ic.ic_opmode) {
3983 case IEEE80211_M_MONITOR:
3984 case IEEE80211_M_HOSTAP:
3985 basic_req.basic.conn_type =
3986 htole32(STA_TYPE_AP | NETWORK_INFRA);
3987 break;
3988 case IEEE80211_M_STA:
3989 basic_req.basic.conn_type =
3990 htole32(STA_TYPE_STA | NETWORK_INFRA);
3991 break;
3992 case IEEE80211_M_IBSS:
3993 basic_req.basic.conn_type =
3994 htole32(STA_TYPE_ADHOC | NETWORK_IBSS);
3995 break;
3996 default:
3997 panic("%s: unknown operation mode", DEVNAME(sc));
3998 }
3999
4000 idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
4001 basic_req.basic.hw_bss_idx = idx;
4002
4003 memcpy(dev_req.tlv.omac_addr, sc->sc_lladdr, ETHER_ADDR_LEN);
4004
4005 if (enable) {
4006 cmd = MCU_UNI_CMD_DEV_INFO_UPDATE;
4007 data = &dev_req;
4008 len = sizeof(dev_req);
4009 } else {
4010 cmd = MCU_UNI_CMD_BSS_INFO_UPDATE;
4011 data = &basic_req;
4012 len = sizeof(basic_req);
4013 }
4014
4015 printf("%s: %s cmd %x mvif idx %d omac %d band %d wmm %d\n", DEVNAME(sc), __func__, cmd, mvif->idx, mvif->omac_idx, mvif->band_idx, mvif->wmm_idx);
4016 rv = mwx_mcu_send_wait(sc, cmd, data, len);
4017 if (rv < 0)
4018 return rv;
4019
4020 if (enable) {
4021 cmd = MCU_UNI_CMD_BSS_INFO_UPDATE;
4022 data = &basic_req;
4023 len = sizeof(basic_req);
4024 } else {
4025 cmd = MCU_UNI_CMD_DEV_INFO_UPDATE;
4026 data = &dev_req;
4027 len = sizeof(dev_req);
4028 }
4029
4030 printf("%s: %s cmd %x wcid %d\n", DEVNAME(sc), __func__, cmd, mn->wcid);
4031 return mwx_mcu_send_wait(sc, cmd, data, len);
4032 }
4033
4034 int
mt7921_mcu_set_sniffer(struct mwx_softc * sc,int enable)4035 mt7921_mcu_set_sniffer(struct mwx_softc *sc, int enable)
4036 {
4037 struct {
4038 uint8_t band_idx;
4039 uint8_t pad[3];
4040 struct sniffer_enable_tlv {
4041 uint16_t tag;
4042 uint16_t len;
4043 uint8_t enable;
4044 uint8_t pad[3];
4045 } enable;
4046 } req = {
4047 .band_idx = 0,
4048 .enable = {
4049 .tag = htole16(0),
4050 .len = htole16(sizeof(struct sniffer_enable_tlv)),
4051 .enable = enable,
4052 },
4053 };
4054
4055 return mwx_mcu_send_wait(sc, MCU_UNI_CMD_SNIFFER, &req, sizeof(req));
4056 }
4057
4058 int
mt7921_mcu_set_beacon_filter(struct mwx_softc * sc,int enable)4059 mt7921_mcu_set_beacon_filter(struct mwx_softc *sc, int enable)
4060 {
4061 int rv;
4062
4063 if (enable) {
4064 #ifdef NOTYET
4065 rv = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
4066 if (rv)
4067 return rv;
4068 #endif
4069 mwx_set(sc, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
4070 } else {
4071 rv = mt7921_mcu_set_bss_pm(sc, 0);
4072 if (rv)
4073 return rv;
4074 mwx_clear(sc, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
4075 }
4076 return 0;
4077 }
4078
/*
 * Update the firmware's BSS power-management state.  Today only the
 * SET_BSS_ABORT half is wired up (clearing the connected state); the
 * SET_BSS_CONNECTED message with real vif parameters is still under
 * NOTYET, so the enable argument is currently unused.
 * Returns 0 on success or a positive errno.
 */
int
mt7921_mcu_set_bss_pm(struct mwx_softc *sc, int enable)
{
#ifdef NOTYET
	struct {
		uint8_t bss_idx;
		uint8_t dtim_period;
		uint16_t aid;
		uint16_t bcn_interval;
		uint16_t atim_window;
		uint8_t uapsd;
		uint8_t bmc_delivered_ac;
		uint8_t bmc_triggered_ac;
		uint8_t pad;
	} req = {
		.bss_idx = mvif->mt76.idx,
		.aid = htole16(vif->cfg.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = htole16(vif->bss_conf.beacon_int),
	};
#endif
	struct {
		uint8_t bss_idx;
		uint8_t pad[3];
	} req_hdr = {
		.bss_idx = /* mvif->mt76.idx XXX */ 0,
	};
	int rv;

	rv = mwx_mcu_send_msg(sc, MCU_CE_CMD_SET_BSS_ABORT,
	    &req_hdr, sizeof(req_hdr), NULL);
#ifdef NOTYET
	if (rv != 0 || !enable)
		return rv;
	rv = mwx_mcu_send_msg(sc, MCU_CE_CMD_SET_BSS_CONNECTED,
	    &req, sizeof(req), NULL);
#endif
	return rv;
}
4118
4119 #define IEEE80211_NUM_ACS 4
/*
 * Program the EDCA (WMM) parameters for all four access categories of
 * a vif.  The per-queue values are still hardcoded defaults (see the
 * NOTYET blocks for the stack-supplied versions); MU-EDCA for HE is
 * likewise not wired up yet.
 * Returns 0 on success or a positive errno.
 */
int
mt7921_mcu_set_tx(struct mwx_softc *sc, struct mwx_vif *mvif)
{
	struct edca {
		uint16_t cw_min;
		uint16_t cw_max;
		uint16_t txop;
		uint16_t aifs;
		uint8_t guardtime;
		uint8_t acm;
	} __packed;
	struct mt7921_mcu_tx {
		struct edca edca[IEEE80211_NUM_ACS];
		uint8_t bss_idx;
		uint8_t qos;
		uint8_t wmm_idx;
		uint8_t pad;
	} __packed req = {
		.bss_idx = mvif->idx,
		.qos = /* vif->bss_conf.qos */ 0,
		.wmm_idx = mvif->wmm_idx,
	};
#ifdef NOTYET
	struct mu_edca {
		uint8_t cw_min;
		uint8_t cw_max;
		uint8_t aifsn;
		uint8_t acm;
		uint8_t timer;
		uint8_t padding[3];
	};
	struct mt7921_mcu_mu_tx {
		uint8_t ver;
		uint8_t pad0;
		uint16_t len;
		uint8_t bss_idx;
		uint8_t qos;
		uint8_t wmm_idx;
		uint8_t pad1;
		struct mu_edca edca[IEEE80211_NUM_ACS];
		uint8_t pad3[32];
	} __packed req_mu = {
		.bss_idx = mvif->mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->mt76.wmm_idx,
	};
#endif
	/* maps the driver's AC index to the firmware's ACI slot */
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac, rv;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		//struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
		struct edca *e = &req.edca[to_aci[ac]];

		/* hardcoded defaults until queue params come from the stack */
		e->aifs = htole16(/* q->aifs */ 2);
		e->txop = htole16(/* q->txop */ 0);

#ifdef NOTYET
		if (q->cw_min)
			e->cw_min = htole16(q->cw_min);
		else
#endif
			e->cw_min = htole16(5);

#ifdef NOTYET
		if ( q->cw_max)
			e->cw_max = htole16(q->cw_max);
		else
#endif
			e->cw_max = htole16(10);
	}

	rv = mwx_mcu_send_msg(sc, MCU_CE_CMD_SET_EDCA_PARMS, &req,
	    sizeof(req), NULL);

#ifdef NOTYET
	if (rv)
		return rv;
	if (!vif->bss_conf.he_support)
		return 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_he_mu_edca_param_ac_rec *q;
		struct mu_edca *e;

		if (!mvif->queue_params[ac].mu_edca)
			break;

		q = &mvif->queue_params[ac].mu_edca_param_rec;
		e = &(req_mu.edca[to_aci[ac]]);

		e->cw_min = q->ecw_min_max & 0xf;
		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
		e->aifsn = q->aifsn;
		e->timer = q->mu_edca_timer;
	}

	rv = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
	    &req_mu, sizeof(req_mu), false);
#endif
	return rv;
}
4222
4223 int
mt7921_mac_fill_rx(struct mwx_softc * sc,struct mbuf * m,struct ieee80211_rxinfo * rxi)4224 mt7921_mac_fill_rx(struct mwx_softc *sc, struct mbuf *m,
4225 struct ieee80211_rxinfo *rxi)
4226 {
4227 struct ieee80211com *ic = &sc->sc_ic;
4228 uint32_t *rxd, rxd0, rxd1, rxd2, rxd3, rxd4;
4229 // uint32_t mode = 0;
4230 uint16_t hdr_gap /*, seq_ctrl = 0, fc = 0 */;
4231 uint8_t chfnum, remove_pad /*, qos_ctl = 0, amsdu_info */;
4232 int idx, unicast, num_rxd = 6;
4233 // bool insert_ccmp_hdr = false;
4234
4235 if (m->m_len < num_rxd * sizeof(uint32_t))
4236 return -1;
4237
4238 rxd = mtod(m, uint32_t *);
4239 rxd0 = le32toh(rxd[0]);
4240 rxd1 = le32toh(rxd[1]);
4241 rxd2 = le32toh(rxd[2]);
4242 rxd3 = le32toh(rxd[3]);
4243 rxd4 = le32toh(rxd[4]);
4244
4245 if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
4246 return -1;
4247
4248 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
4249 return -1;
4250
4251 if (rxd2 & MT_RXD2_NORMAL_HDR_TRANS)
4252 return -1;
4253
4254 /* ICV error or CCMP/BIP/WPI MIC error */
4255 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) {
4256 ic->ic_stats.is_rx_decryptcrc++;
4257 return -1;
4258 }
4259
4260 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
4261 return -1;
4262
4263 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) {
4264 /* report MIC failures to net80211 for TKIP */
4265 ic->ic_stats.is_rx_locmicfail++;
4266 ieee80211_michael_mic_failure(ic, 0/* XXX */);
4267 return -1;
4268 }
4269
4270
4271 chfnum = (rxd3 & MT_RXD3_NORMAL_CH_NUM_MASK) >>
4272 MT_RXD3_NORMAL_CH_NUM_SHIFT;
4273 unicast = (rxd3 & MT_RXD3_NORMAL_ADDR_TYPE_MASK) == MT_RXD3_NORMAL_U2M;
4274 idx = rxd1 & MT_RXD1_NORMAL_WLAN_IDX_MASK;
4275
4276 #if 0
4277 status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
4278 if (status->wcid) {
4279 struct mt7921_sta *msta;
4280
4281 msta = container_of(status->wcid, struct mt7921_sta, wcid);
4282 spin_lock_bh(&dev->sta_poll_lock);
4283 if (list_empty(&msta->poll_list))
4284 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
4285 spin_unlock_bh(&dev->sta_poll_lock);
4286 }
4287 #endif
4288
4289 #if NOTYET
4290 if ((rxd0 & (MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM)) ==
4291 (MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM)) {
4292 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK |
4293 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
4294 }
4295
4296 if ((rxd1 & MT_RXD1_NORMAL_SEC_MODE_MASK) != 0 &&
4297 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
4298 rxi->rxi_flags |= IEEE80211_RXI_HWDEC |
4299 IEEE80211_RXI_HWDEC_IV_STRIPPED;
4300 }
4301 #endif
4302
4303 remove_pad = (rxd2 & MT_RXD2_NORMAL_HDR_OFFSET_MASK) >>
4304 MT_RXD2_NORMAL_HDR_OFFSET_SHIFT;
4305
4306 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
4307 return -EINVAL;
4308
4309 rxd += 6;
4310
4311 if (rxd1 & MT_RXD1_NORMAL_GROUP_4)
4312 num_rxd += 4;
4313 if (rxd1 & MT_RXD1_NORMAL_GROUP_1)
4314 num_rxd += 4;
4315 if (rxd1 & MT_RXD1_NORMAL_GROUP_2)
4316 num_rxd += 2;
4317 if (rxd1 & MT_RXD1_NORMAL_GROUP_3)
4318 num_rxd += 2;
4319 if (rxd1 & MT_RXD1_NORMAL_GROUP_5)
4320 num_rxd += 18;
4321
4322 if (m->m_len < num_rxd * sizeof(uint32_t))
4323 return -1;
4324
4325 #if 0
4326 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
4327 uint32_t v0 = le32toh(rxd[0]);
4328 uint32_t v2 = le32toh(rxd[2]);
4329
4330 fc = htole16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
4331 seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
4332 qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
4333
4334 rxd += 4;
4335 }
4336
4337 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
4338 u8 *data = (u8 *)rxd;
4339
4340 if (status->flag & RX_FLAG_DECRYPTED) {
4341 switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
4342 case MT_CIPHER_AES_CCMP:
4343 case MT_CIPHER_CCMP_CCX:
4344 case MT_CIPHER_CCMP_256:
4345 insert_ccmp_hdr =
4346 (rxd2 & MT_RXD2_NORMAL_FRAG);
4347 /* FALLTHROUGH */
4348 case MT_CIPHER_TKIP:
4349 case MT_CIPHER_TKIP_NO_MIC:
4350 case MT_CIPHER_GCMP:
4351 case MT_CIPHER_GCMP_256:
4352 status->iv[0] = data[5];
4353 status->iv[1] = data[4];
4354 status->iv[2] = data[3];
4355 status->iv[3] = data[2];
4356 status->iv[4] = data[1];
4357 status->iv[5] = data[0];
4358 break;
4359 default:
4360 break;
4361 }
4362 }
4363 rxd += 4;
4364 }
4365
4366 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
4367 status->timestamp = le32_to_cpu(rxd[0]);
4368 status->flag |= RX_FLAG_MACTIME_START;
4369
4370 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
4371 status->flag |= RX_FLAG_AMPDU_DETAILS;
4372
4373 /* all subframes of an A-MPDU have the same timestamp */
4374 if (phy->rx_ampdu_ts != status->timestamp) {
4375 if (!++phy->ampdu_ref)
4376 phy->ampdu_ref++;
4377 }
4378 phy->rx_ampdu_ts = status->timestamp;
4379
4380 status->ampdu_ref = phy->ampdu_ref;
4381 }
4382
4383 rxd += 2;
4384 }
4385
4386 /* RXD Group 3 - P-RXV */
4387 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
4388 u8 stbc, gi;
4389 u32 v0, v1;
4390 bool cck;
4391
4392 rxv = rxd;
4393 rxd += 2;
4394
4395 v0 = le32_to_cpu(rxv[0]);
4396 v1 = le32_to_cpu(rxv[1]);
4397
4398 if (v0 & MT_PRXV_HT_AD_CODE)
4399 status->enc_flags |= RX_ENC_FLAG_LDPC;
4400
4401 status->chains = mphy->antenna_mask;
4402 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
4403 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
4404 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
4405 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
4406 status->signal = -128;
4407 for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
4408 if (!(status->chains & BIT(i)) ||
4409 status->chain_signal[i] >= 0)
4410 continue;
4411
4412 status->signal = max(status->signal,
4413 status->chain_signal[i]);
4414 }
4415
4416 stbc = FIELD_GET(MT_PRXV_STBC, v0);
4417 gi = FIELD_GET(MT_PRXV_SGI, v0);
4418 cck = false;
4419
4420 idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
4421 mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
4422
4423 switch (mode) {
4424 case MT_PHY_TYPE_CCK:
4425 cck = true;
4426 fallthrough;
4427 case MT_PHY_TYPE_OFDM:
4428 i = mt76_get_rate(&dev->mt76, sband, i, cck);
4429 break;
4430 case MT_PHY_TYPE_HT_GF:
4431 case MT_PHY_TYPE_HT:
4432 status->encoding = RX_ENC_HT;
4433 if (i > 31)
4434 return -EINVAL;
4435 break;
4436 case MT_PHY_TYPE_VHT:
4437 status->nss =
4438 FIELD_GET(MT_PRXV_NSTS, v0) + 1;
4439 status->encoding = RX_ENC_VHT;
4440 if (i > 9)
4441 return -EINVAL;
4442 break;
4443 case MT_PHY_TYPE_HE_MU:
4444 case MT_PHY_TYPE_HE_SU:
4445 case MT_PHY_TYPE_HE_EXT_SU:
4446 case MT_PHY_TYPE_HE_TB:
4447 status->nss =
4448 FIELD_GET(MT_PRXV_NSTS, v0) + 1;
4449 status->encoding = RX_ENC_HE;
4450 i &= GENMASK(3, 0);
4451
4452 if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
4453 status->he_gi = gi;
4454
4455 status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
4456 break;
4457 default:
4458 return -EINVAL;
4459 }
4460
4461 status->rate_idx = i;
4462
4463 switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
4464 case IEEE80211_STA_RX_BW_20:
4465 break;
4466 case IEEE80211_STA_RX_BW_40:
4467 if (mode & MT_PHY_TYPE_HE_EXT_SU &&
4468 (idx & MT_PRXV_TX_ER_SU_106T)) {
4469 status->bw = RATE_INFO_BW_HE_RU;
4470 status->he_ru =
4471 NL80211_RATE_INFO_HE_RU_ALLOC_106;
4472 } else {
4473 status->bw = RATE_INFO_BW_40;
4474 }
4475 break;
4476 case IEEE80211_STA_RX_BW_80:
4477 status->bw = RATE_INFO_BW_80;
4478 break;
4479 case IEEE80211_STA_RX_BW_160:
4480 status->bw = RATE_INFO_BW_160;
4481 break;
4482 default:
4483 return -EINVAL;
4484 }
4485
4486 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
4487 if (mode < MT_PHY_TYPE_HE_SU && gi)
4488 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
4489
4490 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
4491 rxd += 18;
4492 }
4493 }
4494
4495 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
4496 status->amsdu = !!amsdu_info;
4497 if (status->amsdu) {
4498 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
4499 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
4500 }
4501 #endif
4502
4503 hdr_gap = num_rxd * sizeof(uint32_t) + 2 * remove_pad;
4504 m_adj(m, hdr_gap);
4505 #if 0
4506 if (status->amsdu) {
4507 memmove(skb->data + 2, skb->data,
4508 ieee80211_get_hdrlen_from_skb(skb));
4509 skb_pull(skb, 2);
4510 }
4511
4512 struct ieee80211_hdr *hdr;
4513
4514 if (insert_ccmp_hdr) {
4515 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
4516
4517 mt76_insert_ccmp_hdr(skb, key_id);
4518 }
4519
4520 hdr = mt76_skb_get_hdr(skb);
4521 fc = hdr->frame_control;
4522 if (ieee80211_is_data_qos(fc)) {
4523 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
4524 qos_ctl = *ieee80211_get_qos_ctl(hdr);
4525 }
4526
4527 if (!status->wcid || !ieee80211_is_data_qos(fc))
4528 return 0;
4529
4530 status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
4531 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
4532 status->qos_ctl = qos_ctl;
4533 #endif
4534 rxi->rxi_chan = chfnum;
4535
4536 return 0;
4537 }
4538
4539 uint32_t
mt7921_mac_tx_rate_val(struct mwx_softc * sc)4540 mt7921_mac_tx_rate_val(struct mwx_softc *sc)
4541 {
4542 int rateidx = 0, offset = 4;
4543 uint32_t rate, mode;
4544
4545 /* XXX TODO basic_rates
4546 rateidx = ffs(vif->bss_conf.basic_rates) - 1;
4547 */
4548
4549 if (IEEE80211_IS_CHAN_2GHZ(sc->sc_ic.ic_bss->ni_chan))
4550 offset = 0;
4551 /* pick the lowest rate for hidden nodes */
4552 if (rateidx < 0)
4553 rateidx = 0;
4554
4555 rateidx += offset;
4556
4557 if (rateidx >= nitems(mt76_rates))
4558 rateidx = offset;
4559
4560 rate = mt76_rates[rateidx].hw_value;
4561 mode = (rate >> 8) << MT_TX_RATE_MODE_SHIFT;
4562 rate &= 0xff;
4563
4564 return (rate & MT_TX_RATE_IDX_MASK) | (mode & MT_TX_RATE_MODE_MASK);
4565 }
4566
4567 void
mt7921_mac_write_txwi_80211(struct mwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni,struct mt76_txwi * txp)4568 mt7921_mac_write_txwi_80211(struct mwx_softc *sc, struct mbuf *m,
4569 struct ieee80211_node *ni, struct mt76_txwi *txp)
4570 {
4571 struct ieee80211com *ic = &sc->sc_ic;
4572 struct ieee80211_frame *wh;
4573 uint32_t val;
4574 uint8_t type, subtype, tid = 0;
4575 u_int hdrlen;
4576 int multicast;
4577
4578
4579 wh = mtod(m, struct ieee80211_frame *);
4580 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4581 type >>= IEEE80211_FC0_TYPE_SHIFT;
4582 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4583 subtype >>= IEEE80211_FC0_SUBTYPE_SHIFT;
4584 multicast = IEEE80211_IS_MULTICAST(wh->i_addr1);
4585
4586 if (type == IEEE80211_FC0_TYPE_CTL)
4587 hdrlen = sizeof(struct ieee80211_frame_min);
4588 else
4589 hdrlen = ieee80211_get_hdrlen(wh);
4590
4591 /* Put QoS frames on the data queue which maps to their TID. */
4592 if (ieee80211_has_qos(wh)) {
4593 uint16_t qos = ieee80211_get_qos(wh);
4594
4595 tid = qos & IEEE80211_QOS_TID;
4596 }
4597
4598 #ifdef NOTYET
4599 if (ieee80211_is_action(fc) &&
4600 mgmt->u.action.category == WLAN_CATEGORY_BACK &&
4601 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
4602 u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
4603
4604 txp->txwi[5] |= htole32(MT_TXD5_ADD_BA);
4605 tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
4606 } else if (ieee80211_is_back_req(hdr->frame_control)) {
4607 struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
4608 u16 control = le16_to_cpu(bar->control);
4609
4610 tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
4611 }
4612 #endif
4613
4614 val = MT_HDR_FORMAT_802_11 | MT_TXD1_HDR_INFO(hdrlen / 2) |
4615 MT_TXD1_TID(tid);
4616 txp->txwi[1] |= htole32(val);
4617
4618 val = MT_TXD2_FRAME_TYPE(type) | MT_TXD2_SUB_TYPE(subtype);
4619 if (multicast)
4620 val |= MT_TXD2_MULTICAST;
4621
4622 #ifdef NOTYET
4623 if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
4624 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
4625 val |= MT_TXD2_BIP;
4626 txp->txwi[3] &= ~htole32(MT_TXD3_PROTECT_FRAME);
4627 }
4628 #endif
4629
4630 if (multicast || type != IEEE80211_FC0_TYPE_DATA) {
4631 /* Fixed rata is available just for 802.11 txd */
4632 uint32_t rate, val6;
4633
4634 val |= MT_TXD2_FIX_RATE;
4635 /* hardware won't add HTC for mgmt/ctrl frame */
4636 val |= htole32(MT_TXD2_HTC_VLD);
4637
4638 rate = mt7921_mac_tx_rate_val(sc);
4639
4640 val6 = MT_TXD6_FIXED_BW;
4641 val6 |= (rate << MT_TXD6_TX_RATE_SHIFT) & MT_TXD6_TX_RATE_MASK;
4642 txp->txwi[6] |= htole32(val6);
4643 txp->txwi[3] |= htole32(MT_TXD3_BA_DISABLE);
4644 }
4645
4646 txp->txwi[2] |= htole32(val);
4647
4648 #ifdef NOTYET
4649 if (ieee80211_is_beacon(fc)) {
4650 txp->txwi[3] &= ~htole32(MT_TXD3_SW_POWER_MGMT);
4651 txp->txwi[3] |= htole32(MT_TXD3_REM_TX_COUNT);
4652 }
4653
4654 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
4655 u16 seqno = le16_to_cpu(hdr->seq_ctrl);
4656
4657 if (ieee80211_is_back_req(hdr->frame_control)) {
4658 struct ieee80211_bar *bar;
4659
4660 bar = (struct ieee80211_bar *)skb->data;
4661 seqno = le16_to_cpu(bar->start_seq_num);
4662 }
4663
4664 val = MT_TXD3_SN_VALID |
4665 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
4666 txp->txwi[3] |= htole32(val);
4667 txp->txwi[7] &= ~htole32(MT_TXD7_HW_AMSDU);
4668 }
4669 #endif
4670
4671 val = MT_TXD7_TYPE(type) | MT_TXD7_SUB_TYPE(subtype);
4672 txp->txwi[7] |= htole32(val);
4673
4674 #if NBPFILTER > 0
4675 if (__predict_false(sc->sc_drvbpf != NULL)) {
4676 struct mwx_tx_radiotap_header *tap = &sc->sc_txtap;
4677 uint16_t chan_flags;
4678
4679 tap->wt_flags = 0;
4680 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4681 chan_flags = ni->ni_chan->ic_flags;
4682 if (ic->ic_curmode != IEEE80211_MODE_11N &&
4683 ic->ic_curmode != IEEE80211_MODE_11AC) {
4684 chan_flags &= ~IEEE80211_CHAN_HT;
4685 chan_flags &= ~IEEE80211_CHAN_40MHZ;
4686 }
4687 if (ic->ic_curmode != IEEE80211_MODE_11AC)
4688 chan_flags &= ~IEEE80211_CHAN_VHT;
4689 tap->wt_chan_flags = htole16(chan_flags);
4690 #ifdef NOTYET
4691 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4692 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4693 type == IEEE80211_FC0_TYPE_DATA &&
4694 rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4695 tap->wt_rate = (0x80 | rinfo->ht_plcp);
4696 } else
4697 tap->wt_rate = rinfo->rate;
4698 #endif
4699 tap->wt_rate = 2;
4700 if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4701 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4702 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4703
4704 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
4705 m, BPF_DIRECTION_OUT);
4706 }
4707 #endif
4708
4709 }
4710
static inline uint8_t
mt7921_lmac_mapping(uint8_t ac)
{
	/*
	 * The LMAC hardware queues are numbered in the reverse order
	 * of the mac80211 access-category indexes (0..3).
	 */
	return (3 - ac);
}
4717
4718 void
mt7921_mac_write_txwi(struct mwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni,struct mt76_txwi * txp)4719 mt7921_mac_write_txwi(struct mwx_softc *sc, struct mbuf *m,
4720 struct ieee80211_node *ni, struct mt76_txwi *txp)
4721 {
4722 struct mwx_node *mn = (void *)ni;
4723 uint32_t val, p_fmt, omac_idx;
4724 uint8_t q_idx, wmm_idx, band_idx;
4725 uint8_t phy_idx = 0;
4726 /* XXX hardcoded and wrong */
4727 int pid = MT_PACKET_ID_FIRST;
4728 enum mt76_txq_id qid = MT_TXQ_BE;
4729
4730 omac_idx = sc->sc_vif.omac_idx << MT_TXD1_OWN_MAC_SHIFT;
4731 wmm_idx = sc->sc_vif.wmm_idx;
4732 band_idx = sc->sc_vif.band_idx;
4733
4734 if (qid >= MT_TXQ_PSD) {
4735 p_fmt = MT_TX_TYPE_CT;
4736 q_idx = MT_LMAC_ALTX0;
4737 } else {
4738 p_fmt = MT_TX_TYPE_CT;
4739 q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
4740 mt7921_lmac_mapping(/* skb_get_queue_mapping(skb) */ 0);
4741
4742 #ifdef NOTYET
4743 /* counting non-offloading skbs */
4744 wcid->stats.tx_bytes += skb->len;
4745 wcid->stats.tx_packets++;
4746 #endif
4747 }
4748
4749 val = ((m->m_pkthdr.len + MT_TXD_SIZE) & MT_TXD0_TX_BYTES_MASK) |
4750 p_fmt | MT_TXD0_Q_IDX(q_idx);
4751 txp->txwi[0] = htole32(val);
4752
4753 val = MT_TXD1_LONG_FORMAT | (mn->wcid & MT_TXD1_WLAN_IDX_MASK) |
4754 (omac_idx & MT_TXD1_OWN_MAC_MASK);
4755 if (phy_idx || band_idx)
4756 val |= MT_TXD1_TGID;
4757 txp->txwi[1] = htole32(val);
4758 txp->txwi[2] = 0;
4759
4760 val = 15 << MT_TXD3_REM_TX_COUNT_SHIFT;
4761 #ifdef NOTYET
4762 if (key)
4763 val |= MT_TXD3_PROTECT_FRAME;
4764 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
4765 val |= MT_TXD3_NO_ACK;
4766 #endif
4767 txp->txwi[3] = htole32(val);
4768 txp->txwi[4] = 0;
4769
4770 val = pid & MT_TXD5_PID;
4771 if (pid >= MT_PACKET_ID_FIRST)
4772 val |= MT_TXD5_TX_STATUS_HOST;
4773 txp->txwi[5] = htole32(val);
4774 txp->txwi[6] = 0;
4775 txp->txwi[7] = /* XXX wcid->amsdu ? htole32(MT_TXD7_HW_AMSDU) : */ 0;
4776
4777 #ifdef NOTYET
4778 if (is_8023)
4779 mt76_connac2_mac_write_txwi_8023(txp, m, wcid);
4780 else
4781 #endif
4782 mt7921_mac_write_txwi_80211(sc, m, ni, txp);
4783 }
4784
/*
 * Handle a TX-free event mbuf coming from the MCU.  Reclaiming the
 * completed TX descriptors is not implemented yet: the event is only
 * logged and the mbuf is dropped.  The NOTYET section is reference
 * code for parsing the MCU event header.
 */
void
mt7921_mac_tx_free(struct mwx_softc *sc, struct mbuf *m)
{
#ifdef NOTYET
	struct mt7921_mcu_rxd *rxd;
	uint32_t cmd, mcu_int = 0;
	int len;

	/* make the fixed event header contiguous */
	if ((m = m_pullup(m, sizeof(*rxd))) == NULL)
		return;
	rxd = mtod(m, struct mt7921_mcu_rxd *);

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT) {
		printf("%s: MCU_EXT_EVENT_RATE_REPORT COMMAND\n", DEVNAME(sc));
		m_freem(m);
		return;
	}

	len = sizeof(*rxd) - sizeof(rxd->rxd) + le16toh(rxd->len);
	/* make sure all the data is in one mbuf */
	if ((m = m_pullup(m, len)) == NULL) {
		printf("%s: mwx_mcu_rx_event m_pullup failed\n", DEVNAME(sc));
		return;
	}
	/* refetch after pullup */
	rxd = mtod(m, struct mt7921_mcu_rxd *);
	m_adj(m, sizeof(*rxd));
#endif
	printf("%s\n", __func__);
	m_freem(m);
}
4816
4817 int
mt7921_set_channel(struct mwx_softc * sc)4818 mt7921_set_channel(struct mwx_softc *sc)
4819 {
4820 int rv;
4821
4822 /* stop queues, block other configs (MT76_RESET) */
4823 // XXX NOTYET mt76_set_channel(sc);
4824
4825 rv = mt7921_mcu_set_chan_info(sc, MCU_EXT_CMD_CHANNEL_SWITCH);
4826 if (rv)
4827 return rv;
4828 mt7921_mac_set_timing(sc);
4829 mt7921_mac_reset_counters(sc);
4830
4831 /* restart queues */
4832 return 0;
4833 }
4834
4835 uint8_t
mt7921_get_phy_mode_v2(struct mwx_softc * sc,struct ieee80211_node * ni)4836 mt7921_get_phy_mode_v2(struct mwx_softc *sc, struct ieee80211_node *ni)
4837 {
4838 uint8_t mode = 0;
4839
4840 if (ni == NULL)
4841 ni = sc->sc_ic.ic_bss;
4842
4843 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
4844 mode |= PHY_TYPE_BIT_HR_DSSS | PHY_TYPE_BIT_ERP;
4845 if (ieee80211_node_supports_ht(ni))
4846 mode |= PHY_TYPE_BIT_HT;
4847 #ifdef NOTYET
4848 if (ieee80211_node_supports_he(ni))
4849 mode |= PHY_TYPE_BIT_HE;
4850 #endif
4851 } else if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan) /* || CHAN_6GHZ */) {
4852 mode |= PHY_TYPE_BIT_OFDM;
4853 if (ieee80211_node_supports_ht(ni))
4854 mode |= PHY_TYPE_BIT_HT;
4855 if (ieee80211_node_supports_vht(ni))
4856 mode |= PHY_TYPE_BIT_VHT;
4857 #ifdef NOTYET
4858 if (ieee80211_node_supports_he(ni))
4859 mode |= PHY_TYPE_BIT_HE;
4860 #endif
4861 }
4862 return mode;
4863 }
4864
4865 struct mbuf *
mt7921_alloc_sta_tlv(int len)4866 mt7921_alloc_sta_tlv(int len)
4867 {
4868 struct mbuf *m;
4869
4870 /* Allocate mbuf cluster with enough space */
4871 m = m_clget(NULL, M_DONTWAIT, MCLBYTES);
4872 if (m == NULL)
4873 return NULL;
4874
4875 /* align to have space for the mcu header */
4876 m->m_data += sizeof(struct mt7921_mcu_txd) + len;
4877 m->m_len = m->m_pkthdr.len = 0;
4878
4879 return m;
4880 }
4881
4882 /*
4883 * Reserve len bytes at the end of mbuf m, return to start of that area
4884 * after initializing the data. It also sets the tag and len hdr.
4885 */
4886 void *
mt7921_append_tlv(struct mbuf * m,uint16_t * tlvnum,int tag,int len)4887 mt7921_append_tlv(struct mbuf *m, uint16_t *tlvnum, int tag, int len)
4888 {
4889 struct {
4890 uint16_t tag;
4891 uint16_t len;
4892 } tlv = {
4893 .tag = htole16(tag),
4894 .len = htole16(len),
4895 };
4896 caddr_t p;
4897
4898 KASSERT(m_trailingspace(m) >= len);
4899
4900 p = mtod(m, caddr_t) + m->m_len;
4901 m->m_len += len;
4902 m->m_pkthdr.len = m->m_len;
4903 memset(p, 0, len);
4904 memcpy(p, &tlv, sizeof(tlv));
4905
4906 *tlvnum += 1;
4907
4908 return p;
4909 }
4910
4911 void
mt7921_mcu_add_basic_tlv(struct mbuf * m,uint16_t * tlvnum,struct mwx_softc * sc,struct ieee80211_node * ni,int add,int new)4912 mt7921_mcu_add_basic_tlv(struct mbuf *m, uint16_t *tlvnum, struct mwx_softc *sc,
4913 struct ieee80211_node *ni, int add, int new)
4914 {
4915 struct ieee80211com *ic = &sc->sc_ic;
4916 struct sta_rec_basic *basic;
4917
4918 basic = mt7921_append_tlv(m, tlvnum, STA_REC_BASIC, sizeof(*basic));
4919
4920 basic->extra_info = htole16(EXTRA_INFO_VER);
4921 if (add) {
4922 if (new)
4923 basic->extra_info |= htole16(EXTRA_INFO_NEW);
4924 basic->conn_state = CONN_STATE_PORT_SECURE;
4925 } else {
4926 basic->conn_state = CONN_STATE_DISCONNECT;
4927 }
4928
4929 if (ni == NULL) {
4930 basic->conn_type = htole32(STA_TYPE_BC | NETWORK_INFRA);
4931 memset(basic->peer_addr, 0xff, sizeof(basic->peer_addr));
4932 return;
4933 }
4934
4935 switch (ic->ic_opmode) {
4936 case IEEE80211_M_HOSTAP:
4937 basic->conn_type = htole32(STA_TYPE_STA | NETWORK_INFRA);
4938 break;
4939 case IEEE80211_M_STA:
4940 basic->conn_type = htole32(STA_TYPE_AP | NETWORK_INFRA);
4941 break;
4942 case IEEE80211_M_IBSS:
4943 case IEEE80211_M_AHDEMO:
4944 basic->conn_type = htole32(STA_TYPE_ADHOC | NETWORK_IBSS);
4945 break;
4946 case IEEE80211_M_MONITOR:
4947 panic("mt7921_mcu_sta_basic_tlv unexpected operation mode");
4948 }
4949
4950 basic->aid = htole16(IEEE80211_AID(ni->ni_associd));
4951 memcpy(basic->peer_addr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
4952 basic->qos = (ni->ni_flags & IEEE80211_NODE_QOS) != 0;
4953 }
4954
/*
 * Append the extended station record TLVs (PHY, rate-adaptation and
 * state) for node ni.  Large parts (HT/VHT/HE capabilities, UAPSD,
 * AMSDU) are not wired up yet and only kept under NOTYET as reference
 * code.  The `add' and `new' arguments are currently unused.
 */
void
mt7921_mcu_add_sta_tlv(struct mbuf *m, uint16_t *tlvnum, struct mwx_softc *sc,
    struct ieee80211_node *ni, int add, int new)
{
	//struct ieee80211com *ic = &sc->sc_ic;
	struct sta_rec_ra_info *ra_info;
	struct sta_rec_state *state;
	struct sta_rec_phy *phy;
	uint16_t supp_rates;

#ifdef NOTYET
	/* sta rec ht */
	if (sta->deflink.ht_cap.ht_supported) {
		struct sta_rec_ht *ht;

		ht = mt7921_append_tlv(m, tlvnum, STA_REC_HT, sizeof(*ht));
		ht->ht_cap = htole16(sta->deflink.ht_cap.cap);
	}

	/* sta rec vht */
	if (sta->deflink.vht_cap.vht_supported) {
		struct sta_rec_vht *vht;

		vht = mt7921_append_tlv(m, tlvnum, STA_REC_VHT,
		    sizeof(*vht));
		vht->vht_cap = htole32(sta->deflink.vht_cap.cap);
		vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
		vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
	}

	/* sta rec uapsd */
	/* from function:
	if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
		return;
	*/
	mt7921_mcu_sta_uapsd(m, tlvnum, vif, ni);
#endif

#ifdef NOTYET
	if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)
		mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);

	/* sta rec he */
	if (sta->deflink.he_cap.has_he) {
		mt76_connac_mcu_sta_he_tlv(skb, sta);
		if (band == NL80211_BAND_6GHZ &&
		    sta_state == MT76_STA_INFO_STATE_ASSOC) {
			struct sta_rec_he_6g_capa *he_6g_capa;

			he_6g_capa = mt7921_append_tlv(m, tlvnum,
			    STA_REC_HE_6G, sizeof(*he_6g_capa));
			he_6g_capa->capa = sta->deflink.he_6ghz_capa.capa;
		}
	}
#endif

	/* STA_REC_PHY: basic rates, PHY type bitmap and RCPI */
	phy = mt7921_append_tlv(m, tlvnum, STA_REC_PHY, sizeof(*phy));
	/* XXX basic_rates: bitmap of basic rates, each bit stands for an
	 * index into the rate table configured by the driver in
	 * the current band.
	 */
	phy->basic_rate = htole16(0x0150);	/* XXX */
	phy->phy_type = mt7921_get_phy_mode_v2(sc, ni);
#ifdef NOTYET
	phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
	    sta->deflink.ht_cap.ampdu_factor) |
	    FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
	    sta->deflink.ht_cap.ampdu_density);
#endif
	// XXX phy->rcpi = rssi_to_rcpi(-ewma_rssi_read(&sc->sc_vif.rssi));
	phy->rcpi = 0xdc;	/* XXX STOLEN FROM LINUX DUMP */

#ifdef HACK
	supp_rates = sta->deflink.supp_rates[band];
	if (band == NL80211_BAND_2GHZ)
		supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
		    FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
	else
		supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates);
#else
	/* hardcoded until rate sets are derived from the node */
	supp_rates = RA_LEGACY_OFDM;
#endif

	/* STA_REC_RA: legacy rate bitmap for the rate-adaptation engine */
	ra_info = mt7921_append_tlv(m, tlvnum, STA_REC_RA,
	    sizeof(*ra_info));
	ra_info->legacy = htole16(supp_rates);
#ifdef NOTYET
	if (sta->deflink.ht_cap.ht_supported)
		memcpy(ra_info->rx_mcs_bitmask,
		    sta->deflink.ht_cap.mcs.rx_mask,
		    HT_MCS_MASK_NUM);
#endif

	/* STA_REC_STATE: firmware station state machine state */
	state = mt7921_append_tlv(m, tlvnum, STA_REC_STATE, sizeof(*state));
	state->state = /* XXX sta_state */ 0;
#ifdef NOTYET
	if (sta->deflink.vht_cap.vht_supported) {
		state->vht_opmode = sta->deflink.bandwidth;
		state->vht_opmode |= (sta->deflink.rx_nss - 1) <<
		    IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
	}
#endif
}
5058
5059 int
mt7921_mcu_wtbl_generic_tlv(struct mbuf * m,uint16_t * tlvnum,struct mwx_softc * sc,struct ieee80211_node * ni)5060 mt7921_mcu_wtbl_generic_tlv(struct mbuf *m, uint16_t *tlvnum,
5061 struct mwx_softc *sc, struct ieee80211_node *ni)
5062 {
5063 struct ieee80211com *ic = &sc->sc_ic;
5064 struct wtbl_generic *generic;
5065 struct wtbl_rx *rx;
5066
5067 generic = mt7921_append_tlv(m, tlvnum, WTBL_GENERIC,
5068 sizeof(*generic));
5069
5070 if (ni) {
5071 generic->partial_aid = htole16(IEEE80211_AID(ni->ni_associd));
5072 memcpy(generic->peer_addr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
5073 generic->muar_idx = sc->sc_vif.omac_idx;
5074 generic->qos = (ni->ni_flags & IEEE80211_NODE_QOS) != 0;
5075 } else {
5076 memset(generic->peer_addr, 0xff, IEEE80211_ADDR_LEN);
5077 generic->muar_idx = 0xe;
5078 }
5079
5080 rx = mt7921_append_tlv(m, tlvnum, WTBL_RX, sizeof(*rx));
5081 rx->rca1 = ni ? ic->ic_opmode != IEEE80211_M_HOSTAP : 1;
5082 rx->rca2 = 1;
5083 rx->rv = 1;
5084
5085 return sizeof(*generic) + sizeof(*rx);
5086 }
5087
5088 int
mt7921_mcu_wtbl_hdr_trans_tlv(struct mbuf * m,uint16_t * tlvnum,struct mwx_softc * sc,struct ieee80211_node * ni)5089 mt7921_mcu_wtbl_hdr_trans_tlv(struct mbuf *m, uint16_t *tlvnum,
5090 struct mwx_softc *sc, struct ieee80211_node *ni)
5091 {
5092 struct ieee80211com *ic = &sc->sc_ic;
5093 struct wtbl_hdr_trans *htr;
5094
5095 htr = mt7921_append_tlv(m, tlvnum, WTBL_HDR_TRANS, sizeof(*htr));
5096
5097 /* no hdr decapsulation offload */
5098 htr->no_rx_trans = 1;
5099
5100 if (ic->ic_opmode == IEEE80211_M_STA)
5101 htr->to_ds = 1;
5102 else
5103 htr->from_ds = 1;
5104
5105 return sizeof(*htr);
5106 }
5107
5108 int
mt7921_mcu_wtbl_ht_tlv(struct mbuf * m,uint16_t * tlvnum,struct mwx_softc * sc,struct ieee80211_node * ni)5109 mt7921_mcu_wtbl_ht_tlv(struct mbuf *m, uint16_t *tlvnum,
5110 struct mwx_softc *sc, struct ieee80211_node *ni)
5111 {
5112 struct wtbl_smps *smps;
5113
5114 /* XXX lots missing here */
5115
5116 smps = mt7921_append_tlv(m, tlvnum, WTBL_SMPS, sizeof(*smps));
5117 /* spatial multiplexing power save mode, off for now */
5118 //smps->smps = (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
5119
5120 return sizeof(*smps);
5121 }
5122
5123 int
mt7921_mac_sta_update(struct mwx_softc * sc,struct ieee80211_node * ni,int add,int new)5124 mt7921_mac_sta_update(struct mwx_softc *sc, struct ieee80211_node *ni,
5125 int add, int new)
5126 {
5127 struct mwx_node *mw = (struct mwx_node *)ni;
5128 struct mwx_vif *mvif = &sc->sc_vif;
5129 struct sta_req_hdr *hdr;
5130 struct sta_rec_wtbl *wtbl;
5131 struct mbuf *m = NULL;
5132 uint16_t tlvnum = 0, wnum = 0;
5133 int wlen = 0;
5134
5135 m = mt7921_alloc_sta_tlv(sizeof(*hdr));
5136 if (m == NULL)
5137 return ENOBUFS;
5138
5139 if (ni != NULL)
5140 mt7921_mcu_add_basic_tlv(m, &tlvnum, sc, ni, add, new);
5141
5142 if (ni != NULL && add)
5143 mt7921_mcu_add_sta_tlv(m, &tlvnum, sc, ni, add, new);
5144
5145 wtbl = mt7921_append_tlv(m, &tlvnum, STA_REC_WTBL,
5146 sizeof(*wtbl));
5147 wtbl->wlan_idx_lo = mw ? mw->wcid & 0xff : 0,
5148 wtbl->wlan_idx_hi = mw ? mw->wcid >> 8 : 0,
5149 wtbl->operation = WTBL_RESET_AND_SET;
5150
5151 if (add) {
5152 wlen += mt7921_mcu_wtbl_generic_tlv(m, &wnum, sc, ni);
5153 wlen += mt7921_mcu_wtbl_hdr_trans_tlv(m, &wnum, sc, ni);
5154
5155 if (ni)
5156 wlen += mt7921_mcu_wtbl_ht_tlv(m, &wnum, sc, ni);
5157 }
5158
5159 wtbl->tlv_num = htole16(wnum);
5160 wtbl->len = htole16(le16toh(wtbl->len) + wlen);
5161
5162 KASSERT(m_leadingspace(m) >= sizeof(*hdr));
5163 m = m_prepend(m, sizeof(*hdr), M_DONTWAIT);
5164 hdr = mtod(m, struct sta_req_hdr *);
5165 memset(hdr, 0, sizeof(*hdr));
5166 hdr->bss_idx = mvif->idx,
5167 hdr->wlan_idx_lo = mw ? mw->wcid & 0xff : 0,
5168 hdr->wlan_idx_hi = mw ? mw->wcid >> 8 : 0,
5169 hdr->muar_idx = ni ? mvif->omac_idx : 0,
5170 hdr->is_tlv_append = 1,
5171 hdr->tlv_num = htole16(tlvnum);
5172
5173 return mwx_mcu_send_mbuf_wait(sc, MCU_UNI_CMD_STA_REC_UPDATE, m);
5174 }
5175
5176