1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2020 Realtek Corporation
3 */
4
5 #include <linux/pci.h>
6
7 #include "mac.h"
8 #include "pci.h"
9 #include "reg.h"
10 #include "ser.h"
11
12 static bool rtw89_pci_disable_clkreq;
13 static bool rtw89_pci_disable_aspm_l1;
14 static bool rtw89_pci_disable_l1ss;
15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
21
22 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
23 u32 *phy_offset)
24 {
25 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
26 struct pci_dev *pdev = rtwpci->pdev;
27 u32 val;
28 int ret;
29
30 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
31 if (ret)
32 return ret;
33
34 val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
35 if (val == RTW89_PCIE_GEN1_SPEED) {
36 *phy_offset = R_RAC_DIRECT_OFFSET_G1;
37 } else if (val == RTW89_PCIE_GEN2_SPEED) {
38 *phy_offset = R_RAC_DIRECT_OFFSET_G2;
39 } else {
40 rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
41 return -EFAULT;
42 }
43
44 return 0;
45 }
46
47 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
48 {
49 u32 val;
50 int ret;
51
52 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);
53
54 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
55 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
56 rtwdev, R_AX_PCIE_INIT_CFG1);
57
58 return ret;
59 }
60
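/* Work out how many buffer descriptors the hardware has consumed since the
 * last poll. cur_idx is the raw value of the ring's idx register and its
 * TXBD_HW_IDX_MASK field is the hardware index. For TX rings the count is
 * measured from the software read pointer (rp); for RX rings from the
 * software write pointer (wp), shifted by one when rx_ring_eq_is_full is
 * set. The software rp is then advanced to the hardware position.
 */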
61 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
62 struct rtw89_pci_dma_ring *bd_ring,
63 u32 cur_idx, bool tx)
64 {
65 const struct rtw89_pci_info *info = rtwdev->pci_info;
66 u32 cnt, cur_rp, wp, rp, len;
67
68 rp = bd_ring->rp;
69 wp = bd_ring->wp;
70 len = bd_ring->len;
71
72 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
73 if (tx) {
74 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
75 } else {
76 if (info->rx_ring_eq_is_full)
77 wp += 1;
78
79 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
80 }
81
82 bd_ring->rp = cur_rp;
83
84 return cnt;
85 }
86
87 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
88 struct rtw89_pci_tx_ring *tx_ring)
89 {
90 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
91 u32 addr_idx = bd_ring->addr.idx;
92 u32 cnt, idx;
93
94 idx = rtw89_read32(rtwdev, addr_idx);
95 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
96
97 return cnt;
98 }
99
100 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
101 struct rtw89_pci *rtwpci,
102 u32 cnt, bool release_all)
103 {
104 struct rtw89_pci_tx_data *tx_data;
105 struct sk_buff *skb;
106 u32 qlen;
107
108 while (cnt--) {
109 skb = skb_dequeue(&rtwpci->h2c_queue);
110 if (!skb) {
111 rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
112 return;
113 }
114 skb_queue_tail(&rtwpci->h2c_release_queue, skb);
115 }
116
117 qlen = skb_queue_len(&rtwpci->h2c_release_queue);
118 if (!release_all)
119 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
120
121 while (qlen--) {
122 skb = skb_dequeue(&rtwpci->h2c_release_queue);
123 if (!skb) {
124 rtw89_err(rtwdev, "failed to release fwcmd\n");
125 return;
126 }
127 tx_data = RTW89_PCI_TX_SKB_CB(skb);
128 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
129 DMA_TO_DEVICE);
130 dev_kfree_skb_any(skb);
131 }
132 }
133
134 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
135 struct rtw89_pci *rtwpci)
136 {
137 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
138 u32 cnt;
139
140 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
141 if (!cnt)
142 return;
143 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
144 }
145
146 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
147 struct rtw89_pci_rx_ring *rx_ring)
148 {
149 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
150 u32 addr_idx = bd_ring->addr.idx;
151 u32 cnt, idx;
152
153 idx = rtw89_read32(rtwdev, addr_idx);
154 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
155
156 return cnt;
157 }
158
159 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
160 struct sk_buff *skb)
161 {
162 struct rtw89_pci_rx_info *rx_info;
163 dma_addr_t dma;
164
165 rx_info = RTW89_PCI_RX_SKB_CB(skb);
166 dma = rx_info->dma;
167 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
168 DMA_FROM_DEVICE);
169 }
170
171 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
172 struct sk_buff *skb)
173 {
174 struct rtw89_pci_rx_info *rx_info;
175 dma_addr_t dma;
176
177 rx_info = RTW89_PCI_RX_SKB_CB(skb);
178 dma = rx_info->dma;
179 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
180 DMA_FROM_DEVICE);
181 }
182
183 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
184 struct sk_buff *skb)
185 {
186 struct rtw89_pci_rxbd_info *rxbd_info;
187 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
188
189 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
190 rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
191 rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
192 rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
193 rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
194 }
195
196 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
197 struct rtw89_pci_rx_ring *rx_ring,
198 struct sk_buff *skb)
199 {
200 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
201 const struct rtw89_pci_info *info = rtwdev->pci_info;
202 u32 target_rx_tag;
203
204 if (!info->check_rx_tag)
205 return 0;
206
207 /* valid range is 1 ~ 0x1FFF */
208 if (rx_ring->target_rx_tag == 0)
209 target_rx_tag = 1;
210 else
211 target_rx_tag = rx_ring->target_rx_tag;
212
213 if (rx_info->tag != target_rx_tag) {
214 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
215 rx_info->tag, target_rx_tag);
216 return -EAGAIN;
217 }
218
219 return 0;
220 }
221
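/* Sync the RX buffer for CPU access, parse its RXBD info and check the RX
 * tag. A tag mismatch (-EAGAIN) presumably means the device has not
 * finished writing this buffer yet, so the sync/parse/validate sequence is
 * retried a bounded number of times before giving up.
 */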
222 static
223 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
224 struct rtw89_pci_rx_ring *rx_ring,
225 struct sk_buff *skb)
226 {
227 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
228 int rx_tag_retry = 100;
229 int ret;
230
231 do {
232 rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
233 rtw89_pci_rxbd_info_update(rtwdev, skb);
234
235 ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
236 if (ret != -EAGAIN)
237 break;
238 } while (rx_tag_retry--);
239
240 /* update target rx_tag for next RX */
241 rx_ring->target_rx_tag = rx_info->tag + 1;
242
243 return ret;
244 }
245
246 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
247 {
248 const struct rtw89_pci_info *info = rtwdev->pci_info;
249 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
250 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
251
252 if (enable) {
253 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
254 if (dma_stop2->addr)
255 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
256 } else {
257 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
258 if (dma_stop2->addr)
259 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
260 }
261 }
262
263 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
264 {
265 const struct rtw89_pci_info *info = rtwdev->pci_info;
266 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
267
268 if (enable)
269 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
270 else
271 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
272 }
273
274 static bool
275 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
276 struct sk_buff *new,
277 const struct sk_buff *skb, u32 offset,
278 const struct rtw89_pci_rx_info *rx_info,
279 const struct rtw89_rx_desc_info *desc_info)
280 {
281 u32 copy_len = rx_info->len - offset;
282
283 if (unlikely(skb_tailroom(new) < copy_len)) {
284 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
285 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
286 rx_info->len, desc_info->pkt_size, offset, fs, ls);
287 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
288 skb->data, rx_info->len);
289 /* length of a single segment skb is desc_info->pkt_size */
290 if (fs && ls) {
291 copy_len = desc_info->pkt_size;
292 } else {
293 rtw89_info(rtwdev, "drop rx data due to invalid length\n");
294 return false;
295 }
296 }
297
298 skb_put_data(new, skb->data + offset, copy_len);
299
300 return true;
301 }
302
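/* Pick the RX buffer to process next. Normally that is the entry at the
 * software write pointer; when rx_ring_eq_is_full is set the slot at wp is
 * held back, so the next buffer is the one after it, wrapping at the ring
 * length.
 */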
303 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
304 struct rtw89_pci_dma_ring *bd_ring)
305 {
306 const struct rtw89_pci_info *info = rtwdev->pci_info;
307 u32 wp = bd_ring->wp;
308
309 if (!info->rx_ring_eq_is_full)
310 return wp;
311
312 if (++wp >= bd_ring->len)
313 wp = 0;
314
315 return wp;
316 }
317
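/* Deliver the data held in one RX buffer descriptor. A received frame can
 * span several RXBDs: the first segment (fs) carries the RX descriptor and
 * causes the target skb to be allocated, later segments only append payload,
 * and on the last segment (ls) the reassembled skb is handed to the core.
 * Returns the number of RXBDs consumed (currently always one).
 */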
318 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
319 struct rtw89_pci_rx_ring *rx_ring)
320 {
321 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
322 struct rtw89_pci_rx_info *rx_info;
323 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
324 struct sk_buff *new = rx_ring->diliver_skb;
325 struct sk_buff *skb;
326 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
327 u32 skb_idx;
328 u32 offset;
329 u32 cnt = 1;
330 bool fs, ls;
331 int ret;
332
333 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
334 skb = rx_ring->buf[skb_idx];
335
336 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
337 if (ret) {
338 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
339 bd_ring->wp, ret);
340 goto err_sync_device;
341 }
342
343 rx_info = RTW89_PCI_RX_SKB_CB(skb);
344 fs = rx_info->fs;
345 ls = rx_info->ls;
346
347 if (fs) {
348 if (new) {
349 rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
350 "skb should not be ready before first segment start\n");
351 goto err_sync_device;
352 }
353 if (desc_info->ready) {
354 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
355 goto err_sync_device;
356 }
357
358 rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
359
360 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
361 if (!new)
362 goto err_sync_device;
363
364 rx_ring->diliver_skb = new;
365
366 /* first segment has RX desc */
367 offset = desc_info->offset + desc_info->rxd_len;
368 } else {
369 offset = sizeof(struct rtw89_pci_rxbd_info);
370 if (!new) {
371 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
372 goto err_sync_device;
373 }
374 }
375 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
376 goto err_sync_device;
377 rtw89_pci_sync_skb_for_device(rtwdev, skb);
378 rtw89_pci_rxbd_increase(rx_ring, 1);
379
380 if (!desc_info->ready) {
381 rtw89_warn(rtwdev, "no rx desc information\n");
382 goto err_free_resource;
383 }
384 if (ls) {
385 rtw89_core_rx(rtwdev, desc_info, new);
386 rx_ring->diliver_skb = NULL;
387 desc_info->ready = false;
388 }
389
390 return cnt;
391
392 err_sync_device:
393 rtw89_pci_sync_skb_for_device(rtwdev, skb);
394 rtw89_pci_rxbd_increase(rx_ring, 1);
395 err_free_resource:
396 if (new)
397 dev_kfree_skb_any(new);
398 rx_ring->diliver_skb = NULL;
399 desc_info->ready = false;
400
401 return cnt;
402 }
403
404 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
405 struct rtw89_pci_rx_ring *rx_ring,
406 u32 cnt)
407 {
408 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
409 u32 rx_cnt;
410
411 while (cnt && rtwdev->napi_budget_countdown > 0) {
412 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
413 if (!rx_cnt) {
414 rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
415
416 /* skip the remaining RXBD buffers */
417 rtw89_pci_rxbd_increase(rx_ring, cnt);
418 break;
419 }
420
421 cnt -= rx_cnt;
422 }
423
424 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
425 }
426
427 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
428 struct rtw89_pci *rtwpci, int budget)
429 {
430 struct rtw89_pci_rx_ring *rx_ring;
431 int countdown = rtwdev->napi_budget_countdown;
432 u32 cnt;
433
434 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
435
436 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
437 if (!cnt)
438 return 0;
439
440 cnt = min_t(u32, budget, cnt);
441
442 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
443
444 /* When pending SKBs are flushed, the countdown may be exceeded (go below zero). */
445 if (rtwdev->napi_budget_countdown <= 0)
446 return budget;
447
448 return budget - countdown;
449 }
450
451 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
452 struct rtw89_pci_tx_ring *tx_ring,
453 struct sk_buff *skb, u8 tx_status)
454 {
455 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
456 struct ieee80211_tx_info *info;
457
458 rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
459
460 info = IEEE80211_SKB_CB(skb);
461 ieee80211_tx_info_clear_status(info);
462
463 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
464 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
465 if (tx_status == RTW89_TX_DONE) {
466 info->flags |= IEEE80211_TX_STAT_ACK;
467 tx_ring->tx_acked++;
468 } else {
469 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
470 rtw89_debug(rtwdev, RTW89_DBG_FW,
471 "failed to TX of status %x\n", tx_status);
472 switch (tx_status) {
473 case RTW89_TX_RETRY_LIMIT:
474 tx_ring->tx_retry_lmt++;
475 break;
476 case RTW89_TX_LIFE_TIME:
477 tx_ring->tx_life_time++;
478 break;
479 case RTW89_TX_MACID_DROP:
480 tx_ring->tx_mac_id_drop++;
481 break;
482 default:
483 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
484 break;
485 }
486 }
487
488 ieee80211_tx_status_ni(rtwdev->hw, skb);
489 }
490
491 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
492 {
493 struct rtw89_pci_tx_wd *txwd;
494 u32 cnt;
495
496 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
497 while (cnt--) {
498 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
499 if (!txwd) {
500 rtw89_warn(rtwdev, "No busy txwd pages available\n");
501 break;
502 }
503
504 list_del_init(&txwd->list);
505
506 /* this skb has been freed by RPP */
507 if (skb_queue_len(&txwd->queue) == 0)
508 rtw89_pci_enqueue_txwd(tx_ring, txwd);
509 }
510 }
511
512 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
513 struct rtw89_pci_tx_ring *tx_ring)
514 {
515 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
516 struct rtw89_pci_tx_wd *txwd;
517 int i;
518
519 for (i = 0; i < wd_ring->page_num; i++) {
520 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
521 if (!txwd)
522 break;
523
524 list_del_init(&txwd->list);
525 }
526 }
527
528 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
529 struct rtw89_pci_tx_ring *tx_ring,
530 struct rtw89_pci_tx_wd *txwd, u16 seq,
531 u8 tx_status)
532 {
533 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
534 struct rtw89_pci_tx_data *tx_data;
535 struct sk_buff *skb, *tmp;
536 u8 txch = tx_ring->txch;
537
538 if (!list_empty(&txwd->list)) {
539 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
540 /* In low power mode, an RPP can be received before the TX BD has been
541 * updated. In normal mode this should not happen, so warn about it.
542 */
543 if (!rtwpci->low_power && !list_empty(&txwd->list))
544 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
545 txch, seq);
546 }
547
548 skb_queue_walk_safe(&txwd->queue, skb, tmp) {
549 skb_unlink(skb, &txwd->queue);
550
551 tx_data = RTW89_PCI_TX_SKB_CB(skb);
552 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
553 DMA_TO_DEVICE);
554
555 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
556 }
557
558 if (list_empty(&txwd->list))
559 rtw89_pci_enqueue_txwd(tx_ring, txwd);
560 }
561
562 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
563 struct rtw89_pci_rpp_fmt *rpp)
564 {
565 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
566 struct rtw89_pci_tx_ring *tx_ring;
567 struct rtw89_pci_tx_wd_ring *wd_ring;
568 struct rtw89_pci_tx_wd *txwd;
569 u16 seq;
570 u8 qsel, tx_status, txch;
571
572 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
573 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
574 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
575 txch = rtw89_core_get_ch_dma(rtwdev, qsel);
576
577 if (txch == RTW89_TXCH_CH12) {
578 rtw89_warn(rtwdev, "should be no fwcmd release report\n");
579 return;
580 }
581
582 tx_ring = &rtwpci->tx_rings[txch];
583 wd_ring = &tx_ring->wd_ring;
584 txwd = &wd_ring->pages[seq];
585
586 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
587 }
588
589 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
590 struct rtw89_pci_tx_ring *tx_ring)
591 {
592 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
593 struct rtw89_pci_tx_wd *txwd;
594 int i;
595
596 for (i = 0; i < wd_ring->page_num; i++) {
597 txwd = &wd_ring->pages[i];
598
599 if (!list_empty(&txwd->list))
600 continue;
601
602 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
603 }
604 }
605
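/* Process one RPQ buffer. The buffer must be a single FS+LS segment whose
 * payload is a sequence of RPP (release report) entries; each entry names a
 * queue and TXWD sequence number whose skbs can now be unmapped and
 * completed with the reported TX status.
 */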
606 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
607 struct rtw89_pci_rx_ring *rx_ring,
608 u32 max_cnt)
609 {
610 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
611 struct rtw89_pci_rx_info *rx_info;
612 struct rtw89_pci_rpp_fmt *rpp;
613 struct rtw89_rx_desc_info desc_info = {};
614 struct sk_buff *skb;
615 u32 cnt = 0;
616 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
617 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
618 u32 skb_idx;
619 u32 offset;
620 int ret;
621
622 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
623 skb = rx_ring->buf[skb_idx];
624
625 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
626 if (ret) {
627 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
628 bd_ring->wp, ret);
629 goto err_sync_device;
630 }
631
632 rx_info = RTW89_PCI_RX_SKB_CB(skb);
633 if (!rx_info->fs || !rx_info->ls) {
634 rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
635 return cnt;
636 }
637
638 rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
639
640 /* first segment has RX desc */
641 offset = desc_info.offset + desc_info.rxd_len;
642 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
643 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
644 rtw89_pci_release_rpp(rtwdev, rpp);
645 }
646
647 rtw89_pci_sync_skb_for_device(rtwdev, skb);
648 rtw89_pci_rxbd_increase(rx_ring, 1);
649 cnt++;
650
651 return cnt;
652
653 err_sync_device:
654 rtw89_pci_sync_skb_for_device(rtwdev, skb);
655 return 0;
656 }
657
658 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
659 struct rtw89_pci_rx_ring *rx_ring,
660 u32 cnt)
661 {
662 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
663 u32 release_cnt;
664
665 while (cnt) {
666 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
667 if (!release_cnt) {
668 rtw89_err(rtwdev, "failed to release TX skbs\n");
669
670 /* skip the remaining RXBD buffers */
671 rtw89_pci_rxbd_increase(rx_ring, cnt);
672 break;
673 }
674
675 cnt -= release_cnt;
676 }
677
678 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
679 }
680
681 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
682 struct rtw89_pci *rtwpci, int budget)
683 {
684 struct rtw89_pci_rx_ring *rx_ring;
685 u32 cnt;
686 int work_done;
687
688 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
689
690 spin_lock_bh(&rtwpci->trx_lock);
691
692 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
693 if (cnt == 0)
694 goto out_unlock;
695
696 rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
697
698 out_unlock:
699 spin_unlock_bh(&rtwpci->trx_lock);
700
701 /* always release all RPQ */
702 work_done = min_t(int, cnt, budget);
703 rtwdev->napi_budget_countdown -= work_done;
704
705 return work_done;
706 }
707
708 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
709 struct rtw89_pci *rtwpci)
710 {
711 struct rtw89_pci_rx_ring *rx_ring;
712 struct rtw89_pci_dma_ring *bd_ring;
713 u32 reg_idx;
714 u16 hw_idx, hw_idx_next, host_idx;
715 int i;
716
717 for (i = 0; i < RTW89_RXCH_NUM; i++) {
718 rx_ring = &rtwpci->rx_rings[i];
719 bd_ring = &rx_ring->bd_ring;
720
721 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
722 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
723 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
724 hw_idx_next = (hw_idx + 1) % bd_ring->len;
725
726 if (hw_idx_next == host_idx)
727 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
728
729 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
730 "%d RXD unavailable, idx=0x%08x, len=%d\n",
731 i, reg_idx, bd_ring->len);
732 }
733 }
734
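/* Interrupt recognition helpers: read the pending ISR registers, mask them
 * with the interrupts that are currently enabled, and write the taken bits
 * back to acknowledge them. The v1/v2 variants first consult an indirect
 * ISR register and only read the sub-ISRs whose indication bit is set.
 */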
735 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
736 struct rtw89_pci *rtwpci,
737 struct rtw89_pci_isrs *isrs)
738 {
739 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
740 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
741 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
742
743 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
744 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
745 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
746 }
747 EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
748
749 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
750 struct rtw89_pci *rtwpci,
751 struct rtw89_pci_isrs *isrs)
752 {
753 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
754 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
755 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
756 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
757 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
758 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
759 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
760
761 if (isrs->halt_c2h_isrs)
762 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
763 if (isrs->isrs[0])
764 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
765 if (isrs->isrs[1])
766 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
767 }
768 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
769
770 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
771 struct rtw89_pci *rtwpci,
772 struct rtw89_pci_isrs *isrs)
773 {
774 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
775 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
776 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
777 isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
778 rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
779 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
780
781 if (isrs->halt_c2h_isrs)
782 rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
783 if (isrs->isrs[0])
784 rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
785 if (isrs->isrs[1])
786 rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
787 rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
788 }
789 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
790
791 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
792 {
793 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
794 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
795 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
796 }
797 EXPORT_SYMBOL(rtw89_pci_enable_intr);
798
799 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
800 {
801 rtw89_write32(rtwdev, R_AX_HIMR0, 0);
802 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
803 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
804 }
805 EXPORT_SYMBOL(rtw89_pci_disable_intr);
806
807 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
808 {
809 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
810 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
811 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
812 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
813 }
814 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
815
816 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
817 {
818 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
819 }
820 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
821
822 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
823 {
824 rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
825 rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
826 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
827 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
828 }
829 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);
830
831 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
832 {
833 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
834 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
835 }
836 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
837
838 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
839 {
840 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
841 unsigned long flags;
842
843 spin_lock_irqsave(&rtwpci->irq_lock, flags);
844 rtw89_chip_disable_intr(rtwdev, rtwpci);
845 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
846 rtw89_chip_enable_intr(rtwdev, rtwpci);
847 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
848 }
849
850 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
851 {
852 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
853 unsigned long flags;
854
855 spin_lock_irqsave(&rtwpci->irq_lock, flags);
856 rtw89_chip_disable_intr(rtwdev, rtwpci);
857 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
858 rtw89_chip_enable_intr(rtwdev, rtwpci);
859 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
860 }
861
862 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
863 {
864 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
865 int budget = NAPI_POLL_WEIGHT;
866
867 /* Prevent the RXQ from getting stuck by running out of budget. */
868 rtwdev->napi_budget_countdown = budget;
869
870 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
871 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
872 }
873
874 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
875 {
876 struct rtw89_dev *rtwdev = dev;
877 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
878 const struct rtw89_pci_info *info = rtwdev->pci_info;
879 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
880 struct rtw89_pci_isrs isrs;
881 unsigned long flags;
882
883 spin_lock_irqsave(&rtwpci->irq_lock, flags);
884 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
885 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
886
887 if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
888 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
889
890 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
891 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
892
893 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
894 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
895
896 if (unlikely(rtwpci->under_recovery))
897 goto enable_intr;
898
899 if (unlikely(rtwpci->low_power)) {
900 rtw89_pci_low_power_interrupt_handler(rtwdev);
901 goto enable_intr;
902 }
903
904 if (likely(rtwpci->running)) {
905 local_bh_disable();
906 napi_schedule(&rtwdev->napi);
907 local_bh_enable();
908 }
909
910 return IRQ_HANDLED;
911
912 enable_intr:
913 spin_lock_irqsave(&rtwpci->irq_lock, flags);
914 if (likely(rtwpci->running))
915 rtw89_chip_enable_intr(rtwdev, rtwpci);
916 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
917 return IRQ_HANDLED;
918 }
919
920 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
921 {
922 struct rtw89_dev *rtwdev = dev;
923 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
924 unsigned long flags;
925 irqreturn_t irqret = IRQ_WAKE_THREAD;
926
927 spin_lock_irqsave(&rtwpci->irq_lock, flags);
928
929 /* An interrupt event already in flight can still trigger an interrupt
930 * even after pci_stop() has turned off the IMR.
931 */
932 if (unlikely(!rtwpci->running)) {
933 irqret = IRQ_HANDLED;
934 goto exit;
935 }
936
937 rtw89_chip_disable_intr(rtwdev, rtwpci);
938 exit:
939 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
940
941 return irqret;
942 }
943
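/* The DEF_*CHADDRS* macros below build per-channel register sets (BD count,
 * index and descriptor base-address registers) by token pasting. The plain
 * TX variant only appends the optional _V1 suffix to the BDRAM and DESA
 * registers, TYPE1 appends it to every register, and TYPE2 additionally
 * takes the register generation (AX/BE) and has no BDRAM control register;
 * the RX macro likewise takes the generation.
 */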
944 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
945 [RTW89_TXCH_##ch_idx] = { \
946 .num = R_##gen##_##txch##_TXBD_NUM ##v, \
947 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
948 .bdram = 0, \
949 .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
950 .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
951 }
952
953 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
954 [RTW89_TXCH_##txch] = { \
955 .num = R_AX_##txch##_TXBD_NUM ##v, \
956 .idx = R_AX_##txch##_TXBD_IDX ##v, \
957 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
958 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
959 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
960 }
961
962 #define DEF_TXCHADDRS(info, txch, v...) \
963 [RTW89_TXCH_##txch] = { \
964 .num = R_AX_##txch##_TXBD_NUM, \
965 .idx = R_AX_##txch##_TXBD_IDX, \
966 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
967 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
968 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
969 }
970
971 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
972 [RTW89_RXCH_##ch_idx] = { \
973 .num = R_##gen##_##rxch##_RXBD_NUM ##v, \
974 .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
975 .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
976 .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
977 }
978
979 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
980 .tx = {
981 DEF_TXCHADDRS(info, ACH0),
982 DEF_TXCHADDRS(info, ACH1),
983 DEF_TXCHADDRS(info, ACH2),
984 DEF_TXCHADDRS(info, ACH3),
985 DEF_TXCHADDRS(info, ACH4),
986 DEF_TXCHADDRS(info, ACH5),
987 DEF_TXCHADDRS(info, ACH6),
988 DEF_TXCHADDRS(info, ACH7),
989 DEF_TXCHADDRS(info, CH8),
990 DEF_TXCHADDRS(info, CH9),
991 DEF_TXCHADDRS_TYPE1(info, CH10),
992 DEF_TXCHADDRS_TYPE1(info, CH11),
993 DEF_TXCHADDRS(info, CH12),
994 },
995 .rx = {
996 DEF_RXCHADDRS(AX, RXQ, RXQ),
997 DEF_RXCHADDRS(AX, RPQ, RPQ),
998 },
999 };
1000 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
1001
1002 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
1003 .tx = {
1004 DEF_TXCHADDRS(info, ACH0, _V1),
1005 DEF_TXCHADDRS(info, ACH1, _V1),
1006 DEF_TXCHADDRS(info, ACH2, _V1),
1007 DEF_TXCHADDRS(info, ACH3, _V1),
1008 DEF_TXCHADDRS(info, ACH4, _V1),
1009 DEF_TXCHADDRS(info, ACH5, _V1),
1010 DEF_TXCHADDRS(info, ACH6, _V1),
1011 DEF_TXCHADDRS(info, ACH7, _V1),
1012 DEF_TXCHADDRS(info, CH8, _V1),
1013 DEF_TXCHADDRS(info, CH9, _V1),
1014 DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
1015 DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
1016 DEF_TXCHADDRS(info, CH12, _V1),
1017 },
1018 .rx = {
1019 DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
1020 DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
1021 },
1022 };
1023 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
1024
1025 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
1026 .tx = {
1027 DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
1028 DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
1029 DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
1030 DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
1031 DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
1032 DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
1033 DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
1034 DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
1035 DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
1036 DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
1037 DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
1038 DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
1039 DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
1040 },
1041 .rx = {
1042 DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
1043 DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
1044 },
1045 };
1046 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
1047
1048 #undef DEF_TXCHADDRS_TYPE1
1049 #undef DEF_TXCHADDRS
1050 #undef DEF_RXCHADDRS
1051
1052 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
1053 enum rtw89_tx_channel txch,
1054 const struct rtw89_pci_ch_dma_addr **addr)
1055 {
1056 const struct rtw89_pci_info *info = rtwdev->pci_info;
1057
1058 if (txch >= RTW89_TXCH_NUM)
1059 return -EINVAL;
1060
1061 *addr = &info->dma_addr_set->tx[txch];
1062
1063 return 0;
1064 }
1065
1066 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
1067 enum rtw89_rx_channel rxch,
1068 const struct rtw89_pci_ch_dma_addr **addr)
1069 {
1070 const struct rtw89_pci_info *info = rtwdev->pci_info;
1071
1072 if (rxch >= RTW89_RXCH_NUM)
1073 return -EINVAL;
1074
1075 *addr = &info->dma_addr_set->rx[rxch];
1076
1077 return 0;
1078 }
1079
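/* Free TX BD count. One descriptor is always kept unused so that wp == rp
 * unambiguously means "empty": with len = 256 and rp == wp there are 255
 * usable entries, and with wp one behind rp the ring is full (0 free).
 */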
1080 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
1081 {
1082 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
1083
1084 /* one descriptor is reserved so a full ring can be distinguished from an empty one */
1085 if (bd_ring->rp > bd_ring->wp)
1086 return bd_ring->rp - bd_ring->wp - 1;
1087
1088 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
1089 }
1090
1091 static
1092 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
1093 {
1094 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1095 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
1096 u32 cnt;
1097
1098 spin_lock_bh(&rtwpci->trx_lock);
1099 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
1100 cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1101 spin_unlock_bh(&rtwpci->trx_lock);
1102
1103 return cnt;
1104 }
1105
1106 static
1107 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
1108 u8 txch)
1109 {
1110 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1111 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1112 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1113 u32 cnt;
1114
1115 spin_lock_bh(&rtwpci->trx_lock);
1116 cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1117 if (txch != RTW89_TXCH_CH12)
1118 cnt = min(cnt, wd_ring->curr_num);
1119 spin_unlock_bh(&rtwpci->trx_lock);
1120
1121 return cnt;
1122 }
1123
1124 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1125 u8 txch)
1126 {
1127 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1128 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1129 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1130 const struct rtw89_chip_info *chip = rtwdev->chip;
1131 u32 bd_cnt, wd_cnt, min_cnt = 0;
1132 struct rtw89_pci_rx_ring *rx_ring;
1133 enum rtw89_debug_mask debug_mask;
1134 u32 cnt;
1135
1136 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
1137
1138 spin_lock_bh(&rtwpci->trx_lock);
1139 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1140 wd_cnt = wd_ring->curr_num;
1141
1142 if (wd_cnt == 0 || bd_cnt == 0) {
1143 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
1144 if (cnt)
1145 rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
1146 else if (wd_cnt == 0)
1147 goto out_unlock;
1148
1149 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1150 if (bd_cnt == 0)
1151 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
1152 }
1153
1154 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1155 wd_cnt = wd_ring->curr_num;
1156 min_cnt = min(bd_cnt, wd_cnt);
1157 if (min_cnt == 0) {
1158 /* This message can appear frequently in low power mode or under high
1159 * traffic on chips with a small FIFO. It is recognized as normal
1160 * behavior, so print it with the RTW89_DBG_TXRX mask in those situations.
1161 */
1162 if (rtwpci->low_power || chip->small_fifo_size)
1163 debug_mask = RTW89_DBG_TXRX;
1164 else
1165 debug_mask = RTW89_DBG_UNEXP;
1166
1167 rtw89_debug(rtwdev, debug_mask,
1168 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
1169 wd_cnt, bd_cnt);
1170 }
1171
1172 out_unlock:
1173 spin_unlock_bh(&rtwpci->trx_lock);
1174
1175 return min_cnt;
1176 }
1177
1178 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1179 u8 txch)
1180 {
1181 if (rtwdev->hci.paused)
1182 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
1183
1184 if (txch == RTW89_TXCH_CH12)
1185 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
1186
1187 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
1188 }
1189
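/* Kick off TX DMA by publishing the ring's software write pointer to the
 * hardware index register, so the device can fetch the newly queued BDs.
 */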
1190 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
1191 {
1192 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1193 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1194 u32 host_idx, addr;
1195
1196 spin_lock_bh(&rtwpci->trx_lock);
1197
1198 addr = bd_ring->addr.idx;
1199 host_idx = bd_ring->wp;
1200 rtw89_write16(rtwdev, addr, host_idx);
1201
1202 spin_unlock_bh(&rtwpci->trx_lock);
1203 }
1204
1205 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
1206 int n_txbd)
1207 {
1208 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1209 u32 host_idx, len;
1210
1211 len = bd_ring->len;
1212 host_idx = bd_ring->wp + n_txbd;
1213 host_idx = host_idx < len ? host_idx : host_idx - len;
1214
1215 bd_ring->wp = host_idx;
1216 }
1217
1218 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
1219 {
1220 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1221 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1222
1223 if (rtwdev->hci.paused) {
1224 set_bit(txch, rtwpci->kick_map);
1225 return;
1226 }
1227
1228 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1229 }
1230
1231 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
1232 {
1233 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1234 struct rtw89_pci_tx_ring *tx_ring;
1235 int txch;
1236
1237 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1238 if (!test_and_clear_bit(txch, rtwpci->kick_map))
1239 continue;
1240
1241 tx_ring = &rtwpci->tx_rings[txch];
1242 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1243 }
1244 }
1245
1246 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
1247 {
1248 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1249 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1250 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1251 u32 cur_idx, cur_rp;
1252 u8 i;
1253
1254 /* Because the time taken by the I/O varies, it's hard to define a
1255 * reasonable fixed total timeout for the read_poll_timeout* helpers.
1256 * Instead, ensure a reasonable number of polling iterations by using a
1257 * plain for loop with udelay here.
1258 */
1259 for (i = 0; i < 60; i++) {
1260 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
1261 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
1262 if (cur_rp == bd_ring->wp)
1263 return;
1264
1265 udelay(1);
1266 }
1267
1268 if (!drop)
1269 rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
1270 }
1271
1272 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
1273 bool drop)
1274 {
1275 const struct rtw89_pci_info *info = rtwdev->pci_info;
1276 u8 i;
1277
1278 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1279 /* It may be unnecessary to flush FWCMD queue. */
1280 if (i == RTW89_TXCH_CH12)
1281 continue;
1282 if (info->tx_dma_ch_mask & BIT(i))
1283 continue;
1284
1285 if (txchs & BIT(i))
1286 __pci_flush_txch(rtwdev, i, drop);
1287 }
1288 }
1289
1290 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
1291 bool drop)
1292 {
1293 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
1294 }
1295
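/* TX address info builders. The base format describes the whole buffer with
 * one entry (length, option bits, 32-bit DMA address); the v1 format below
 * splits the buffer into up to RTW89_TXADDR_INFO_NR_V1 chunks of at most
 * TXADDR_INFO_LENTHG_V1_MAX bytes each, marking the final chunk with the
 * LS bit.
 */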
1296 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
1297 void *txaddr_info_addr, u32 total_len,
1298 dma_addr_t dma, u8 *add_info_nr)
1299 {
1300 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
1301
1302 txaddr_info->length = cpu_to_le16(total_len);
1303 txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
1304 RTW89_PCI_ADDR_NUM(1));
1305 txaddr_info->dma = cpu_to_le32(dma);
1306
1307 *add_info_nr = 1;
1308
1309 return sizeof(*txaddr_info);
1310 }
1311 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
1312
1313 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
1314 void *txaddr_info_addr, u32 total_len,
1315 dma_addr_t dma, u8 *add_info_nr)
1316 {
1317 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
1318 u32 remain = total_len;
1319 u32 len;
1320 u16 length_option;
1321 int n;
1322
1323 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
1324 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
1325 TXADDR_INFO_LENTHG_V1_MAX : remain;
1326 remain -= len;
1327
1328 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
1329 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
1330 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
1331 txaddr_info->length_opt = cpu_to_le16(length_option);
1332 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
1333 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
1334
1335 dma += len;
1336 txaddr_info++;
1337 }
1338
1339 WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
1340 remain, total_len);
1341
1342 *add_info_nr = n;
1343
1344 return n * sizeof(*txaddr_info);
1345 }
1346 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
1347
1348 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
1349 struct rtw89_pci_tx_ring *tx_ring,
1350 struct rtw89_pci_tx_wd *txwd,
1351 struct rtw89_core_tx_request *tx_req)
1352 {
1353 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1354 const struct rtw89_chip_info *chip = rtwdev->chip;
1355 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1356 struct rtw89_pci_tx_wp_info *txwp_info;
1357 void *txaddr_info_addr;
1358 struct pci_dev *pdev = rtwpci->pdev;
1359 struct sk_buff *skb = tx_req->skb;
1360 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1361 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
1362 bool en_wd_info = desc_info->en_wd_info;
1363 u32 txwd_len;
1364 u32 txwp_len;
1365 u32 txaddr_info_len;
1366 dma_addr_t dma;
1367 int ret;
1368
1369 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1370 if (dma_mapping_error(&pdev->dev, dma)) {
1371 rtw89_err(rtwdev, "failed to map skb dma data\n");
1372 ret = -EBUSY;
1373 goto err;
1374 }
1375
1376 tx_data->dma = dma;
1377 rcu_assign_pointer(skb_data->wait, NULL);
1378
1379 txwp_len = sizeof(*txwp_info);
1380 txwd_len = chip->txwd_body_size;
1381 txwd_len += en_wd_info ? chip->txwd_info_size : 0;
1382
1383 txwp_info = txwd->vaddr + txwd_len;
1384 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
1385 txwp_info->seq1 = 0;
1386 txwp_info->seq2 = 0;
1387 txwp_info->seq3 = 0;
1388
1389 tx_ring->tx_cnt++;
1390 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
1391 txaddr_info_len =
1392 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
1393 dma, &desc_info->addr_info_nr);
1394
1395 txwd->len = txwd_len + txwp_len + txaddr_info_len;
1396
1397 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1398
1399 skb_queue_tail(&txwd->queue, skb);
1400
1401 return 0;
1402
1403 err:
1404 return ret;
1405 }
1406
1407 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1408 struct rtw89_pci_tx_ring *tx_ring,
1409 struct rtw89_pci_tx_bd_32 *txbd,
1410 struct rtw89_core_tx_request *tx_req)
1411 {
1412 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1413 const struct rtw89_chip_info *chip = rtwdev->chip;
1414 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1415 void *txdesc;
1416 int txdesc_size = chip->h2c_desc_size;
1417 struct pci_dev *pdev = rtwpci->pdev;
1418 struct sk_buff *skb = tx_req->skb;
1419 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1420 dma_addr_t dma;
1421
1422 txdesc = skb_push(skb, txdesc_size);
1423 memset(txdesc, 0, txdesc_size);
1424 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
1425
1426 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1427 if (dma_mapping_error(&pdev->dev, dma)) {
1428 rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1429 return -EBUSY;
1430 }
1431
1432 tx_data->dma = dma;
1433 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1434 txbd->length = cpu_to_le16(skb->len);
1435 txbd->dma = cpu_to_le32(tx_data->dma);
1436 skb_queue_tail(&rtwpci->h2c_queue, skb);
1437
1438 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1439
1440 return 0;
1441 }
1442
1443 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1444 struct rtw89_pci_tx_ring *tx_ring,
1445 struct rtw89_pci_tx_bd_32 *txbd,
1446 struct rtw89_core_tx_request *tx_req)
1447 {
1448 struct rtw89_pci_tx_wd *txwd;
1449 int ret;
1450
1451 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
1452 * buffer with WD BODY only. So here we don't need to check the free
1453 * pages of the wd ring.
1454 */
1455 if (tx_ring->txch == RTW89_TXCH_CH12)
1456 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1457
1458 txwd = rtw89_pci_dequeue_txwd(tx_ring);
1459 if (!txwd) {
1460 rtw89_err(rtwdev, "no available TXWD\n");
1461 ret = -ENOSPC;
1462 goto err;
1463 }
1464
1465 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1466 if (ret) {
1467 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1468 goto err_enqueue_wd;
1469 }
1470
1471 list_add_tail(&txwd->list, &tx_ring->busy_pages);
1472
1473 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1474 txbd->length = cpu_to_le16(txwd->len);
1475 txbd->dma = cpu_to_le32(txwd->paddr);
1476
1477 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1478
1479 return 0;
1480
1481 err_enqueue_wd:
1482 rtw89_pci_enqueue_txwd(tx_ring, txwd);
1483 err:
1484 return ret;
1485 }
1486
1487 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1488 u8 txch)
1489 {
1490 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1491 struct rtw89_pci_tx_ring *tx_ring;
1492 struct rtw89_pci_tx_bd_32 *txbd;
1493 u32 n_avail_txbd;
1494 int ret = 0;
1495
1496 /* check the tx type and dma channel for fw cmd queue */
1497 if ((txch == RTW89_TXCH_CH12 ||
1498 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1499 (txch != RTW89_TXCH_CH12 ||
1500 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1501 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1502 return -EINVAL;
1503 }
1504
1505 tx_ring = &rtwpci->tx_rings[txch];
1506 spin_lock_bh(&rtwpci->trx_lock);
1507
1508 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1509 if (n_avail_txbd == 0) {
1510 rtw89_err(rtwdev, "no available TXBD\n");
1511 ret = -ENOSPC;
1512 goto err_unlock;
1513 }
1514
1515 txbd = rtw89_pci_get_next_txbd(tx_ring);
1516 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1517 if (ret) {
1518 rtw89_err(rtwdev, "failed to submit TXBD\n");
1519 goto err_unlock;
1520 }
1521
1522 spin_unlock_bh(&rtwpci->trx_lock);
1523 return 0;
1524
1525 err_unlock:
1526 spin_unlock_bh(&rtwpci->trx_lock);
1527 return ret;
1528 }
1529
1530 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1531 {
1532 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1533 int ret;
1534
1535 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1536 if (ret) {
1537 rtw89_err(rtwdev, "failed to TX on queue %d\n", desc_info->ch_dma);
1538 return ret;
1539 }
1540
1541 return 0;
1542 }
1543
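/* BD RAM layout tables: each TX channel gets a slice of the shared TX BD
 * RAM described by a start index plus the maximum and minimum number of
 * entries it may use. The "dual" table covers the full channel set
 * (ACH0-ACH7 plus CH8-CH12); the "single" table is the reduced layout for
 * chips with fewer DMA channels.
 */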
1544 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
1545 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
1546 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
1547 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1548 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1549 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1550 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1551 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1552 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1553 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
1554 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
1555 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1556 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1557 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1558 };
1559 EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
1560
1561 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
1562 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
1563 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
1564 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1565 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1566 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
1567 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
1568 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
1569 };
1570 EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1571
1572 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1573 {
1574 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1575 const struct rtw89_pci_info *info = rtwdev->pci_info;
1576 const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
1577 struct rtw89_pci_tx_ring *tx_ring;
1578 struct rtw89_pci_rx_ring *rx_ring;
1579 struct rtw89_pci_dma_ring *bd_ring;
1580 const struct rtw89_pci_bd_ram *bd_ram;
1581 u32 addr_num;
1582 u32 addr_idx;
1583 u32 addr_bdram;
1584 u32 addr_desa_l;
1585 u32 val32;
1586 int i;
1587
1588 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1589 if (info->tx_dma_ch_mask & BIT(i))
1590 continue;
1591
1592 tx_ring = &rtwpci->tx_rings[i];
1593 bd_ring = &tx_ring->bd_ring;
1594 bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
1595 addr_num = bd_ring->addr.num;
1596 addr_bdram = bd_ring->addr.bdram;
1597 addr_desa_l = bd_ring->addr.desa_l;
1598 bd_ring->wp = 0;
1599 bd_ring->rp = 0;
1600
1601 rtw89_write16(rtwdev, addr_num, bd_ring->len);
1602 if (addr_bdram && bd_ram) {
1603 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1604 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1605 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1606
1607 rtw89_write32(rtwdev, addr_bdram, val32);
1608 }
1609 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1610 }
1611
1612 for (i = 0; i < RTW89_RXCH_NUM; i++) {
1613 rx_ring = &rtwpci->rx_rings[i];
1614 bd_ring = &rx_ring->bd_ring;
1615 addr_num = bd_ring->addr.num;
1616 addr_idx = bd_ring->addr.idx;
1617 addr_desa_l = bd_ring->addr.desa_l;
1618 if (info->rx_ring_eq_is_full)
1619 bd_ring->wp = bd_ring->len - 1;
1620 else
1621 bd_ring->wp = 0;
1622 bd_ring->rp = 0;
1623 rx_ring->diliver_skb = NULL;
1624 rx_ring->diliver_desc.ready = false;
1625 rx_ring->target_rx_tag = 0;
1626
1627 rtw89_write16(rtwdev, addr_num, bd_ring->len);
1628 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1629
1630 if (info->rx_ring_eq_is_full)
1631 rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
1632 }
1633 }
1634
1635 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1636 struct rtw89_pci_tx_ring *tx_ring)
1637 {
1638 rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1639 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1640 }
1641
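/* Reset the DMA rings and drop any TX work still queued in software;
 * the FW command channel (CH12) releases its pending H2C skbs instead.
 */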
1642 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1643 {
1644 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1645 const struct rtw89_pci_info *info = rtwdev->pci_info;
1646 int txch;
1647
1648 rtw89_pci_reset_trx_rings(rtwdev);
1649
1650 spin_lock_bh(&rtwpci->trx_lock);
1651 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1652 if (info->tx_dma_ch_mask & BIT(txch))
1653 continue;
1654 if (txch == RTW89_TXCH_CH12) {
1655 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1656 skb_queue_len(&rtwpci->h2c_queue), true);
1657 continue;
1658 }
1659 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1660 }
1661 spin_unlock_bh(&rtwpci->trx_lock);
1662 }
1663
1664 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1665 {
1666 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1667 unsigned long flags;
1668
1669 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1670 rtwpci->running = true;
1671 rtw89_chip_enable_intr(rtwdev, rtwpci);
1672 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1673 }
1674
1675 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1676 {
1677 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1678 unsigned long flags;
1679
1680 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1681 rtwpci->running = false;
1682 rtw89_chip_disable_intr(rtwdev, rtwpci);
1683 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1684 }
1685
1686 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1687 {
1688 rtw89_core_napi_start(rtwdev);
1689 rtw89_pci_enable_intr_lock(rtwdev);
1690
1691 return 0;
1692 }
1693
1694 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1695 {
1696 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1697 struct pci_dev *pdev = rtwpci->pdev;
1698
1699 rtw89_pci_disable_intr_lock(rtwdev);
1700 synchronize_irq(pdev->irq);
1701 rtw89_core_napi_stop(rtwdev);
1702 }
1703
1704 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1705 {
1706 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1707 struct pci_dev *pdev = rtwpci->pdev;
1708
1709 if (pause) {
1710 rtw89_pci_disable_intr_lock(rtwdev);
1711 synchronize_irq(pdev->irq);
1712 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1713 napi_synchronize(&rtwdev->napi);
1714 } else {
1715 rtw89_pci_enable_intr_lock(rtwdev);
1716 rtw89_pci_tx_kick_off_pending(rtwdev);
1717 }
1718 }
1719
1720 static
1721 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1722 {
1723 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1724 const struct rtw89_pci_info *info = rtwdev->pci_info;
1725 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1726 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1727 struct rtw89_pci_tx_ring *tx_ring;
1728 struct rtw89_pci_rx_ring *rx_ring;
1729 int i;
1730
1731 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1732 return;
1733
1734 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1735 tx_ring = &rtwpci->tx_rings[i];
1736 tx_ring->bd_ring.addr.idx = low_power ?
1737 bd_idx_addr->tx_bd_addrs[i] :
1738 dma_addr_set->tx[i].idx;
1739 }
1740
1741 for (i = 0; i < RTW89_RXCH_NUM; i++) {
1742 rx_ring = &rtwpci->rx_rings[i];
1743 rx_ring->bd_ring.addr.idx = low_power ?
1744 bd_idx_addr->rx_bd_addrs[i] :
1745 dma_addr_set->rx[i].idx;
1746 }
1747 }
1748
1749 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1750 {
1751 enum rtw89_pci_intr_mask_cfg cfg;
1752
1753 WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1754
1755 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1756 rtw89_chip_config_intr_mask(rtwdev, cfg);
1757 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1758 }
1759
1760 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1761
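/* CMAC register reads can return RTW89_R32_DEAD while the CMAC clocks are
 * gated. Retry the read after forcing all CMAC clocks on, up to
 * MAC_REG_POOL_COUNT times, before giving up and returning the dead value.
 */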
1762 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1763 {
1764 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1765 u32 val = readl(rtwpci->mmap + addr);
1766 int count;
1767
1768 for (count = 0; ; count++) {
1769 if (val != RTW89_R32_DEAD)
1770 return val;
1771 if (count >= MAC_REG_POOL_COUNT) {
1772 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1773 return RTW89_R32_DEAD;
1774 }
1775 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1776 val = readl(rtwpci->mmap + addr);
1777 }
1778
1779 return val;
1780 }
1781
1782 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1783 {
1784 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1785 u32 addr32, val32, shift;
1786
1787 if (!ACCESS_CMAC(addr))
1788 return readb(rtwpci->mmap + addr);
1789
1790 addr32 = addr & ~0x3;
1791 shift = (addr & 0x3) * 8;
1792 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1793 return val32 >> shift;
1794 }
1795
1796 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1797 {
1798 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1799 u32 addr32, val32, shift;
1800
1801 if (!ACCESS_CMAC(addr))
1802 return readw(rtwpci->mmap + addr);
1803
1804 addr32 = addr & ~0x3;
1805 shift = (addr & 0x3) * 8;
1806 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1807 return val32 >> shift;
1808 }
1809
1810 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1811 {
1812 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1813
1814 if (!ACCESS_CMAC(addr))
1815 return readl(rtwpci->mmap + addr);
1816
1817 return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1818 }
1819
1820 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1821 {
1822 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1823
1824 writeb(data, rtwpci->mmap + addr);
1825 }
1826
1827 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1828 {
1829 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1830
1831 writew(data, rtwpci->mmap + addr);
1832 }
1833
1834 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1835 {
1836 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1837
1838 writel(data, rtwpci->mmap + addr);
1839 }
1840
1841 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
1842 {
1843 const struct rtw89_pci_info *info = rtwdev->pci_info;
1844
1845 if (enable)
1846 rtw89_write32_set(rtwdev, info->init_cfg_reg,
1847 info->rxhci_en_bit | info->txhci_en_bit);
1848 else
1849 rtw89_write32_clr(rtwdev, info->init_cfg_reg,
1850 info->rxhci_en_bit | info->txhci_en_bit);
1851 }
1852
1853 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
1854 {
1855 const struct rtw89_pci_info *info = rtwdev->pci_info;
1856 const struct rtw89_reg_def *reg = &info->dma_io_stop;
1857
1858 if (enable)
1859 rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
1860 else
1861 rtw89_write32_set(rtwdev, reg->addr, reg->mask);
1862 }
1863
1864 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1865 {
1866 rtw89_pci_ctrl_dma_io(rtwdev, enable);
1867 rtw89_pci_ctrl_dma_trx(rtwdev, enable);
1868 }
1869
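/* Select the MDIO page that matches the register address and PCIe PHY
 * generation, kick off the access (read or write flag), and poll until the
 * hardware clears the flag or the poll times out.
 */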
1870 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1871 {
1872 u16 val;
1873
1874 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1875
1876 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1877 switch (speed) {
1878 case PCIE_PHY_GEN1:
1879 if (addr < 0x20)
1880 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1881 else
1882 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1883 break;
1884 case PCIE_PHY_GEN2:
1885 if (addr < 0x20)
1886 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1887 else
1888 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1889 break;
1890 default:
1891 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
1892 return -EINVAL;
1893 }
1894 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1895 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1896
1897 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1898 false, rtwdev, R_AX_MDIO_CFG);
1899 }
1900
1901 static int
1902 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1903 {
1904 int ret;
1905
1906 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1907 if (ret) {
1908 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1909 return ret;
1910 }
1911 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1912
1913 return 0;
1914 }
1915
1916 static int
1917 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1918 {
1919 int ret;
1920
1921 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1922 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1923 if (ret) {
1924 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1925 return ret;
1926 }
1927
1928 return 0;
1929 }
1930
1931 static int
1932 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1933 {
1934 u32 shift;
1935 int ret;
1936 u16 val;
1937
1938 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1939 if (ret)
1940 return ret;
1941
1942 shift = __ffs(mask);
1943 val &= ~mask;
1944 val |= ((data << shift) & mask);
1945
1946 ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1947 if (ret)
1948 return ret;
1949
1950 return 0;
1951 }
1952
1953 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1954 {
1955 int ret;
1956 u16 val;
1957
1958 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1959 if (ret)
1960 return ret;
1961 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1962 if (ret)
1963 return ret;
1964
1965 return 0;
1966 }
1967
1968 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1969 {
1970 int ret;
1971 u16 val;
1972
1973 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1974 if (ret)
1975 return ret;
1976 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1977 if (ret)
1978 return ret;
1979
1980 return 0;
1981 }
1982
1983 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
1984 {
1985 u16 addr_2lsb = addr & B_AX_DBI_2LSB;
1986 u16 write_addr;
1987 u8 flag;
1988 int ret;
1989
1990 write_addr = addr & B_AX_DBI_ADDR_MSK;
1991 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
1992 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
1993 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
1994 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
1995
1996 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
1997 10 * RTW89_PCI_WR_RETRY_CNT, false,
1998 rtwdev, R_AX_DBI_FLAG + 2);
1999 if (ret)
2000 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
2001 addr);
2002
2003 return ret;
2004 }
2005
2006 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
2007 {
2008 u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
2009 u8 flag;
2010 int ret;
2011
2012 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
2013 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
2014
2015 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2016 10 * RTW89_PCI_WR_RETRY_CNT, false,
2017 rtwdev, R_AX_DBI_FLAG + 2);
2018 if (ret) {
2019 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
2020 addr);
2021 return ret;
2022 }
2023
2024 read_addr = R_AX_DBI_RDATA + (addr & 3);
2025 *value = rtw89_read8(rtwdev, read_addr);
2026
2027 return 0;
2028 }
2029
2030 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2031 u8 data)
2032 {
2033 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2034 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2035 struct pci_dev *pdev = rtwpci->pdev;
2036 int ret;
2037
2038 ret = pci_write_config_byte(pdev, addr, data);
2039 if (!ret)
2040 return 0;
2041
2042 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
2043 ret = rtw89_dbi_write8(rtwdev, addr, data);
2044
2045 return ret;
2046 }
2047
2048 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2049 u8 *value)
2050 {
2051 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2052 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2053 struct pci_dev *pdev = rtwpci->pdev;
2054 int ret;
2055
2056 ret = pci_read_config_byte(pdev, addr, value);
2057 if (!ret)
2058 return 0;
2059
2060 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
2061 ret = rtw89_dbi_read8(rtwdev, addr, value);
2062
2063 return ret;
2064 }
2065
2066 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
2067 u8 bit)
2068 {
2069 u8 value;
2070 int ret;
2071
2072 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2073 if (ret)
2074 return ret;
2075
2076 value |= bit;
2077 ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2078
2079 return ret;
2080 }
2081
2082 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
2083 u8 bit)
2084 {
2085 u8 value;
2086 int ret;
2087
2088 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2089 if (ret)
2090 return ret;
2091
2092 value &= ~bit;
2093 ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2094
2095 return ret;
2096 }
2097
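/* Toggle the clock calibration counter (clear then enable), wait for it to
 * accumulate, and read back the 12-bit target count used by the reference
 * clock calibration. A count of 0 or 0xFFF is treated as a failure.
 */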
2098 static int
2099 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
2100 {
2101 u16 val, tar;
2102 int ret;
2103
2104 /* Enable counter */
2105 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
2106 if (ret)
2107 return ret;
2108 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2109 phy_rate);
2110 if (ret)
2111 return ret;
2112 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
2113 phy_rate);
2114 if (ret)
2115 return ret;
2116
2117 fsleep(300);
2118
2119 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
2120 if (ret)
2121 return ret;
2122 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2123 phy_rate);
2124 if (ret)
2125 return ret;
2126
2127 tar = tar & 0x0FFF;
2128 if (tar == 0 || tar == 0x0FFF) {
2129 rtw89_err(rtwdev, "[ERR]Get target failed.\n");
2130 return -EINVAL;
2131 }
2132
2133 *target = tar;
2134
2135 return 0;
2136 }
2137
2138 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2139 {
2140 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2141 int ret;
2142
2143 if (chip_id != RTL8852B && chip_id != RTL8851B)
2144 return 0;
2145
2146 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2147 PCIE_AUTOK_4, PCIE_PHY_GEN1);
2148 return ret;
2149 }
2150
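/* Reference clock calibration for RTL8852B/RTL8851B: temporarily disable
 * L1, stop any running calibration and, when autook_en is set, measure the
 * target count, derive a divider and margin from it, program
 * RAC_SET_PPR_V1, re-enable calibration and zero the CLK delay. The
 * original L1 setting is restored before returning.
 */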
2151 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2152 {
2153 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2154 enum rtw89_pcie_phy phy_rate;
2155 u16 val16, mgn_set, div_set, tar;
2156 u8 val8, bdr_ori;
2157 bool l1_flag = false;
2158 int ret = 0;
2159
2160 if (chip_id != RTL8852B && chip_id != RTL8851B)
2161 return 0;
2162
2163 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2164 if (ret) {
2165 rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2166 RTW89_PCIE_PHY_RATE);
2167 return ret;
2168 }
2169
2170 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2171 phy_rate = PCIE_PHY_GEN1;
2172 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2173 phy_rate = PCIE_PHY_GEN2;
2174 } else {
2175 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
2176 return -EOPNOTSUPP;
2177 }
2178 /* Disable L1BD */
2179 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2180 if (ret) {
2181 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2182 return ret;
2183 }
2184
2185 if (bdr_ori & RTW89_PCIE_BIT_L1) {
2186 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2187 bdr_ori & ~RTW89_PCIE_BIT_L1);
2188 if (ret) {
2189 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2190 RTW89_PCIE_L1_CTRL);
2191 return ret;
2192 }
2193 l1_flag = true;
2194 }
2195
2196 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2197 if (ret) {
2198 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2199 goto end;
2200 }
2201
2202 if (val16 & B_AX_CALIB_EN) {
2203 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2204 val16 & ~B_AX_CALIB_EN, phy_rate);
2205 if (ret) {
2206 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2207 goto end;
2208 }
2209 }
2210
2211 if (!autook_en)
2212 goto end;
2213 /* Set div */
2214 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2215 if (ret) {
2216 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2217 goto end;
2218 }
2219
2220 /* Obtain div and margin */
2221 ret = __get_target(rtwdev, &tar, phy_rate);
2222 if (ret) {
2223 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2224 goto end;
2225 }
2226
2227 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2228
2229 if (mgn_set >= 128) {
2230 div_set = 0x0003;
2231 mgn_set = 0x000F;
2232 } else if (mgn_set >= 64) {
2233 div_set = 0x0003;
2234 mgn_set >>= 3;
2235 } else if (mgn_set >= 32) {
2236 div_set = 0x0002;
2237 mgn_set >>= 2;
2238 } else if (mgn_set >= 16) {
2239 div_set = 0x0001;
2240 mgn_set >>= 1;
2241 } else if (mgn_set == 0) {
2242 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
2243 goto end;
2244 } else {
2245 div_set = 0x0000;
2246 }
2247
2248 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2249 if (ret) {
2250 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2251 goto end;
2252 }
2253
2254 val16 |= u16_encode_bits(div_set, B_AX_DIV);
2255
2256 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2257 if (ret) {
2258 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2259 goto end;
2260 }
2261
2262 ret = __get_target(rtwdev, &tar, phy_rate);
2263 if (ret) {
2264 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2265 goto end;
2266 }
2267
2268 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2269 tar, div_set, mgn_set);
2270 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2271 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2272 if (ret) {
2273 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2274 goto end;
2275 }
2276
2277 /* Enable function */
2278 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2279 if (ret) {
2280 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2281 goto end;
2282 }
2283
2284 /* CLK delay = 0 */
2285 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2286 PCIE_CLKDLY_HW_0);
2287
2288 end:
2289 /* Set L1BD to ori */
2290 if (l1_flag) {
2291 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2292 bdr_ori);
2293 if (ret) {
2294 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2295 RTW89_PCIE_L1_CTRL);
2296 return ret;
2297 }
2298 }
2299
2300 return ret;
2301 }
2302
2303 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2304 {
2305 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2306 int ret;
2307
2308 if (chip_id == RTL8852A) {
2309 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2310 PCIE_PHY_GEN1);
2311 if (ret)
2312 return ret;
2313 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2314 PCIE_PHY_GEN2);
2315 if (ret)
2316 return ret;
2317 } else if (chip_id == RTL8852C) {
2318 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2319 B_AX_DEGLITCH);
2320 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2321 B_AX_DEGLITCH);
2322 }
2323
2324 return 0;
2325 }
2326
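/* RTL8852C only: if OOBS is not yet selected on both the Gen1 and Gen2
 * PHYs, read the sensed OOBS level through the currently linked PHY and
 * program it into both PHYs. ASPM is disabled around the sequence and the
 * previous setting is restored afterwards.
 */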
2327 static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
2328 {
2329 u16 g1_oobs, g2_oobs;
2330 u32 backup_aspm;
2331 u32 phy_offset;
2332 u16 oobs_val;
2333 u16 val16;
2334 int ret;
2335
2336 if (rtwdev->chip->chip_id != RTL8852C)
2337 return;
2338
2339 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
2340 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
2341
2342 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2343 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2344 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
2345 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2346 if (g1_oobs && g2_oobs)
2347 goto out;
2348
2349 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
2350 if (ret)
2351 goto out;
2352
2353 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
2354 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
2355 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
2356
2357 val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
2358 OOBS_LEVEL_MASK);
2359 oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK);
2360
2361 rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val);
2362 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
2363 BAC_OOBS_SEL);
2364
2365 rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val);
2366 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
2367 BAC_OOBS_SEL);
2368
2369 out:
2370 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
2371 }
2372
2373 static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
2374 {
2375 u32 phy_offset;
2376
2377 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
2378 return;
2379
2380 phy_offset = R_RAC_DIRECT_OFFSET_G1;
2381 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
2382 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2383
2384 phy_offset = R_RAC_DIRECT_OFFSET_G2;
2385 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
2386 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2387 }
2388
2389 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2390 {
2391 if (rtwdev->chip->chip_id != RTL8852A)
2392 return;
2393
2394 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2395 }
2396
2397 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2398 {
2399 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2400
2401 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2402 return;
2403
2404 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2405 }
2406
2407 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2408 {
2409 int ret;
2410
2411 if (rtwdev->chip->chip_id != RTL8852A)
2412 return 0;
2413
2414 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2415 PCIE_PHY_GEN1);
2416 if (ret)
2417 return ret;
2418
2419 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2420 PCIE_PHY_GEN2);
2421 if (ret)
2422 return ret;
2423
2424 return 0;
2425 }
2426
2427 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2428 {
2429 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2430
2431 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2432 return;
2433
2434 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2435 }
2436
2437 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2438 {
2439 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2440
2441 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
2442 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2443 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2444 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2445 B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2446 } else if (rtwdev->chip->chip_id == RTL8852C) {
2447 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2448 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2449 }
2450 }
2451
2452 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2453 {
2454 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2455
2456 if (chip_id != RTL8852B && chip_id != RTL8851B)
2457 return 0;
2458
2459 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2460 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2461 }
2462
2463 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
2464 {
2465 if (pwr_up)
2466 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2467 else
2468 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2469 }
2470
2471 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2472 {
2473 if (rtwdev->chip->chip_id != RTL8852C)
2474 return;
2475
2476 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2477 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2478 }
2479
2480 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2481 {
2482 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2483 return;
2484
2485 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2486 }
2487
2488 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2489 {
2490 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2491 return;
2492
2493 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2494 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2495 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2496 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2497 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2498 }
2499
2500 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2501 {
2502 if (rtwdev->chip->chip_id != RTL8852C)
2503 return;
2504
2505 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2506 }
2507
2508 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2509 {
2510 if (rtwdev->chip->chip_id != RTL8852C)
2511 return;
2512
2513 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2514 }
2515
2516 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2517 {
2518 if (rtwdev->chip->chip_id == RTL8852C)
2519 return;
2520
2521 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2522 B_AX_SIC_EN_FORCE_CLKREQ);
2523 }
2524
2525 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2526 {
2527 const struct rtw89_pci_info *info = rtwdev->pci_info;
2528 u32 lbc;
2529
2530 if (rtwdev->chip->chip_id == RTL8852C)
2531 return;
2532
2533 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2534 if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2535 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2536 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2537 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2538 } else {
2539 lbc &= ~B_AX_LBC_EN;
2540 }
2541 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2542 }
2543
2544 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2545 {
2546 const struct rtw89_pci_info *info = rtwdev->pci_info;
2547 u32 val32;
2548
2549 if (rtwdev->chip->chip_id != RTL8852C)
2550 return;
2551
2552 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2553 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2554 info->io_rcy_tmr);
2555 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2556 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2557 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2558
2559 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2560 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2561 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2562 } else {
2563 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2564 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2565 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2566 }
2567
2568 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2569 }
2570
2571 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2572 {
2573 if (rtwdev->chip->chip_id == RTL8852C)
2574 return;
2575
2576 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2577 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2578
2579 if (rtwdev->chip->chip_id == RTL8852A)
2580 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2581 B_AX_EN_CHKDSC_NO_RX_STUCK);
2582 }
2583
2584 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2585 {
2586 if (rtwdev->chip->chip_id == RTL8852C)
2587 return;
2588
2589 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2590 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2591 }
2592
2593 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
2594 {
2595 const struct rtw89_pci_info *info = rtwdev->pci_info;
2596 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2597 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2598 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2599 B_AX_CLR_CH12_IDX;
2600 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2601 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2602
2603 if (chip_id == RTL8852A || chip_id == RTL8852C)
2604 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2605 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2606 /* clear DMA indexes */
2607 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2608 if (chip_id == RTL8852A || chip_id == RTL8852C)
2609 rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2610 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2611 rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2612 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2613 }
2614
2615 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2616 {
2617 const struct rtw89_pci_info *info = rtwdev->pci_info;
2618 u32 ret, check, dma_busy;
2619 u32 dma_busy1 = info->dma_busy1.addr;
2620 u32 dma_busy2 = info->dma_busy2_reg;
2621
2622 check = info->dma_busy1.mask;
2623
2624 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2625 10, 100, false, rtwdev, dma_busy1);
2626 if (ret)
2627 return ret;
2628
2629 if (!dma_busy2)
2630 return 0;
2631
2632 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2633
2634 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2635 10, 100, false, rtwdev, dma_busy2);
2636 if (ret)
2637 return ret;
2638
2639 return 0;
2640 }
2641
2642 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2643 {
2644 const struct rtw89_pci_info *info = rtwdev->pci_info;
2645 u32 ret, check, dma_busy;
2646 u32 dma_busy3 = info->dma_busy3_reg;
2647
2648 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2649
2650 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2651 10, 100, false, rtwdev, dma_busy3);
2652 if (ret)
2653 return ret;
2654
2655 return 0;
2656 }
2657
2658 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2659 {
2660 u32 ret;
2661
2662 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
2663 if (ret) {
2664 rtw89_err(rtwdev, "txdma ch busy\n");
2665 return ret;
2666 }
2667
2668 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
2669 if (ret) {
2670 rtw89_err(rtwdev, "rxdma ch busy\n");
2671 return ret;
2672 }
2673
2674 return 0;
2675 }
2676
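/* Apply the per-chip PCIe DMA operating mode: BD truncation, RXBD mode,
 * TX/RX DMA burst sizes, tag mode, multi-tag number and the WD DMA
 * idle/active intervals described by the chip's pci_info.
 */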
2677 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2678 {
2679 const struct rtw89_pci_info *info = rtwdev->pci_info;
2680 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2681 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2682 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2683 enum mac_ax_tag_mode tag_mode = info->tag_mode;
2684 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2685 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2686 enum mac_ax_tx_burst tx_burst = info->tx_burst;
2687 enum mac_ax_rx_burst rx_burst = info->rx_burst;
2688 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2689 u8 cv = rtwdev->hal.cv;
2690 u32 val32;
2691
2692 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2693 if (chip_id == RTL8852A && cv == CHIP_CBV)
2694 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2695 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2696 if (chip_id == RTL8852A || chip_id == RTL8852B)
2697 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2698 }
2699
2700 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2701 if (chip_id == RTL8852A && cv == CHIP_CBV)
2702 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2703 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2704 if (chip_id == RTL8852A || chip_id == RTL8852B)
2705 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2706 }
2707
2708 if (rxbd_mode == MAC_AX_RXBD_PKT) {
2709 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2710 } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2711 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2712
2713 if (chip_id == RTL8852A || chip_id == RTL8852B)
2714 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2715 B_AX_PCIE_RX_APPLEN_MASK, 0);
2716 }
2717
2718 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2719 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2720 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2721 } else if (chip_id == RTL8852C) {
2722 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2723 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2724 }
2725
2726 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2727 if (tag_mode == MAC_AX_TAG_SGL) {
2728 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2729 ~B_AX_LATENCY_CONTROL;
2730 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2731 } else if (tag_mode == MAC_AX_TAG_MULTI) {
2732 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2733 B_AX_LATENCY_CONTROL;
2734 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2735 }
2736 }
2737
2738 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2739 info->multi_tag_num);
2740
2741 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2742 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2743 wd_dma_idle_intvl);
2744 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2745 wd_dma_act_intvl);
2746 } else if (chip_id == RTL8852C) {
2747 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2748 wd_dma_idle_intvl);
2749 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2750 wd_dma_act_intvl);
2751 }
2752
2753 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2754 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2755 B_AX_HOST_ADDR_INFO_8B_SEL);
2756 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2757 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2758 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2759 B_AX_HOST_ADDR_INFO_8B_SEL);
2760 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2761 }
2762
2763 return 0;
2764 }
2765
2766 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2767 {
2768 const struct rtw89_pci_info *info = rtwdev->pci_info;
2769
2770 if (rtwdev->chip->chip_id == RTL8852A) {
2771 /* ltr sw trigger */
2772 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2773 }
2774 info->ltr_set(rtwdev, false);
2775 rtw89_pci_ctrl_dma_all(rtwdev, false);
2776 rtw89_pci_clr_idx_all(rtwdev);
2777
2778 return 0;
2779 }
2780
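/* PCIe bring-up before MAC init: apply PHY/electrical workarounds, run the
 * calibrations, stop and clear all DMA, reprogram the rings, reset BD RAM,
 * and leave only the FW command channel enabled so the firmware can be
 * downloaded before the remaining channels are turned on.
 */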
2781 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
2782 {
2783 const struct rtw89_pci_info *info = rtwdev->pci_info;
2784 int ret;
2785
2786 rtw89_pci_disable_eq(rtwdev);
2787 rtw89_pci_ber(rtwdev);
2788 rtw89_pci_rxdma_prefth(rtwdev);
2789 rtw89_pci_l1off_pwroff(rtwdev);
2790 rtw89_pci_deglitch_setting(rtwdev);
2791 ret = rtw89_pci_l2_rxen_lat(rtwdev);
2792 if (ret) {
2793 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2794 return ret;
2795 }
2796
2797 rtw89_pci_aphy_pwrcut(rtwdev);
2798 rtw89_pci_hci_ldo(rtwdev);
2799 rtw89_pci_dphy_delay(rtwdev);
2800
2801 ret = rtw89_pci_autok_x(rtwdev);
2802 if (ret) {
2803 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2804 return ret;
2805 }
2806
2807 ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2808 if (ret) {
2809 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2810 return ret;
2811 }
2812
2813 rtw89_pci_power_wake(rtwdev, true);
2814 rtw89_pci_autoload_hang(rtwdev);
2815 rtw89_pci_l12_vmain(rtwdev);
2816 rtw89_pci_gen2_force_ib(rtwdev);
2817 rtw89_pci_l1_ent_lat(rtwdev);
2818 rtw89_pci_wd_exit_l1(rtwdev);
2819 rtw89_pci_set_sic(rtwdev);
2820 rtw89_pci_set_lbc(rtwdev);
2821 rtw89_pci_set_io_rcy(rtwdev);
2822 rtw89_pci_set_dbg(rtwdev);
2823 rtw89_pci_set_keep_reg(rtwdev);
2824
2825 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2826
2827 /* stop DMA activities */
2828 rtw89_pci_ctrl_dma_all(rtwdev, false);
2829
2830 ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2831 if (ret) {
2832 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2833 return ret;
2834 }
2835
2836 rtw89_pci_clr_idx_all(rtwdev);
2837 rtw89_pci_mode_op(rtwdev);
2838
2839 /* fill TRX BD indexes */
2840 rtw89_pci_ops_reset(rtwdev);
2841
2842 ret = rtw89_pci_rst_bdram_ax(rtwdev);
2843 if (ret) {
2844 rtw89_warn(rtwdev, "reset bdram busy\n");
2845 return ret;
2846 }
2847
2848 	/* disable all channels except the FW CMD channel used to download firmware */
2849 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
2850 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
2851
2852 /* start DMA activities */
2853 rtw89_pci_ctrl_dma_all(rtwdev, true);
2854
2855 return 0;
2856 }
2857
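/* Program the LTR (Latency Tolerance Reporting) registers: sanity-check
 * the current values, then enable hardware LTR with a 500us space index,
 * a 3.2ms idle timer, the RX thresholds and the idle/active latency values.
 */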
2858 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2859 {
2860 u32 val;
2861
2862 if (!en)
2863 return 0;
2864
2865 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2866 if (rtw89_pci_ltr_is_err_reg_val(val))
2867 return -EINVAL;
2868 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2869 if (rtw89_pci_ltr_is_err_reg_val(val))
2870 return -EINVAL;
2871 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2872 if (rtw89_pci_ltr_is_err_reg_val(val))
2873 return -EINVAL;
2874 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2875 if (rtw89_pci_ltr_is_err_reg_val(val))
2876 return -EINVAL;
2877
2878 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
2879 B_AX_LTR_WD_NOEMP_CHK);
2880 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2881 PCI_LTR_SPC_500US);
2882 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2883 PCI_LTR_IDLE_TIMER_3_2MS);
2884 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2885 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2886 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
2887 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2888
2889 return 0;
2890 }
2891 EXPORT_SYMBOL(rtw89_pci_ltr_set);
2892
2893 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2894 {
2895 u32 dec_ctrl;
2896 u32 val32;
2897
2898 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2899 if (rtw89_pci_ltr_is_err_reg_val(val32))
2900 return -EINVAL;
2901 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2902 if (rtw89_pci_ltr_is_err_reg_val(val32))
2903 return -EINVAL;
2904 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2905 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2906 return -EINVAL;
2907 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2908 if (rtw89_pci_ltr_is_err_reg_val(val32))
2909 return -EINVAL;
2910 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2911 if (rtw89_pci_ltr_is_err_reg_val(val32))
2912 return -EINVAL;
2913
2914 if (!en) {
2915 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2916 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2917 B_AX_LTR_REQ_DRV;
2918 } else {
2919 dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2920 }
2921
2922 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2923 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
2924
2925 if (en)
2926 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
2927 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
2928 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2929 PCI_LTR_IDLE_TIMER_3_2MS);
2930 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2931 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2932 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
2933 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
2934 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
2935
2936 return 0;
2937 }
2938 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
2939
2940 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
2941 {
2942 const struct rtw89_pci_info *info = rtwdev->pci_info;
2943 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2944 int ret;
2945
2946 ret = info->ltr_set(rtwdev, true);
2947 if (ret) {
2948 rtw89_err(rtwdev, "pci ltr set fail\n");
2949 return ret;
2950 }
2951 if (chip_id == RTL8852A) {
2952 /* ltr sw trigger */
2953 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
2954 }
2955 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2956 /* ADDR info 8-byte mode */
2957 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2958 B_AX_HOST_ADDR_INFO_8B_SEL);
2959 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2960 }
2961
2962 /* enable DMA for all queues */
2963 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
2964
2965 /* Release PCI IO */
2966 rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
2967 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
2968
2969 return 0;
2970 }
2971
2972 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
2973 struct pci_dev *pdev)
2974 {
2975 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2976 int ret;
2977
2978 ret = pci_enable_device(pdev);
2979 if (ret) {
2980 rtw89_err(rtwdev, "failed to enable pci device\n");
2981 return ret;
2982 }
2983
2984 pci_set_master(pdev);
2985 pci_set_drvdata(pdev, rtwdev->hw);
2986
2987 rtwpci->pdev = pdev;
2988
2989 return 0;
2990 }
2991
2992 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
2993 struct pci_dev *pdev)
2994 {
2995 pci_disable_device(pdev);
2996 }
2997
2998 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
2999 struct pci_dev *pdev)
3000 {
3001 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3002 unsigned long resource_len;
3003 u8 bar_id = 2;
3004 int ret;
3005
3006 ret = pci_request_regions(pdev, KBUILD_MODNAME);
3007 if (ret) {
3008 rtw89_err(rtwdev, "failed to request pci regions\n");
3009 goto err;
3010 }
3011
3012 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3013 if (ret) {
3014 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
3015 goto err_release_regions;
3016 }
3017
3018 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
3019 if (ret) {
3020 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
3021 goto err_release_regions;
3022 }
3023
3024 resource_len = pci_resource_len(pdev, bar_id);
3025 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
3026 if (!rtwpci->mmap) {
3027 rtw89_err(rtwdev, "failed to map pci io\n");
3028 ret = -EIO;
3029 goto err_release_regions;
3030 }
3031
3032 return 0;
3033
3034 err_release_regions:
3035 pci_release_regions(pdev);
3036 err:
3037 return ret;
3038 }
3039
3040 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
3041 struct pci_dev *pdev)
3042 {
3043 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3044
3045 if (rtwpci->mmap) {
3046 pci_iounmap(pdev, rtwpci->mmap);
3047 pci_release_regions(pdev);
3048 }
3049 }
3050
3051 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
3052 struct pci_dev *pdev,
3053 struct rtw89_pci_tx_ring *tx_ring)
3054 {
3055 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3056 u8 *head = wd_ring->head;
3057 dma_addr_t dma = wd_ring->dma;
3058 u32 page_size = wd_ring->page_size;
3059 u32 page_num = wd_ring->page_num;
3060 u32 ring_sz = page_size * page_num;
3061
3062 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3063 wd_ring->head = NULL;
3064 }
3065
3066 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
3067 struct pci_dev *pdev,
3068 struct rtw89_pci_tx_ring *tx_ring)
3069 {
3070 int ring_sz;
3071 u8 *head;
3072 dma_addr_t dma;
3073
3074 head = tx_ring->bd_ring.head;
3075 dma = tx_ring->bd_ring.dma;
3076 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
3077 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3078
3079 tx_ring->bd_ring.head = NULL;
3080 }
3081
3082 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
3083 struct pci_dev *pdev)
3084 {
3085 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3086 const struct rtw89_pci_info *info = rtwdev->pci_info;
3087 struct rtw89_pci_tx_ring *tx_ring;
3088 int i;
3089
3090 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3091 if (info->tx_dma_ch_mask & BIT(i))
3092 continue;
3093 tx_ring = &rtwpci->tx_rings[i];
3094 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3095 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3096 }
3097 }
3098
3099 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
3100 struct pci_dev *pdev,
3101 struct rtw89_pci_rx_ring *rx_ring)
3102 {
3103 struct rtw89_pci_rx_info *rx_info;
3104 struct sk_buff *skb;
3105 dma_addr_t dma;
3106 u32 buf_sz;
3107 u8 *head;
3108 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
3109 int i;
3110
3111 buf_sz = rx_ring->buf_sz;
3112 for (i = 0; i < rx_ring->bd_ring.len; i++) {
3113 skb = rx_ring->buf[i];
3114 if (!skb)
3115 continue;
3116
3117 rx_info = RTW89_PCI_RX_SKB_CB(skb);
3118 dma = rx_info->dma;
3119 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3120 dev_kfree_skb(skb);
3121 rx_ring->buf[i] = NULL;
3122 }
3123
3124 head = rx_ring->bd_ring.head;
3125 dma = rx_ring->bd_ring.dma;
3126 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3127
3128 rx_ring->bd_ring.head = NULL;
3129 }
3130
3131 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
3132 struct pci_dev *pdev)
3133 {
3134 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3135 struct rtw89_pci_rx_ring *rx_ring;
3136 int i;
3137
3138 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3139 rx_ring = &rtwpci->rx_rings[i];
3140 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3141 }
3142 }
3143
3144 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
3145 struct pci_dev *pdev)
3146 {
3147 rtw89_pci_free_rx_rings(rtwdev, pdev);
3148 rtw89_pci_free_tx_rings(rtwdev, pdev);
3149 }
3150
3151 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
3152 struct rtw89_pci_rx_ring *rx_ring,
3153 struct sk_buff *skb, int buf_sz, u32 idx)
3154 {
3155 struct rtw89_pci_rx_info *rx_info;
3156 struct rtw89_pci_rx_bd_32 *rx_bd;
3157 dma_addr_t dma;
3158
3159 if (!skb)
3160 return -EINVAL;
3161
3162 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
3163 if (dma_mapping_error(&pdev->dev, dma))
3164 return -EBUSY;
3165
3166 rx_info = RTW89_PCI_RX_SKB_CB(skb);
3167 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
3168
3169 memset(rx_bd, 0, sizeof(*rx_bd));
3170 rx_bd->buf_size = cpu_to_le16(buf_sz);
3171 rx_bd->dma = cpu_to_le32(dma);
3172 rx_info->dma = dma;
3173
3174 return 0;
3175 }
3176
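/* Allocate one coherent buffer for the whole TX WD ring and carve it into
 * page_num pages of page_size bytes; each page backs one TXWD, which is
 * initialized and placed on the ring's free-page list. The FW command
 * channel (CH12) does not use TXWD pages and is skipped.
 */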
3177 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
3178 struct pci_dev *pdev,
3179 struct rtw89_pci_tx_ring *tx_ring,
3180 enum rtw89_tx_channel txch)
3181 {
3182 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3183 struct rtw89_pci_tx_wd *txwd;
3184 dma_addr_t dma;
3185 dma_addr_t cur_paddr;
3186 u8 *head;
3187 u8 *cur_vaddr;
3188 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
3189 u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
3190 u32 ring_sz = page_size * page_num;
3191 u32 page_offset;
3192 int i;
3193
3194 /* FWCMD queue doesn't use txwd as pages */
3195 if (txch == RTW89_TXCH_CH12)
3196 return 0;
3197
3198 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3199 if (!head)
3200 return -ENOMEM;
3201
3202 INIT_LIST_HEAD(&wd_ring->free_pages);
3203 wd_ring->head = head;
3204 wd_ring->dma = dma;
3205 wd_ring->page_size = page_size;
3206 wd_ring->page_num = page_num;
3207
3208 page_offset = 0;
3209 for (i = 0; i < page_num; i++) {
3210 txwd = &wd_ring->pages[i];
3211 cur_paddr = dma + page_offset;
3212 cur_vaddr = head + page_offset;
3213
3214 skb_queue_head_init(&txwd->queue);
3215 INIT_LIST_HEAD(&txwd->list);
3216 txwd->paddr = cur_paddr;
3217 txwd->vaddr = cur_vaddr;
3218 txwd->len = page_size;
3219 txwd->seq = i;
3220 rtw89_pci_enqueue_txwd(tx_ring, txwd);
3221
3222 page_offset += page_size;
3223 }
3224
3225 return 0;
3226 }
3227
3228 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3229 struct pci_dev *pdev,
3230 struct rtw89_pci_tx_ring *tx_ring,
3231 u32 desc_size, u32 len,
3232 enum rtw89_tx_channel txch)
3233 {
3234 const struct rtw89_pci_ch_dma_addr *txch_addr;
3235 int ring_sz = desc_size * len;
3236 u8 *head;
3237 dma_addr_t dma;
3238 int ret;
3239
3240 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3241 if (ret) {
3242 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3243 goto err;
3244 }
3245
3246 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
3247 if (ret) {
3248 rtw89_err(rtwdev, "failed to get address of txch %d", txch);
3249 goto err_free_wd_ring;
3250 }
3251
3252 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3253 if (!head) {
3254 ret = -ENOMEM;
3255 goto err_free_wd_ring;
3256 }
3257
3258 INIT_LIST_HEAD(&tx_ring->busy_pages);
3259 tx_ring->bd_ring.head = head;
3260 tx_ring->bd_ring.dma = dma;
3261 tx_ring->bd_ring.len = len;
3262 tx_ring->bd_ring.desc_size = desc_size;
3263 tx_ring->bd_ring.addr = *txch_addr;
3264 tx_ring->bd_ring.wp = 0;
3265 tx_ring->bd_ring.rp = 0;
3266 tx_ring->txch = txch;
3267
3268 return 0;
3269
3270 err_free_wd_ring:
3271 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3272 err:
3273 return ret;
3274 }
3275
3276 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3277 struct pci_dev *pdev)
3278 {
3279 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3280 const struct rtw89_pci_info *info = rtwdev->pci_info;
3281 struct rtw89_pci_tx_ring *tx_ring;
3282 u32 desc_size;
3283 u32 len;
3284 u32 i, tx_allocated;
3285 int ret;
3286
3287 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3288 if (info->tx_dma_ch_mask & BIT(i))
3289 continue;
3290 tx_ring = &rtwpci->tx_rings[i];
3291 desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3292 len = RTW89_PCI_TXBD_NUM_MAX;
3293 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3294 desc_size, len, i);
3295 if (ret) {
3296 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3297 goto err_free;
3298 }
3299 }
3300
3301 return 0;
3302
3303 err_free:
3304 tx_allocated = i;
3305 for (i = 0; i < tx_allocated; i++) {
3306 tx_ring = &rtwpci->tx_rings[i];
3307 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3308 }
3309
3310 return ret;
3311 }
3312
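/* Allocate the RX BD ring and pre-fill it: one skb of RTW89_PCI_RX_BUF_SIZE
 * is allocated and DMA-mapped for every descriptor, with the write pointer
 * starting at len - 1 when the chip treats equal read/write pointers as a
 * full ring. On error, already-mapped buffers and the ring are freed.
 */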
3313 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3314 struct pci_dev *pdev,
3315 struct rtw89_pci_rx_ring *rx_ring,
3316 u32 desc_size, u32 len, u32 rxch)
3317 {
3318 const struct rtw89_pci_info *info = rtwdev->pci_info;
3319 const struct rtw89_pci_ch_dma_addr *rxch_addr;
3320 struct sk_buff *skb;
3321 u8 *head;
3322 dma_addr_t dma;
3323 int ring_sz = desc_size * len;
3324 int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3325 int i, allocated;
3326 int ret;
3327
3328 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3329 if (ret) {
3330 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
3331 return ret;
3332 }
3333
3334 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3335 if (!head) {
3336 ret = -ENOMEM;
3337 goto err;
3338 }
3339
3340 rx_ring->bd_ring.head = head;
3341 rx_ring->bd_ring.dma = dma;
3342 rx_ring->bd_ring.len = len;
3343 rx_ring->bd_ring.desc_size = desc_size;
3344 rx_ring->bd_ring.addr = *rxch_addr;
3345 if (info->rx_ring_eq_is_full)
3346 rx_ring->bd_ring.wp = len - 1;
3347 else
3348 rx_ring->bd_ring.wp = 0;
3349 rx_ring->bd_ring.rp = 0;
3350 rx_ring->buf_sz = buf_sz;
3351 rx_ring->diliver_skb = NULL;
3352 rx_ring->diliver_desc.ready = false;
3353 rx_ring->target_rx_tag = 0;
3354
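	/* Fill every descriptor with a zeroed, DMA-mapped skb; on failure the
	 * error path below unmaps and frees whatever was already allocated.
	 */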
3355 for (i = 0; i < len; i++) {
3356 skb = dev_alloc_skb(buf_sz);
3357 if (!skb) {
3358 ret = -ENOMEM;
3359 goto err_free;
3360 }
3361
3362 memset(skb->data, 0, buf_sz);
3363 rx_ring->buf[i] = skb;
3364 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3365 buf_sz, i);
3366 if (ret) {
3367 rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3368 dev_kfree_skb_any(skb);
3369 rx_ring->buf[i] = NULL;
3370 goto err_free;
3371 }
3372 }
3373
3374 return 0;
3375
3376 err_free:
3377 allocated = i;
3378 for (i = 0; i < allocated; i++) {
3379 skb = rx_ring->buf[i];
3380 if (!skb)
3381 continue;
3382 dma = *((dma_addr_t *)skb->cb);
3383 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3384 dev_kfree_skb(skb);
3385 rx_ring->buf[i] = NULL;
3386 }
3387
3388 head = rx_ring->bd_ring.head;
3389 dma = rx_ring->bd_ring.dma;
3390 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3391
3392 rx_ring->bd_ring.head = NULL;
3393 err:
3394 return ret;
3395 }
3396
3397 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3398 struct pci_dev *pdev)
3399 {
3400 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3401 struct rtw89_pci_rx_ring *rx_ring;
3402 u32 desc_size;
3403 u32 len;
3404 int i, rx_allocated;
3405 int ret;
3406
3407 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3408 rx_ring = &rtwpci->rx_rings[i];
3409 desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3410 len = RTW89_PCI_RXBD_NUM_MAX;
3411 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3412 desc_size, len, i);
3413 if (ret) {
3414 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3415 goto err_free;
3416 }
3417 }
3418
3419 return 0;
3420
3421 err_free:
3422 rx_allocated = i;
3423 for (i = 0; i < rx_allocated; i++) {
3424 rx_ring = &rtwpci->rx_rings[i];
3425 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3426 }
3427
3428 return ret;
3429 }
3430
3431 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3432 struct pci_dev *pdev)
3433 {
3434 int ret;
3435
3436 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3437 if (ret) {
3438 rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3439 goto err;
3440 }
3441
3442 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3443 if (ret) {
3444 rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3445 goto err_free_tx_rings;
3446 }
3447
3448 return 0;
3449
3450 err_free_tx_rings:
3451 rtw89_pci_free_tx_rings(rtwdev, pdev);
3452 err:
3453 return ret;
3454 }
3455
3456 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3457 struct rtw89_pci *rtwpci)
3458 {
3459 skb_queue_head_init(&rtwpci->h2c_queue);
3460 skb_queue_head_init(&rtwpci->h2c_release_queue);
3461 }
3462
3463 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3464 struct pci_dev *pdev)
3465 {
3466 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3467 int ret;
3468
3469 ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3470 if (ret) {
3471 rtw89_err(rtwdev, "failed to setup pci mapping\n");
3472 goto err;
3473 }
3474
3475 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3476 if (ret) {
3477 rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3478 goto err_pci_unmap;
3479 }
3480
3481 rtw89_pci_h2c_init(rtwdev, rtwpci);
3482
3483 spin_lock_init(&rtwpci->irq_lock);
3484 spin_lock_init(&rtwpci->trx_lock);
3485
3486 return 0;
3487
3488 err_pci_unmap:
3489 rtw89_pci_clear_mapping(rtwdev, pdev);
3490 err:
3491 return ret;
3492 }
3493
3494 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3495 struct pci_dev *pdev)
3496 {
3497 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3498
3499 rtw89_pci_free_trx_rings(rtwdev, pdev);
3500 rtw89_pci_clear_mapping(rtwdev, pdev);
3501 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3502 skb_queue_len(&rtwpci->h2c_queue), true);
3503 }
3504
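/* Build the interrupt mask set for AX-generation chips. During SER recovery
 * only the HS0 indirect-interrupt indication is kept in intrs[0]; otherwise
 * the full set of RX DMA, RDU and DMA-stuck interrupts is enabled. RTL8851B
 * uses a workaround bit for the HS0 indication.
 */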
3505 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3506 {
3507 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3508 const struct rtw89_chip_info *chip = rtwdev->chip;
3509 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3510
3511 if (chip->chip_id == RTL8851B)
3512 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3513
3514 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
3515
3516 if (rtwpci->under_recovery) {
3517 rtwpci->intrs[0] = hs0isr_ind_int_en;
3518 rtwpci->intrs[1] = 0;
3519 } else {
3520 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3521 B_AX_RXDMA_INT_EN |
3522 B_AX_RXP1DMA_INT_EN |
3523 B_AX_RPQDMA_INT_EN |
3524 B_AX_RXDMA_STUCK_INT_EN |
3525 B_AX_RDU_INT_EN |
3526 B_AX_RPQBD_FULL_INT_EN |
3527 hs0isr_ind_int_en;
3528
3529 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3530 }
3531 }
3532 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3533
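/* The _v1 helpers below (and the _v2 ones for BE-generation chips further
 * down) pick one of three interrupt-mask profiles: recovery (SER in
 * progress), low power (indirect/wake interrupts only), or the default
 * full set.
 */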
3534 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3535 {
3536 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3537
3538 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3539 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3540 rtwpci->intrs[0] = 0;
3541 rtwpci->intrs[1] = 0;
3542 }
3543
3544 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3545 {
3546 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3547
3548 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
3549 B_AX_HS1ISR_IND_INT_EN |
3550 B_AX_HS0ISR_IND_INT_EN;
3551 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3552 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3553 B_AX_RXDMA_INT_EN |
3554 B_AX_RXP1DMA_INT_EN |
3555 B_AX_RPQDMA_INT_EN |
3556 B_AX_RXDMA_STUCK_INT_EN |
3557 B_AX_RDU_INT_EN |
3558 B_AX_RPQBD_FULL_INT_EN;
3559 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3560 }
3561
3562 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
3563 {
3564 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3565
3566 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
3567 B_AX_HS0ISR_IND_INT_EN;
3568 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3569 rtwpci->intrs[0] = 0;
3570 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3571 }
3572
3573 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
3574 {
3575 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3576
3577 if (rtwpci->under_recovery)
3578 rtw89_pci_recovery_intr_mask_v1(rtwdev);
3579 else if (rtwpci->low_power)
3580 rtw89_pci_low_power_intr_mask_v1(rtwdev);
3581 else
3582 rtw89_pci_default_intr_mask_v1(rtwdev);
3583 }
3584 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
3585
3586 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
3587 {
3588 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3589
3590 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
3591 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3592 rtwpci->intrs[0] = 0;
3593 rtwpci->intrs[1] = 0;
3594 }
3595
3596 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
3597 {
3598 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3599
3600 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
3601 B_BE_HS0_IND_INT_EN0;
3602 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3603 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
3604 B_BE_RDU_CH0_INT_IMR_V1;
3605 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3606 B_BE_PCIE_RX_RPQ0_IMR0_V1;
3607 }
3608
3609 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
3610 {
3611 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3612
3613 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
3614 B_BE_HS1_IND_INT_EN0;
3615 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3616 rtwpci->intrs[0] = 0;
3617 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3618 B_BE_PCIE_RX_RPQ0_IMR0_V1;
3619 }
3620
3621 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
3622 {
3623 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3624
3625 if (rtwpci->under_recovery)
3626 rtw89_pci_recovery_intr_mask_v2(rtwdev);
3627 else if (rtwpci->low_power)
3628 rtw89_pci_low_power_intr_mask_v2(rtwdev);
3629 else
3630 rtw89_pci_default_intr_mask_v2(rtwdev);
3631 }
3632 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
3633
3634 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
3635 struct pci_dev *pdev)
3636 {
3637 unsigned long flags = 0;
3638 int ret;
3639
3640 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
3641 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3642 if (ret < 0) {
3643 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
3644 goto err;
3645 }
3646
3647 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
3648 rtw89_pci_interrupt_handler,
3649 rtw89_pci_interrupt_threadfn,
3650 IRQF_SHARED, KBUILD_MODNAME, rtwdev);
3651 if (ret) {
3652 rtw89_err(rtwdev, "failed to request threaded irq\n");
3653 goto err_free_vector;
3654 }
3655
3656 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
3657
3658 return 0;
3659
3660 err_free_vector:
3661 pci_free_irq_vectors(pdev);
3662 err:
3663 return ret;
3664 }
3665
3666 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
3667 struct pci_dev *pdev)
3668 {
3669 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
3670 pci_free_irq_vectors(pdev);
3671 }
3672
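/* Decode a bit_num-wide Gray-coded value read from the PCIe PHY: each output
 * bit is the XOR of the corresponding input bit and its next higher
 * neighbour, with the most significant bit passed through unchanged.
 */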
3673 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
3674 {
3675 u16 bin = 0, gray_bit;
3676 u32 bit_idx;
3677
3678 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
3679 gray_bit = (gray_code >> bit_idx) & 0x1;
3680 if (bit_num - bit_idx > 1)
3681 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
3682 bin |= (gray_bit << bit_idx);
3683 }
3684
3685 return bin;
3686 }
3687
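/* RTL8852C only, and only when the ASPM control field is not already set to
 * L1: on a Gen2 link, read the equalizer "filter out" value back from the
 * PHY analog registers (stored Gray-coded), convert it to binary, rewrite
 * it, and re-enable the PHY power-save bits. A Gen1 link only gets the
 * power-save bit set; other link speeds are rejected.
 */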
3688 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
3689 {
3690 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3691 struct pci_dev *pdev = rtwpci->pdev;
3692 u16 val16, filter_out_val;
3693 u32 val, phy_offset;
3694 int ret;
3695
3696 if (rtwdev->chip->chip_id != RTL8852C)
3697 return 0;
3698
3699 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
3700 if (val == B_AX_ASPM_CTRL_L1)
3701 return 0;
3702
3703 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
3704 if (ret)
3705 return ret;
3706
3707 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
3708 if (val == RTW89_PCIE_GEN1_SPEED) {
3709 phy_offset = R_RAC_DIRECT_OFFSET_G1;
3710 } else if (val == RTW89_PCIE_GEN2_SPEED) {
3711 phy_offset = R_RAC_DIRECT_OFFSET_G2;
3712 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
3713 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
3714 val16 | B_PCIE_BIT_PINOUT_DIS);
3715 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
3716 val16 & ~B_PCIE_BIT_RD_SEL);
3717
3718 val16 = rtw89_read16_mask(rtwdev,
3719 phy_offset + RAC_ANA1F * RAC_MULT,
3720 FILTER_OUT_EQ_MASK);
3721 val16 = gray_code_to_bin(val16, hweight16(val16));
3722 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
3723 RAC_MULT);
3724 filter_out_val &= ~REG_FILTER_OUT_MASK;
3725 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
3726
3727 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
3728 filter_out_val);
3729 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
3730 B_BAC_EQ_SEL);
3731 rtw89_write16_set(rtwdev,
3732 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
3733 B_PCIE_BIT_PSAVE);
3734 } else {
3735 return -EOPNOTSUPP;
3736 }
3737 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
3738 B_PCIE_BIT_PSAVE);
3739
3740 return 0;
3741 }
3742
3743 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
3744 {
3745 const struct rtw89_pci_info *info = rtwdev->pci_info;
3746 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3747
3748 if (rtw89_pci_disable_clkreq)
3749 return;
3750
3751 gen_def->clkreq_set(rtwdev, enable);
3752 }
3753
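/* AX-generation CLKREQ control: program a 30us CLKREQ delay, then enable or
 * disable the CLKREQ function. 8852A/8852B/8851B toggle a bit in the vendor
 * PCIe config space, while 8852C selects and drives CLK_REQ_N through MAC
 * registers.
 */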
3754 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
3755 {
3756 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3757 int ret;
3758
3759 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
3760 PCIE_CLKDLY_HW_30US);
3761 if (ret)
3762 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
3763
3764 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3765 if (enable)
3766 ret = rtw89_pci_config_byte_set(rtwdev,
3767 RTW89_PCIE_L1_CTRL,
3768 RTW89_PCIE_BIT_CLK);
3769 else
3770 ret = rtw89_pci_config_byte_clr(rtwdev,
3771 RTW89_PCIE_L1_CTRL,
3772 RTW89_PCIE_BIT_CLK);
3773 if (ret)
3774 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
3775 enable ? "set" : "unset", ret);
3776 } else if (chip_id == RTL8852C) {
3777 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
3778 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
3779 if (enable)
3780 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
3781 B_AX_CLK_REQ_N);
3782 else
3783 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
3784 B_AX_CLK_REQ_N);
3785 }
3786 }
3787
3788 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
3789 {
3790 const struct rtw89_pci_info *info = rtwdev->pci_info;
3791 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3792
3793 if (rtw89_pci_disable_aspm_l1)
3794 return;
3795
3796 gen_def->aspm_set(rtwdev, enable);
3797 }
3798
3799 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
3800 {
3801 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3802 u8 value = 0;
3803 int ret;
3804
3805 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3806 if (ret)
3807 rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
3808
3809 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
3810 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
3811
3812 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3813 if (ret)
3814 		rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
3815
3816 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3817 if (enable)
3818 ret = rtw89_pci_config_byte_set(rtwdev,
3819 RTW89_PCIE_L1_CTRL,
3820 RTW89_PCIE_BIT_L1);
3821 else
3822 ret = rtw89_pci_config_byte_clr(rtwdev,
3823 RTW89_PCIE_L1_CTRL,
3824 RTW89_PCIE_BIT_L1);
3825 } else if (chip_id == RTL8852C) {
3826 if (enable)
3827 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3828 B_AX_ASPM_CTRL_L1);
3829 else
3830 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3831 B_AX_ASPM_CTRL_L1);
3832 }
3833 if (ret)
3834 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
3835 enable ? "set" : "unset", ret);
3836 }
3837
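/* Recalculate RX interrupt mitigation: it is enabled only when the device is
 * not scanning and either TX or RX traffic is at high level. BE chips just
 * enable the mitigation bits; AX chips also program the RX counter threshold
 * (half of RTW89_PCI_RXBD_NUM_MAX) and a 2048us timeout expressed in 64us
 * units. Otherwise mitigation is disabled by writing 0.
 */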
3838 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
3839 {
3840 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
3841 const struct rtw89_pci_info *info = rtwdev->pci_info;
3842 struct rtw89_traffic_stats *stats = &rtwdev->stats;
3843 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3844 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3845 u32 val = 0;
3846
3847 if (rtwdev->scanning ||
3848 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
3849 goto out;
3850
3851 if (chip_gen == RTW89_CHIP_BE)
3852 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
3853 else
3854 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
3855 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
3856 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
3857 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
3858
3859 out:
3860 rtw89_write32(rtwdev, info->mit_addr, val);
3861 }
3862
3863 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
3864 {
3865 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3866 struct pci_dev *pdev = rtwpci->pdev;
3867 u16 link_ctrl;
3868 int ret;
3869
3870 	/* Although the standard PCIE configuration space provides a link
3871 	 * control register, by Realtek's design the driver must also check
3872 	 * whether the host supports CLKREQ/ASPM before enabling the HW
3873 	 * module.
3874 	 *
3875 	 * These features involve two associated HW modules: one accesses
3876 	 * the PCIE configuration space to follow the host settings, while
3877 	 * the other actually performs the CLKREQ/ASPM mechanisms and is
3878 	 * disabled by default, because the host may not support them, and
3879 	 * wrong settings (e.g. CLKREQ# not bi-directional) could cause the
3880 	 * device to drop off the bus if the HW misbehaves on the link.
3881 	 *
3882 	 * Hence the driver is designed to first check that the PCIE
3883 	 * configuration space setting is synced and enabled, and only then
3884 	 * turn on the other module that actually implements the mechanism.
3885 	 */
3886 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
3887 if (ret) {
3888 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
3889 return;
3890 }
3891
3892 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
3893 rtw89_pci_clkreq_set(rtwdev, true);
3894
3895 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
3896 rtw89_pci_aspm_set(rtwdev, true);
3897 }
3898
3899 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
3900 {
3901 const struct rtw89_pci_info *info = rtwdev->pci_info;
3902 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3903
3904 if (rtw89_pci_disable_l1ss)
3905 return;
3906
3907 gen_def->l1ss_set(rtwdev, enable);
3908 }
3909
3910 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
3911 {
3912 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3913 int ret;
3914
3915 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3916 if (enable)
3917 ret = rtw89_pci_config_byte_set(rtwdev,
3918 RTW89_PCIE_TIMER_CTRL,
3919 RTW89_PCIE_BIT_L1SUB);
3920 else
3921 ret = rtw89_pci_config_byte_clr(rtwdev,
3922 RTW89_PCIE_TIMER_CTRL,
3923 RTW89_PCIE_BIT_L1SUB);
3924 if (ret)
3925 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
3926 enable ? "set" : "unset", ret);
3927 } else if (chip_id == RTL8852C) {
3928 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
3929 RTW89_PCIE_BIT_ASPM_L11 |
3930 RTW89_PCIE_BIT_PCI_L11);
3931 if (ret)
3932 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
3933 if (enable)
3934 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3935 B_AX_L1SUB_DISABLE);
3936 else
3937 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3938 B_AX_L1SUB_DISABLE);
3939 }
3940 }
3941
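/* Enable device-side L1 substates only if the host exposes the L1SS extended
 * capability and has enabled at least one L1SS mode in the CTL1 register.
 */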
3942 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
3943 {
3944 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3945 struct pci_dev *pdev = rtwpci->pdev;
3946 u32 l1ss_cap_ptr, l1ss_ctrl;
3947
3948 if (rtw89_pci_disable_l1ss)
3949 return;
3950
3951 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3952 if (!l1ss_cap_ptr)
3953 return;
3954
3955 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
3956
3957 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
3958 rtw89_pci_l1ss_set(rtwdev, true);
3959 }
3960
3961 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
3962 {
3963 int ret = 0;
3964 u32 sts;
3965 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
3966
3967 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
3968 10, 1000, false, rtwdev,
3969 R_AX_PCIE_DMA_BUSY1);
3970 if (ret) {
3971 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
3972 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
3973 return -EINVAL;
3974 }
3975 return ret;
3976 }
3977
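/* Level-1 SER recovery, step 1 (AX chips except 8852C): stop all PCIe DMA
 * and poll for I/O idle. If polling fails, disable whichever HCI DMA engine
 * the debug flags report as stuck, re-enable both, and poll once more.
 */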
3978 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
3979 {
3980 u32 val;
3981 int ret;
3982
3983 if (rtwdev->chip->chip_id == RTL8852C)
3984 return 0;
3985
3986 rtw89_pci_ctrl_dma_all(rtwdev, false);
3987 ret = rtw89_pci_poll_io_idle_ax(rtwdev);
3988 if (ret) {
3989 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
3990 rtw89_debug(rtwdev, RTW89_DBG_HCI,
3991 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
3992 R_AX_DBG_ERR_FLAG, val);
3993 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
3994 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
3995 if (val & B_AX_RX_STUCK)
3996 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
3997 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
3998 ret = rtw89_pci_poll_io_idle_ax(rtwdev);
3999 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4000 rtw89_debug(rtwdev, RTW89_DBG_HCI,
4001 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
4002 R_AX_DBG_ERR_FLAG, val);
4003 }
4004
4005 return ret;
4006 }
4007
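/* Level-1 SER recovery, step 2 (AX chips except 8852C): cycle the HCI DMA
 * engines, clear all ring indexes, reset the BD RAM, and re-enable DMA.
 */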
4008 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
4009 {
4010 	int ret;
4011
4012 if (rtwdev->chip->chip_id == RTL8852C)
4013 return 0;
4014
4015 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
4016 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4017 rtw89_pci_clr_idx_all(rtwdev);
4018
4019 ret = rtw89_pci_rst_bdram_ax(rtwdev);
4020 if (ret)
4021 return ret;
4022
4023 rtw89_pci_ctrl_dma_all(rtwdev, true);
4024 return ret;
4025 }
4026
4027 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
4028 enum rtw89_lv1_rcvy_step step)
4029 {
4030 const struct rtw89_pci_info *info = rtwdev->pci_info;
4031 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4032 int ret;
4033
4034 switch (step) {
4035 case RTW89_LV1_RCVY_STEP_1:
4036 ret = gen_def->lv1rst_stop_dma(rtwdev);
4037 if (ret)
4038 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
4039
4040 break;
4041
4042 case RTW89_LV1_RCVY_STEP_2:
4043 ret = gen_def->lv1rst_start_dma(rtwdev);
4044 if (ret)
4045 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
4046 break;
4047
4048 default:
4049 return -EINVAL;
4050 }
4051
4052 return ret;
4053 }
4054
4055 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
4056 {
4057 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
4058 return;
4059
4060 if (rtwdev->chip->chip_id == RTL8852C) {
4061 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4062 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
4063 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4064 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
4065 } else {
4066 		rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
4067 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
4068 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4069 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
4070 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4071 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
4072 }
4073 }
4074
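/* NAPI poll: clear and service the release-report queue (RPQ) first so TX
 * resources are reclaimed, then the RX queue. When the budget is not fully
 * consumed and NAPI completes, interrupts are re-enabled under irq_lock if
 * the interface is still running.
 */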
4075 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
4076 {
4077 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
4078 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4079 const struct rtw89_pci_info *info = rtwdev->pci_info;
4080 const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4081 unsigned long flags;
4082 int work_done;
4083
4084 rtwdev->napi_budget_countdown = budget;
4085
4086 rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
4087 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4088 if (work_done == budget)
4089 return budget;
4090
4091 rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
4092 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4093 if (work_done < budget && napi_complete_done(napi, work_done)) {
4094 spin_lock_irqsave(&rtwpci->irq_lock, flags);
4095 if (likely(rtwpci->running))
4096 rtw89_chip_enable_intr(rtwdev, rtwpci);
4097 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
4098 }
4099
4100 return work_done;
4101 }
4102
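/* System suspend: with the 0x1C write-lock bit toggled around it, set
 * B_AX_R_DIS_PRST (presumably so PERST is ignored while suspended). On
 * 8852A/8852B/8851B also clear the L2 LDO control and keep the PERST and
 * link-training register state; on other chips clear the CMAC/DMAC L1-exit
 * enables. rtw89_pci_resume() below reverses these settings.
 */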
4103 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
4104 {
4105 struct ieee80211_hw *hw = dev_get_drvdata(dev);
4106 struct rtw89_dev *rtwdev = hw->priv;
4107 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4108
4109 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4110 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4111 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4112 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
4113 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
4114 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4115 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
4116 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4117 } else {
4118 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4119 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4120 }
4121
4122 return 0;
4123 }
4124
4125 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
4126 {
4127 if (rtwdev->chip->chip_id == RTL8852C)
4128 return;
4129
4130 	/* Hardware requires the register to be written twice for the setting to take effect */
4131 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4132 RTW89_PCIE_BIT_CFG_RST_MSTATE);
4133 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4134 RTW89_PCIE_BIT_CFG_RST_MSTATE);
4135 }
4136
4137 static int __maybe_unused rtw89_pci_resume(struct device *dev)
4138 {
4139 struct ieee80211_hw *hw = dev_get_drvdata(dev);
4140 struct rtw89_dev *rtwdev = hw->priv;
4141 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4142
4143 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4144 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4145 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4146 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
4147 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
4148 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4149 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
4150 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4151 } else {
4152 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4153 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4154 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4155 B_AX_SEL_REQ_ENTR_L1);
4156 }
4157 rtw89_pci_l2_hci_ldo(rtwdev);
4158 rtw89_pci_filter_out(rtwdev);
4159 rtw89_pci_link_cfg(rtwdev);
4160 rtw89_pci_l1ss_cfg(rtwdev);
4161
4162 return 0;
4163 }
4164
4165 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
4166 EXPORT_SYMBOL(rtw89_pm_ops);
4167
4168 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
4169 .isr_rdu = B_AX_RDU_INT,
4170 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
4171 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
4172 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
4173 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
4174 B_AX_RDU_INT},
4175
4176 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
4177 .mac_pre_deinit = NULL,
4178 .mac_post_init = rtw89_pci_ops_mac_post_init_ax,
4179
4180 .clr_idx_all = rtw89_pci_clr_idx_all_ax,
4181 .rst_bdram = rtw89_pci_rst_bdram_ax,
4182
4183 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
4184 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
4185
4186 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
4187 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
4188 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
4189
4190 .aspm_set = rtw89_pci_aspm_set_ax,
4191 .clkreq_set = rtw89_pci_clkreq_set_ax,
4192 .l1ss_set = rtw89_pci_l1ss_set_ax,
4193 };
4194 EXPORT_SYMBOL(rtw89_pci_gen_ax);
4195
4196 static const struct rtw89_hci_ops rtw89_pci_ops = {
4197 .tx_write = rtw89_pci_ops_tx_write,
4198 .tx_kick_off = rtw89_pci_ops_tx_kick_off,
4199 .flush_queues = rtw89_pci_ops_flush_queues,
4200 .reset = rtw89_pci_ops_reset,
4201 .start = rtw89_pci_ops_start,
4202 .stop = rtw89_pci_ops_stop,
4203 .pause = rtw89_pci_ops_pause,
4204 .switch_mode = rtw89_pci_ops_switch_mode,
4205 .recalc_int_mit = rtw89_pci_recalc_int_mit,
4206
4207 .read8 = rtw89_pci_ops_read8,
4208 .read16 = rtw89_pci_ops_read16,
4209 .read32 = rtw89_pci_ops_read32,
4210 .write8 = rtw89_pci_ops_write8,
4211 .write16 = rtw89_pci_ops_write16,
4212 .write32 = rtw89_pci_ops_write32,
4213
4214 .mac_pre_init = rtw89_pci_ops_mac_pre_init,
4215 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit,
4216 .mac_post_init = rtw89_pci_ops_mac_post_init,
4217 .deinit = rtw89_pci_ops_deinit,
4218
4219 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
4220 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
4221 .dump_err_status = rtw89_pci_ops_dump_err_status,
4222 .napi_poll = rtw89_pci_napi_poll,
4223
4224 .recovery_start = rtw89_pci_ops_recovery_start,
4225 .recovery_complete = rtw89_pci_ops_recovery_complete,
4226
4227 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
4228 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
4229 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
4230 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
4231
4232 .clr_idx_all = rtw89_pci_clr_idx_all,
4233 .clear = rtw89_pci_clear_resource,
4234 .disable_intr = rtw89_pci_disable_intr_lock,
4235 .enable_intr = rtw89_pci_enable_intr_lock,
4236 .rst_bdram = rtw89_pci_reset_bdram,
4237 };
4238
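/* PCI probe: allocate the ieee80211 hw and rtw89 core, claim and map the
 * PCI device, allocate the TX/RX rings, read chip information, apply the
 * PCIe workarounds plus link and L1SS configuration, set up NAPI and the
 * IRQ, and finally register with mac80211. Error paths unwind in reverse
 * order.
 */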
4239 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4240 {
4241 struct rtw89_dev *rtwdev;
4242 const struct rtw89_driver_info *info;
4243 const struct rtw89_pci_info *pci_info;
4244 int ret;
4245
4246 info = (const struct rtw89_driver_info *)id->driver_data;
4247
4248 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
4249 sizeof(struct rtw89_pci),
4250 info->chip);
4251 if (!rtwdev) {
4252 dev_err(&pdev->dev, "failed to allocate hw\n");
4253 return -ENOMEM;
4254 }
4255
4256 pci_info = info->bus.pci;
4257
4258 rtwdev->pci_info = info->bus.pci;
4259 rtwdev->hci.ops = &rtw89_pci_ops;
4260 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
4261 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
4262 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
4263
4264 rtw89_check_quirks(rtwdev, info->quirks);
4265
4266 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
4267
4268 ret = rtw89_core_init(rtwdev);
4269 if (ret) {
4270 rtw89_err(rtwdev, "failed to initialise core\n");
4271 goto err_release_hw;
4272 }
4273
4274 ret = rtw89_pci_claim_device(rtwdev, pdev);
4275 if (ret) {
4276 rtw89_err(rtwdev, "failed to claim pci device\n");
4277 goto err_core_deinit;
4278 }
4279
4280 ret = rtw89_pci_setup_resource(rtwdev, pdev);
4281 if (ret) {
4282 rtw89_err(rtwdev, "failed to setup pci resource\n");
4283 goto err_declaim_pci;
4284 }
4285
4286 ret = rtw89_chip_info_setup(rtwdev);
4287 if (ret) {
4288 rtw89_err(rtwdev, "failed to setup chip information\n");
4289 goto err_clear_resource;
4290 }
4291
4292 rtw89_pci_filter_out(rtwdev);
4293 rtw89_pci_link_cfg(rtwdev);
4294 rtw89_pci_l1ss_cfg(rtwdev);
4295
4296 rtw89_core_napi_init(rtwdev);
4297
4298 ret = rtw89_pci_request_irq(rtwdev, pdev);
4299 if (ret) {
4300 rtw89_err(rtwdev, "failed to request pci irq\n");
4301 goto err_deinit_napi;
4302 }
4303
4304 ret = rtw89_core_register(rtwdev);
4305 if (ret) {
4306 rtw89_err(rtwdev, "failed to register core\n");
4307 goto err_free_irq;
4308 }
4309
4310 set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
4311
4312 return 0;
4313
4314 err_free_irq:
4315 rtw89_pci_free_irq(rtwdev, pdev);
4316 err_deinit_napi:
4317 rtw89_core_napi_deinit(rtwdev);
4318 err_clear_resource:
4319 rtw89_pci_clear_resource(rtwdev, pdev);
4320 err_declaim_pci:
4321 rtw89_pci_declaim_device(rtwdev, pdev);
4322 err_core_deinit:
4323 rtw89_core_deinit(rtwdev);
4324 err_release_hw:
4325 rtw89_free_ieee80211_hw(rtwdev);
4326
4327 return ret;
4328 }
4329 EXPORT_SYMBOL(rtw89_pci_probe);
4330
4331 void rtw89_pci_remove(struct pci_dev *pdev)
4332 {
4333 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4334 struct rtw89_dev *rtwdev;
4335
4336 rtwdev = hw->priv;
4337
4338 rtw89_pci_free_irq(rtwdev, pdev);
4339 rtw89_core_napi_deinit(rtwdev);
4340 rtw89_core_unregister(rtwdev);
4341 rtw89_pci_clear_resource(rtwdev, pdev);
4342 rtw89_pci_declaim_device(rtwdev, pdev);
4343 rtw89_core_deinit(rtwdev);
4344 rtw89_free_ieee80211_hw(rtwdev);
4345 }
4346 EXPORT_SYMBOL(rtw89_pci_remove);
4347
4348 MODULE_AUTHOR("Realtek Corporation");
4349 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
4350 MODULE_LICENSE("Dual BSD/GPL");
4351