1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2020 Realtek Corporation
3 */
4
5 #if defined(__FreeBSD__)
6 #define LINUXKPI_PARAM_PREFIX rtw89_pci_
7 #endif
8
9 #include <linux/pci.h>
10 #if defined(__FreeBSD__)
11 #include <sys/rman.h>
12 #endif
13
14 #include "mac.h"
15 #include "pci.h"
16 #include "reg.h"
17 #include "ser.h"
18
19 static bool rtw89_pci_disable_clkreq;
20 static bool rtw89_pci_disable_aspm_l1;
21 static bool rtw89_pci_disable_l1ss;
22 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
23 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
24 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
25 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
26 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
27 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
28
29 static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
30 {
31 u32 val;
32 int ret;
33
34 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
35 rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);
36
37 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
38 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
39 rtwdev, R_AX_PCIE_INIT_CFG1);
40
41 if (ret)
42 return -EBUSY;
43
44 return 0;
45 }
46
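/* Compute how many descriptors the hardware has consumed (TX) or filled
 * (RX) since the last poll, based on the hardware index read back from
 * the ring's idx register, taking ring wrap-around into account.
 */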
47 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
48 struct rtw89_pci_dma_ring *bd_ring,
49 u32 cur_idx, bool tx)
50 {
51 u32 cnt, cur_rp, wp, rp, len;
52
53 rp = bd_ring->rp;
54 wp = bd_ring->wp;
55 len = bd_ring->len;
56
57 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
58 if (tx)
59 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
60 else
61 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
62
63 bd_ring->rp = cur_rp;
64
65 return cnt;
66 }
67
68 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
69 struct rtw89_pci_tx_ring *tx_ring)
70 {
71 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
72 u32 addr_idx = bd_ring->addr.idx;
73 u32 cnt, idx;
74
75 idx = rtw89_read32(rtwdev, addr_idx);
76 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
77
78 return cnt;
79 }
80
81 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
82 struct rtw89_pci *rtwpci,
83 u32 cnt, bool release_all)
84 {
85 struct rtw89_pci_tx_data *tx_data;
86 struct sk_buff *skb;
87 u32 qlen;
88
89 while (cnt--) {
90 skb = skb_dequeue(&rtwpci->h2c_queue);
91 if (!skb) {
92 rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
93 return;
94 }
95 skb_queue_tail(&rtwpci->h2c_release_queue, skb);
96 }
97
98 qlen = skb_queue_len(&rtwpci->h2c_release_queue);
99 if (!release_all)
100 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
101
102 while (qlen--) {
103 skb = skb_dequeue(&rtwpci->h2c_release_queue);
104 if (!skb) {
105 rtw89_err(rtwdev, "failed to release fwcmd\n");
106 return;
107 }
108 tx_data = RTW89_PCI_TX_SKB_CB(skb);
109 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
110 DMA_TO_DEVICE);
111 dev_kfree_skb_any(skb);
112 }
113 }
114
115 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
116 struct rtw89_pci *rtwpci)
117 {
118 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
119 u32 cnt;
120
121 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
122 if (!cnt)
123 return;
124 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
125 }
126
127 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
128 struct rtw89_pci_rx_ring *rx_ring)
129 {
130 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
131 u32 addr_idx = bd_ring->addr.idx;
132 u32 cnt, idx;
133
134 idx = rtw89_read32(rtwdev, addr_idx);
135 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
136
137 return cnt;
138 }
139
140 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
141 struct sk_buff *skb)
142 {
143 struct rtw89_pci_rx_info *rx_info;
144 dma_addr_t dma;
145
146 rx_info = RTW89_PCI_RX_SKB_CB(skb);
147 dma = rx_info->dma;
148 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
149 DMA_FROM_DEVICE);
150 }
151
152 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
153 struct sk_buff *skb)
154 {
155 struct rtw89_pci_rx_info *rx_info;
156 dma_addr_t dma;
157
158 rx_info = RTW89_PCI_RX_SKB_CB(skb);
159 dma = rx_info->dma;
160 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
161 DMA_FROM_DEVICE);
162 }
163
164 static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
165 struct sk_buff *skb)
166 {
167 struct rtw89_pci_rxbd_info *rxbd_info;
168 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
169
170 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
171 rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
172 rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
173 rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
174 rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
175
176 return 0;
177 }
178
179 static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
180 {
181 const struct rtw89_pci_info *info = rtwdev->pci_info;
182 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
183 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
184
185 if (enable) {
186 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
187 if (dma_stop2->addr)
188 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
189 } else {
190 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
191 if (dma_stop2->addr)
192 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
193 }
194 }
195
196 static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
197 {
198 const struct rtw89_pci_info *info = rtwdev->pci_info;
199 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
200
201 if (enable)
202 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
203 else
204 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
205 }
206
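/* Copy one RX segment's payload into the skb being assembled. If the
 * reported segment length looks bogus, fall back to the descriptor's
 * pkt_size for a single-segment frame (fs && ls); otherwise drop it.
 */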
207 static bool
208 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
209 struct sk_buff *new,
210 const struct sk_buff *skb, u32 offset,
211 const struct rtw89_pci_rx_info *rx_info,
212 const struct rtw89_rx_desc_info *desc_info)
213 {
214 u32 copy_len = rx_info->len - offset;
215
216 if (unlikely(skb_tailroom(new) < copy_len)) {
217 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
218 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
219 rx_info->len, desc_info->pkt_size, offset, fs, ls);
220 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
221 skb->data, rx_info->len);
222 /* length of a single segment skb is desc_info->pkt_size */
223 if (fs && ls) {
224 copy_len = desc_info->pkt_size;
225 } else {
226 rtw89_info(rtwdev, "drop rx data due to invalid length\n");
227 return false;
228 }
229 }
230
231 skb_put_data(new, skb->data + offset, copy_len);
232
233 return true;
234 }
235
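/* Deliver one RX buffer descriptor. A frame may span several RXBDs:
 * the first segment (FS) carries the RX descriptor and allocates the
 * target skb, later segments only append payload, and the last segment
 * (LS) hands the completed skb to the core RX path.
 */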
236 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
237 struct rtw89_pci_rx_ring *rx_ring)
238 {
239 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
240 struct rtw89_pci_rx_info *rx_info;
241 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
242 struct sk_buff *new = rx_ring->diliver_skb;
243 struct sk_buff *skb;
244 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
245 u32 offset;
246 u32 cnt = 1;
247 bool fs, ls;
248 int ret;
249
250 skb = rx_ring->buf[bd_ring->wp];
251 rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
252
253 ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
254 if (ret) {
255 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
256 bd_ring->wp, ret);
257 goto err_sync_device;
258 }
259
260 rx_info = RTW89_PCI_RX_SKB_CB(skb);
261 fs = rx_info->fs;
262 ls = rx_info->ls;
263
264 if (fs) {
265 if (new) {
266 rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
267 "skb should not be ready before first segment start\n");
268 goto err_sync_device;
269 }
270 if (desc_info->ready) {
271 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
272 goto err_sync_device;
273 }
274
275 rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
276
277 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
278 if (!new)
279 goto err_sync_device;
280
281 rx_ring->diliver_skb = new;
282
283 /* first segment has RX desc */
284 offset = desc_info->offset + desc_info->rxd_len;
285 } else {
286 offset = sizeof(struct rtw89_pci_rxbd_info);
287 if (!new) {
288 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
289 goto err_sync_device;
290 }
291 }
292 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
293 goto err_sync_device;
294 rtw89_pci_sync_skb_for_device(rtwdev, skb);
295 rtw89_pci_rxbd_increase(rx_ring, 1);
296
297 if (!desc_info->ready) {
298 rtw89_warn(rtwdev, "no rx desc information\n");
299 goto err_free_resource;
300 }
301 if (ls) {
302 rtw89_core_rx(rtwdev, desc_info, new);
303 rx_ring->diliver_skb = NULL;
304 desc_info->ready = false;
305 }
306
307 return cnt;
308
309 err_sync_device:
310 rtw89_pci_sync_skb_for_device(rtwdev, skb);
311 rtw89_pci_rxbd_increase(rx_ring, 1);
312 err_free_resource:
313 if (new)
314 dev_kfree_skb_any(new);
315 rx_ring->diliver_skb = NULL;
316 desc_info->ready = false;
317
318 return cnt;
319 }
320
321 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
322 struct rtw89_pci_rx_ring *rx_ring,
323 u32 cnt)
324 {
325 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
326 u32 rx_cnt;
327
328 while (cnt && rtwdev->napi_budget_countdown > 0) {
329 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
330 if (!rx_cnt) {
331 rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
332
333 /* skip the remaining RXBD buffers */
334 rtw89_pci_rxbd_increase(rx_ring, cnt);
335 break;
336 }
337
338 cnt -= rx_cnt;
339 }
340
341 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
342 }
343
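/* Poll the normal RX queue (RXQ) within the NAPI budget and deliver any
 * completed frames; returns how much of the budget was consumed.
 */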
344 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
345 struct rtw89_pci *rtwpci, int budget)
346 {
347 struct rtw89_pci_rx_ring *rx_ring;
348 int countdown = rtwdev->napi_budget_countdown;
349 u32 cnt;
350
351 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
352
353 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
354 if (!cnt)
355 return 0;
356
357 cnt = min_t(u32, budget, cnt);
358
359 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
360
361 /* In case of flushing pending SKBs, the countdown may drop to zero or below. */
362 if (rtwdev->napi_budget_countdown <= 0)
363 return budget;
364
365 return budget - countdown;
366 }
367
368 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
369 struct rtw89_pci_tx_ring *tx_ring,
370 struct sk_buff *skb, u8 tx_status)
371 {
372 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
373 struct ieee80211_tx_info *info;
374
375 rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
376
377 info = IEEE80211_SKB_CB(skb);
378 ieee80211_tx_info_clear_status(info);
379
380 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
381 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
382 if (tx_status == RTW89_TX_DONE) {
383 info->flags |= IEEE80211_TX_STAT_ACK;
384 tx_ring->tx_acked++;
385 } else {
386 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
387 rtw89_debug(rtwdev, RTW89_DBG_FW,
388 "TX failed with status %x\n", tx_status);
389 switch (tx_status) {
390 case RTW89_TX_RETRY_LIMIT:
391 tx_ring->tx_retry_lmt++;
392 break;
393 case RTW89_TX_LIFE_TIME:
394 tx_ring->tx_life_time++;
395 break;
396 case RTW89_TX_MACID_DROP:
397 tx_ring->tx_mac_id_drop++;
398 break;
399 default:
400 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
401 break;
402 }
403 }
404
405 ieee80211_tx_status_ni(rtwdev->hw, skb);
406 }
407
408 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
409 {
410 struct rtw89_pci_tx_wd *txwd;
411 u32 cnt;
412
413 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
414 while (cnt--) {
415 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
416 if (!txwd) {
417 rtw89_warn(rtwdev, "No busy txwd pages available\n");
418 break;
419 }
420
421 list_del_init(&txwd->list);
422
423 /* this skb has been freed by RPP */
424 if (skb_queue_len(&txwd->queue) == 0)
425 rtw89_pci_enqueue_txwd(tx_ring, txwd);
426 }
427 }
428
429 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
430 struct rtw89_pci_tx_ring *tx_ring)
431 {
432 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
433 struct rtw89_pci_tx_wd *txwd;
434 int i;
435
436 for (i = 0; i < wd_ring->page_num; i++) {
437 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
438 if (!txwd)
439 break;
440
441 list_del_init(&txwd->list);
442 }
443 }
444
445 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
446 struct rtw89_pci_tx_ring *tx_ring,
447 struct rtw89_pci_tx_wd *txwd, u16 seq,
448 u8 tx_status)
449 {
450 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
451 struct rtw89_pci_tx_data *tx_data;
452 struct sk_buff *skb, *tmp;
453 u8 txch = tx_ring->txch;
454
455 if (!list_empty(&txwd->list)) {
456 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
457 /* In low power mode, an RPP can be received before the TX BD is
458 * updated. In normal mode this should not happen, so warn about it.
459 */
460 if (!rtwpci->low_power && !list_empty(&txwd->list))
461 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
462 txch, seq);
463 }
464
465 skb_queue_walk_safe(&txwd->queue, skb, tmp) {
466 skb_unlink(skb, &txwd->queue);
467
468 tx_data = RTW89_PCI_TX_SKB_CB(skb);
469 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
470 DMA_TO_DEVICE);
471
472 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
473 }
474
475 if (list_empty(&txwd->list))
476 rtw89_pci_enqueue_txwd(tx_ring, txwd);
477 }
478
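/* Handle one RPP (TX release report) entry: look up the TXWD page by
 * sequence number and queue selection, then complete and free the skbs
 * attached to it with the reported TX status.
 */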
479 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
480 struct rtw89_pci_rpp_fmt *rpp)
481 {
482 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
483 struct rtw89_pci_tx_ring *tx_ring;
484 struct rtw89_pci_tx_wd_ring *wd_ring;
485 struct rtw89_pci_tx_wd *txwd;
486 u16 seq;
487 u8 qsel, tx_status, txch;
488
489 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
490 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
491 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
492 txch = rtw89_core_get_ch_dma(rtwdev, qsel);
493
494 if (txch == RTW89_TXCH_CH12) {
495 rtw89_warn(rtwdev, "should be no fwcmd release report\n");
496 return;
497 }
498
499 tx_ring = &rtwpci->tx_rings[txch];
500 wd_ring = &tx_ring->wd_ring;
501 txwd = &wd_ring->pages[seq];
502
503 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
504 }
505
506 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
507 struct rtw89_pci_tx_ring *tx_ring)
508 {
509 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
510 struct rtw89_pci_tx_wd *txwd;
511 int i;
512
513 for (i = 0; i < wd_ring->page_num; i++) {
514 txwd = &wd_ring->pages[i];
515
516 if (!list_empty(&txwd->list))
517 continue;
518
519 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
520 }
521 }
522
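/* Process one RPQ buffer. It must be a single-segment frame (both FS
 * and LS set) whose payload, after the RX descriptor, is an array of
 * RPP entries; each entry releases the corresponding TXWD and its skbs.
 */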
523 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
524 struct rtw89_pci_rx_ring *rx_ring,
525 u32 max_cnt)
526 {
527 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
528 struct rtw89_pci_rx_info *rx_info;
529 struct rtw89_pci_rpp_fmt *rpp;
530 struct rtw89_rx_desc_info desc_info = {};
531 struct sk_buff *skb;
532 u32 cnt = 0;
533 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
534 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
535 u32 offset;
536 int ret;
537
538 skb = rx_ring->buf[bd_ring->wp];
539 rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
540
541 ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
542 if (ret) {
543 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
544 bd_ring->wp, ret);
545 goto err_sync_device;
546 }
547
548 rx_info = RTW89_PCI_RX_SKB_CB(skb);
549 if (!rx_info->fs || !rx_info->ls) {
550 rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
551 return cnt;
552 }
553
554 rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
555
556 /* first segment has RX desc */
557 offset = desc_info.offset + desc_info.rxd_len;
558 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
559 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
560 rtw89_pci_release_rpp(rtwdev, rpp);
561 }
562
563 rtw89_pci_sync_skb_for_device(rtwdev, skb);
564 rtw89_pci_rxbd_increase(rx_ring, 1);
565 cnt++;
566
567 return cnt;
568
569 err_sync_device:
570 rtw89_pci_sync_skb_for_device(rtwdev, skb);
571 return 0;
572 }
573
574 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
575 struct rtw89_pci_rx_ring *rx_ring,
576 u32 cnt)
577 {
578 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
579 u32 release_cnt;
580
581 while (cnt) {
582 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
583 if (!release_cnt) {
584 rtw89_err(rtwdev, "failed to release TX skbs\n");
585
586 /* skip the remaining RXBD buffers */
587 rtw89_pci_rxbd_increase(rx_ring, cnt);
588 break;
589 }
590
591 cnt -= release_cnt;
592 }
593
594 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
595 }
596
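/* Poll the release report queue (RPQ) to reclaim completed TX skbs. The
 * queue is always drained completely, but at most 'budget' is reported
 * back to NAPI.
 */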
597 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
598 struct rtw89_pci *rtwpci, int budget)
599 {
600 struct rtw89_pci_rx_ring *rx_ring;
601 u32 cnt;
602 int work_done;
603
604 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
605
606 spin_lock_bh(&rtwpci->trx_lock);
607
608 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
609 if (cnt == 0)
610 goto out_unlock;
611
612 rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
613
614 out_unlock:
615 spin_unlock_bh(&rtwpci->trx_lock);
616
617 /* always release all RPQ */
618 work_done = min_t(int, cnt, budget);
619 rtwdev->napi_budget_countdown -= work_done;
620
621 return work_done;
622 }
623
624 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
625 struct rtw89_pci *rtwpci)
626 {
627 struct rtw89_pci_rx_ring *rx_ring;
628 struct rtw89_pci_dma_ring *bd_ring;
629 u32 reg_idx;
630 u16 hw_idx, hw_idx_next, host_idx;
631 int i;
632
633 for (i = 0; i < RTW89_RXCH_NUM; i++) {
634 rx_ring = &rtwpci->rx_rings[i];
635 bd_ring = &rx_ring->bd_ring;
636
637 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
638 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
639 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
640 hw_idx_next = (hw_idx + 1) % bd_ring->len;
641
642 if (hw_idx_next == host_idx)
643 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
644
645 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
646 "%d RXD unavailable, idx=0x%08x, len=%d\n",
647 i, reg_idx, bd_ring->len);
648 }
649 }
650
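/* Read the interrupt status registers, mask them with the currently
 * enabled interrupt bits, and acknowledge (write-1-clear) what was read.
 */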
651 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
652 struct rtw89_pci *rtwpci,
653 struct rtw89_pci_isrs *isrs)
654 {
655 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
656 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
657 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
658
659 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
660 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
661 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
662 }
663 EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
664
665 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
666 struct rtw89_pci *rtwpci,
667 struct rtw89_pci_isrs *isrs)
668 {
669 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
670 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
671 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
672 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
673 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
674 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
675 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
676
677 if (isrs->halt_c2h_isrs)
678 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
679 if (isrs->isrs[0])
680 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
681 if (isrs->isrs[1])
682 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
683 }
684 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
685
686 static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
687 {
688 /* write 1 clear */
689 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
690 }
691
692 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
693 {
694 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
695 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
696 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
697 }
698 EXPORT_SYMBOL(rtw89_pci_enable_intr);
699
700 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
701 {
702 rtw89_write32(rtwdev, R_AX_HIMR0, 0);
703 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
704 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
705 }
706 EXPORT_SYMBOL(rtw89_pci_disable_intr);
707
708 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
709 {
710 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
711 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
712 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
713 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
714 }
715 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
716
717 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
718 {
719 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
720 }
721 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
722
723 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
724 {
725 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
726 unsigned long flags;
727
728 spin_lock_irqsave(&rtwpci->irq_lock, flags);
729 rtw89_chip_disable_intr(rtwdev, rtwpci);
730 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
731 rtw89_chip_enable_intr(rtwdev, rtwpci);
732 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
733 }
734
735 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
736 {
737 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
738 unsigned long flags;
739
740 spin_lock_irqsave(&rtwpci->irq_lock, flags);
741 rtw89_chip_disable_intr(rtwdev, rtwpci);
742 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
743 rtw89_chip_enable_intr(rtwdev, rtwpci);
744 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
745 }
746
747 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
748 {
749 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
750 int budget = NAPI_POLL_WEIGHT;
751
752 /* To prevent the RXQ from getting stuck due to running out of budget. */
753 rtwdev->napi_budget_countdown = budget;
754
755 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
756 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
757 }
758
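/* Threaded IRQ handler: dispatch the recognized interrupt causes. RX
 * work is normally deferred to NAPI; in low power mode the RPQ/RXQ
 * rings are polled directly here instead.
 */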
759 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
760 {
761 struct rtw89_dev *rtwdev = dev;
762 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
763 struct rtw89_pci_isrs isrs;
764 unsigned long flags;
765
766 spin_lock_irqsave(&rtwpci->irq_lock, flags);
767 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
768 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
769
770 if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
771 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
772
773 if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
774 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
775
776 if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
777 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
778
779 if (unlikely(rtwpci->under_recovery))
780 goto enable_intr;
781
782 if (unlikely(rtwpci->low_power)) {
783 rtw89_pci_low_power_interrupt_handler(rtwdev);
784 goto enable_intr;
785 }
786
787 if (likely(rtwpci->running)) {
788 local_bh_disable();
789 napi_schedule(&rtwdev->napi);
790 local_bh_enable();
791 }
792
793 return IRQ_HANDLED;
794
795 enable_intr:
796 spin_lock_irqsave(&rtwpci->irq_lock, flags);
797 if (likely(rtwpci->running))
798 rtw89_chip_enable_intr(rtwdev, rtwpci);
799 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
800 return IRQ_HANDLED;
801 }
802
803 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
804 {
805 struct rtw89_dev *rtwdev = dev;
806 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
807 unsigned long flags;
808 irqreturn_t irqret = IRQ_WAKE_THREAD;
809
810 spin_lock_irqsave(&rtwpci->irq_lock, flags);
811
812 /* If an interrupt event is already in flight, it can still trigger an
813 * interrupt even though pci_stop() has been done to turn off the IMR.
814 */
815 if (unlikely(!rtwpci->running)) {
816 irqret = IRQ_HANDLED;
817 goto exit;
818 }
819
820 rtw89_chip_disable_intr(rtwdev, rtwpci);
821 exit:
822 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
823
824 return irqret;
825 }
826
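/* Helpers to build the per-channel DMA register address tables below.
 * The optional trailing argument selects the _V1 register names; the
 * TYPE1 variant additionally applies it to the NUM/IDX registers (used
 * for CH10 and CH11).
 */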
827 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
828 [RTW89_TXCH_##txch] = { \
829 .num = R_AX_##txch##_TXBD_NUM ##v, \
830 .idx = R_AX_##txch##_TXBD_IDX ##v, \
831 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
832 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
833 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
834 }
835
836 #define DEF_TXCHADDRS(info, txch, v...) \
837 [RTW89_TXCH_##txch] = { \
838 .num = R_AX_##txch##_TXBD_NUM, \
839 .idx = R_AX_##txch##_TXBD_IDX, \
840 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
841 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
842 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
843 }
844
845 #define DEF_RXCHADDRS(info, rxch, v...) \
846 [RTW89_RXCH_##rxch] = { \
847 .num = R_AX_##rxch##_RXBD_NUM ##v, \
848 .idx = R_AX_##rxch##_RXBD_IDX ##v, \
849 .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
850 .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
851 }
852
853 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
854 .tx = {
855 DEF_TXCHADDRS(info, ACH0),
856 DEF_TXCHADDRS(info, ACH1),
857 DEF_TXCHADDRS(info, ACH2),
858 DEF_TXCHADDRS(info, ACH3),
859 DEF_TXCHADDRS(info, ACH4),
860 DEF_TXCHADDRS(info, ACH5),
861 DEF_TXCHADDRS(info, ACH6),
862 DEF_TXCHADDRS(info, ACH7),
863 DEF_TXCHADDRS(info, CH8),
864 DEF_TXCHADDRS(info, CH9),
865 DEF_TXCHADDRS_TYPE1(info, CH10),
866 DEF_TXCHADDRS_TYPE1(info, CH11),
867 DEF_TXCHADDRS(info, CH12),
868 },
869 .rx = {
870 DEF_RXCHADDRS(info, RXQ),
871 DEF_RXCHADDRS(info, RPQ),
872 },
873 };
874 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
875
876 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
877 .tx = {
878 DEF_TXCHADDRS(info, ACH0, _V1),
879 DEF_TXCHADDRS(info, ACH1, _V1),
880 DEF_TXCHADDRS(info, ACH2, _V1),
881 DEF_TXCHADDRS(info, ACH3, _V1),
882 DEF_TXCHADDRS(info, ACH4, _V1),
883 DEF_TXCHADDRS(info, ACH5, _V1),
884 DEF_TXCHADDRS(info, ACH6, _V1),
885 DEF_TXCHADDRS(info, ACH7, _V1),
886 DEF_TXCHADDRS(info, CH8, _V1),
887 DEF_TXCHADDRS(info, CH9, _V1),
888 DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
889 DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
890 DEF_TXCHADDRS(info, CH12, _V1),
891 },
892 .rx = {
893 DEF_RXCHADDRS(info, RXQ, _V1),
894 DEF_RXCHADDRS(info, RPQ, _V1),
895 },
896 };
897 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
898
899 #undef DEF_TXCHADDRS_TYPE1
900 #undef DEF_TXCHADDRS
901 #undef DEF_RXCHADDRS
902
903 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
904 enum rtw89_tx_channel txch,
905 const struct rtw89_pci_ch_dma_addr **addr)
906 {
907 const struct rtw89_pci_info *info = rtwdev->pci_info;
908
909 if (txch >= RTW89_TXCH_NUM)
910 return -EINVAL;
911
912 *addr = &info->dma_addr_set->tx[txch];
913
914 return 0;
915 }
916
917 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
918 enum rtw89_rx_channel rxch,
919 const struct rtw89_pci_ch_dma_addr **addr)
920 {
921 const struct rtw89_pci_info *info = rtwdev->pci_info;
922
923 if (rxch >= RTW89_RXCH_NUM)
924 return -EINVAL;
925
926 *addr = &info->dma_addr_set->rx[rxch];
927
928 return 0;
929 }
930
931 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
932 {
933 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
934
935 /* one descriptor is reserved to tell whether the ring is full */
936 if (bd_ring->rp > bd_ring->wp)
937 return bd_ring->rp - bd_ring->wp - 1;
938
939 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
940 }
941
942 static
943 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
944 {
945 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
946 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
947 u32 cnt;
948
949 spin_lock_bh(&rtwpci->trx_lock);
950 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
951 cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
952 spin_unlock_bh(&rtwpci->trx_lock);
953
954 return cnt;
955 }
956
957 static
958 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
959 u8 txch)
960 {
961 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
962 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
963 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
964 u32 cnt;
965
966 spin_lock_bh(&rtwpci->trx_lock);
967 cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
968 cnt = min(cnt, wd_ring->curr_num);
969 spin_unlock_bh(&rtwpci->trx_lock);
970
971 return cnt;
972 }
973
974 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
975 u8 txch)
976 {
977 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
978 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
979 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
980 const struct rtw89_chip_info *chip = rtwdev->chip;
981 u32 bd_cnt, wd_cnt, min_cnt = 0;
982 struct rtw89_pci_rx_ring *rx_ring;
983 enum rtw89_debug_mask debug_mask;
984 u32 cnt;
985
986 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
987
988 spin_lock_bh(&rtwpci->trx_lock);
989 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
990 wd_cnt = wd_ring->curr_num;
991
992 if (wd_cnt == 0 || bd_cnt == 0) {
993 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
994 if (cnt)
995 rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
996 else if (wd_cnt == 0)
997 goto out_unlock;
998
999 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1000 if (bd_cnt == 0)
1001 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
1002 }
1003
1004 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1005 wd_cnt = wd_ring->curr_num;
1006 min_cnt = min(bd_cnt, wd_cnt);
1007 if (min_cnt == 0) {
1008 /* This message can be frequently shown in low power mode or
1009 * high traffic with small FIFO chips, and we have recognized it as normal
1010 * behavior, so print with mask RTW89_DBG_TXRX in these situations.
1011 */
1012 if (rtwpci->low_power || chip->small_fifo_size)
1013 debug_mask = RTW89_DBG_TXRX;
1014 else
1015 debug_mask = RTW89_DBG_UNEXP;
1016
1017 rtw89_debug(rtwdev, debug_mask,
1018 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
1019 wd_cnt, bd_cnt);
1020 }
1021
1022 out_unlock:
1023 spin_unlock_bh(&rtwpci->trx_lock);
1024
1025 return min_cnt;
1026 }
1027
1028 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1029 u8 txch)
1030 {
1031 if (rtwdev->hci.paused)
1032 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
1033
1034 if (txch == RTW89_TXCH_CH12)
1035 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
1036
1037 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
1038 }
1039
1040 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
1041 {
1042 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1043 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1044 u32 host_idx, addr;
1045
1046 spin_lock_bh(&rtwpci->trx_lock);
1047
1048 addr = bd_ring->addr.idx;
1049 host_idx = bd_ring->wp;
1050 rtw89_write16(rtwdev, addr, host_idx);
1051
1052 spin_unlock_bh(&rtwpci->trx_lock);
1053 }
1054
1055 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
1056 int n_txbd)
1057 {
1058 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1059 u32 host_idx, len;
1060
1061 len = bd_ring->len;
1062 host_idx = bd_ring->wp + n_txbd;
1063 host_idx = host_idx < len ? host_idx : host_idx - len;
1064
1065 bd_ring->wp = host_idx;
1066 }
1067
1068 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
1069 {
1070 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1071 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1072
1073 if (rtwdev->hci.paused) {
1074 set_bit(txch, rtwpci->kick_map);
1075 return;
1076 }
1077
1078 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1079 }
1080
1081 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
1082 {
1083 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1084 struct rtw89_pci_tx_ring *tx_ring;
1085 int txch;
1086
1087 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1088 if (!test_and_clear_bit(txch, rtwpci->kick_map))
1089 continue;
1090
1091 tx_ring = &rtwpci->tx_rings[txch];
1092 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1093 }
1094 }
1095
1096 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
1097 {
1098 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1099 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1100 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1101 u32 cur_idx, cur_rp;
1102 u8 i;
1103
1104 /* Because the time taken by the I/O is somewhat dynamic, it's hard to
1105 * define a reasonable fixed total timeout to use the read_poll_timeout*
1106 * helpers. Instead, ensure a reasonable number of polling attempts, so
1107 * just use a for loop with udelay here.
1108 */
1109 for (i = 0; i < 60; i++) {
1110 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
1111 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
1112 if (cur_rp == bd_ring->wp)
1113 return;
1114
1115 udelay(1);
1116 }
1117
1118 if (!drop)
1119 rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
1120 }
1121
1122 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
1123 bool drop)
1124 {
1125 const struct rtw89_pci_info *info = rtwdev->pci_info;
1126 u8 i;
1127
1128 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1129 /* It may be unnecessary to flush FWCMD queue. */
1130 if (i == RTW89_TXCH_CH12)
1131 continue;
1132 if (info->tx_dma_ch_mask & BIT(i))
1133 continue;
1134
1135 if (txchs & BIT(i))
1136 __pci_flush_txch(rtwdev, i, drop);
1137 }
1138 }
1139
1140 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
1141 bool drop)
1142 {
1143 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
1144 }
1145
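/* Fill the TX address info entries that follow the WD body/info in a
 * TXWD page. This base version describes the whole skb with a single
 * entry; the v1 version below may split long buffers across several
 * entries.
 */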
1146 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
1147 void *txaddr_info_addr, u32 total_len,
1148 dma_addr_t dma, u8 *add_info_nr)
1149 {
1150 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
1151
1152 txaddr_info->length = cpu_to_le16(total_len);
1153 txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
1154 RTW89_PCI_ADDR_NUM(1));
1155 txaddr_info->dma = cpu_to_le32(dma);
1156
1157 *add_info_nr = 1;
1158
1159 return sizeof(*txaddr_info);
1160 }
1161 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
1162
1163 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
1164 void *txaddr_info_addr, u32 total_len,
1165 dma_addr_t dma, u8 *add_info_nr)
1166 {
1167 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
1168 u32 remain = total_len;
1169 u32 len;
1170 u16 length_option;
1171 int n;
1172
1173 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
1174 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
1175 TXADDR_INFO_LENTHG_V1_MAX : remain;
1176 remain -= len;
1177
1178 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
1179 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
1180 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
1181 txaddr_info->length_opt = cpu_to_le16(length_option);
1182 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
1183 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
1184
1185 dma += len;
1186 txaddr_info++;
1187 }
1188
1189 WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
1190 remain, total_len);
1191
1192 *add_info_nr = n;
1193
1194 return n * sizeof(*txaddr_info);
1195 }
1196 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
1197
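/* Map the skb for DMA and build one TXWD page for a data frame: WD body
 * (plus optional WD info), the WP sequence info, and the address info
 * entries. The skb stays queued on the TXWD until its release report
 * arrives.
 */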
1198 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
1199 struct rtw89_pci_tx_ring *tx_ring,
1200 struct rtw89_pci_tx_wd *txwd,
1201 struct rtw89_core_tx_request *tx_req)
1202 {
1203 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1204 const struct rtw89_chip_info *chip = rtwdev->chip;
1205 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1206 struct rtw89_txwd_info *txwd_info;
1207 struct rtw89_pci_tx_wp_info *txwp_info;
1208 void *txaddr_info_addr;
1209 struct pci_dev *pdev = rtwpci->pdev;
1210 struct sk_buff *skb = tx_req->skb;
1211 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1212 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
1213 bool en_wd_info = desc_info->en_wd_info;
1214 u32 txwd_len;
1215 u32 txwp_len;
1216 u32 txaddr_info_len;
1217 dma_addr_t dma;
1218 int ret;
1219
1220 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1221 if (dma_mapping_error(&pdev->dev, dma)) {
1222 rtw89_err(rtwdev, "failed to map skb dma data\n");
1223 ret = -EBUSY;
1224 goto err;
1225 }
1226
1227 tx_data->dma = dma;
1228 rcu_assign_pointer(skb_data->wait, NULL);
1229
1230 txwp_len = sizeof(*txwp_info);
1231 txwd_len = chip->txwd_body_size;
1232 txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
1233
1234 #if defined(__linux__)
1235 txwp_info = txwd->vaddr + txwd_len;
1236 #elif defined(__FreeBSD__)
1237 txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len);
1238 #endif
1239 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
1240 txwp_info->seq1 = 0;
1241 txwp_info->seq2 = 0;
1242 txwp_info->seq3 = 0;
1243
1244 tx_ring->tx_cnt++;
1245 #if defined(__linux__)
1246 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
1247 #elif defined(__FreeBSD__)
1248 txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len;
1249 #endif
1250 txaddr_info_len =
1251 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
1252 dma, &desc_info->addr_info_nr);
1253
1254 txwd->len = txwd_len + txwp_len + txaddr_info_len;
1255
1256 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1257
1258 skb_queue_tail(&txwd->queue, skb);
1259
1260 return 0;
1261
1262 err:
1263 return ret;
1264 }
1265
1266 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1267 struct rtw89_pci_tx_ring *tx_ring,
1268 struct rtw89_pci_tx_bd_32 *txbd,
1269 struct rtw89_core_tx_request *tx_req)
1270 {
1271 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1272 const struct rtw89_chip_info *chip = rtwdev->chip;
1273 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1274 void *txdesc;
1275 int txdesc_size = chip->h2c_desc_size;
1276 struct pci_dev *pdev = rtwpci->pdev;
1277 struct sk_buff *skb = tx_req->skb;
1278 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1279 dma_addr_t dma;
1280
1281 txdesc = skb_push(skb, txdesc_size);
1282 memset(txdesc, 0, txdesc_size);
1283 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
1284
1285 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1286 if (dma_mapping_error(&pdev->dev, dma)) {
1287 rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1288 return -EBUSY;
1289 }
1290
1291 tx_data->dma = dma;
1292 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1293 txbd->length = cpu_to_le16(skb->len);
1294 txbd->dma = cpu_to_le32(tx_data->dma);
1295 skb_queue_tail(&rtwpci->h2c_queue, skb);
1296
1297 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1298
1299 return 0;
1300 }
1301
1302 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1303 struct rtw89_pci_tx_ring *tx_ring,
1304 struct rtw89_pci_tx_bd_32 *txbd,
1305 struct rtw89_core_tx_request *tx_req)
1306 {
1307 struct rtw89_pci_tx_wd *txwd;
1308 int ret;
1309
1310 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
1311 * buffer with WD BODY only. So here we don't need to check the free
1312 * pages of the wd ring.
1313 */
1314 if (tx_ring->txch == RTW89_TXCH_CH12)
1315 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1316
1317 txwd = rtw89_pci_dequeue_txwd(tx_ring);
1318 if (!txwd) {
1319 rtw89_err(rtwdev, "no available TXWD\n");
1320 ret = -ENOSPC;
1321 goto err;
1322 }
1323
1324 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1325 if (ret) {
1326 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1327 goto err_enqueue_wd;
1328 }
1329
1330 list_add_tail(&txwd->list, &tx_ring->busy_pages);
1331
1332 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1333 txbd->length = cpu_to_le16(txwd->len);
1334 txbd->dma = cpu_to_le32(txwd->paddr);
1335
1336 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1337
1338 return 0;
1339
1340 err_enqueue_wd:
1341 rtw89_pci_enqueue_txwd(tx_ring, txwd);
1342 err:
1343 return ret;
1344 }
1345
1346 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1347 u8 txch)
1348 {
1349 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1350 struct rtw89_pci_tx_ring *tx_ring;
1351 struct rtw89_pci_tx_bd_32 *txbd;
1352 u32 n_avail_txbd;
1353 int ret = 0;
1354
1355 /* check the tx type and dma channel for fw cmd queue */
1356 if ((txch == RTW89_TXCH_CH12 ||
1357 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1358 (txch != RTW89_TXCH_CH12 ||
1359 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1360 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1361 return -EINVAL;
1362 }
1363
1364 tx_ring = &rtwpci->tx_rings[txch];
1365 spin_lock_bh(&rtwpci->trx_lock);
1366
1367 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1368 if (n_avail_txbd == 0) {
1369 rtw89_err(rtwdev, "no available TXBD\n");
1370 ret = -ENOSPC;
1371 goto err_unlock;
1372 }
1373
1374 txbd = rtw89_pci_get_next_txbd(tx_ring);
1375 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1376 if (ret) {
1377 rtw89_err(rtwdev, "failed to submit TXBD\n");
1378 goto err_unlock;
1379 }
1380
1381 spin_unlock_bh(&rtwpci->trx_lock);
1382 return 0;
1383
1384 err_unlock:
1385 spin_unlock_bh(&rtwpci->trx_lock);
1386 return ret;
1387 }
1388
1389 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1390 {
1391 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1392 int ret;
1393
1394 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1395 if (ret) {
1396 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
1397 return ret;
1398 }
1399
1400 return 0;
1401 }
1402
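/* Per-channel TX BDRAM layout written to the BDRAM control registers in
 * rtw89_pci_reset_trx_rings(); start_idx/max_num/min_num appear to
 * describe the slot range and minimum reservation for each channel.
 */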
1403 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
1404 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
1405 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
1406 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1407 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1408 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1409 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1410 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1411 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1412 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
1413 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
1414 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1415 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1416 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1417 };
1418 EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
1419
1420 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
1421 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
1422 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
1423 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1424 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1425 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
1426 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
1427 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
1428 };
1429 EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1430
1431 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1432 {
1433 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1434 const struct rtw89_pci_info *info = rtwdev->pci_info;
1435 const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
1436 struct rtw89_pci_tx_ring *tx_ring;
1437 struct rtw89_pci_rx_ring *rx_ring;
1438 struct rtw89_pci_dma_ring *bd_ring;
1439 const struct rtw89_pci_bd_ram *bd_ram;
1440 u32 addr_num;
1441 u32 addr_bdram;
1442 u32 addr_desa_l;
1443 u32 val32;
1444 int i;
1445
1446 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1447 if (info->tx_dma_ch_mask & BIT(i))
1448 continue;
1449
1450 tx_ring = &rtwpci->tx_rings[i];
1451 bd_ring = &tx_ring->bd_ring;
1452 bd_ram = &bd_ram_table[i];
1453 addr_num = bd_ring->addr.num;
1454 addr_bdram = bd_ring->addr.bdram;
1455 addr_desa_l = bd_ring->addr.desa_l;
1456 bd_ring->wp = 0;
1457 bd_ring->rp = 0;
1458
1459 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1460 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1461 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1462
1463 rtw89_write16(rtwdev, addr_num, bd_ring->len);
1464 rtw89_write32(rtwdev, addr_bdram, val32);
1465 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1466 }
1467
1468 for (i = 0; i < RTW89_RXCH_NUM; i++) {
1469 rx_ring = &rtwpci->rx_rings[i];
1470 bd_ring = &rx_ring->bd_ring;
1471 addr_num = bd_ring->addr.num;
1472 addr_desa_l = bd_ring->addr.desa_l;
1473 bd_ring->wp = 0;
1474 bd_ring->rp = 0;
1475 rx_ring->diliver_skb = NULL;
1476 rx_ring->diliver_desc.ready = false;
1477
1478 rtw89_write16(rtwdev, addr_num, bd_ring->len);
1479 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1480 }
1481 }
1482
1483 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1484 struct rtw89_pci_tx_ring *tx_ring)
1485 {
1486 rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1487 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1488 }
1489
1490 static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1491 {
1492 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1493 const struct rtw89_pci_info *info = rtwdev->pci_info;
1494 int txch;
1495
1496 rtw89_pci_reset_trx_rings(rtwdev);
1497
1498 spin_lock_bh(&rtwpci->trx_lock);
1499 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1500 if (info->tx_dma_ch_mask & BIT(txch))
1501 continue;
1502 if (txch == RTW89_TXCH_CH12) {
1503 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1504 skb_queue_len(&rtwpci->h2c_queue), true);
1505 continue;
1506 }
1507 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1508 }
1509 spin_unlock_bh(&rtwpci->trx_lock);
1510 }
1511
1512 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1513 {
1514 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1515 unsigned long flags;
1516
1517 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1518 rtwpci->running = true;
1519 rtw89_chip_enable_intr(rtwdev, rtwpci);
1520 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1521 }
1522
1523 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1524 {
1525 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1526 unsigned long flags;
1527
1528 spin_lock_irqsave(&rtwpci->irq_lock, flags);
1529 rtwpci->running = false;
1530 rtw89_chip_disable_intr(rtwdev, rtwpci);
1531 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1532 }
1533
1534 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1535 {
1536 rtw89_core_napi_start(rtwdev);
1537 rtw89_pci_enable_intr_lock(rtwdev);
1538
1539 return 0;
1540 }
1541
1542 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1543 {
1544 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1545 struct pci_dev *pdev = rtwpci->pdev;
1546
1547 rtw89_pci_disable_intr_lock(rtwdev);
1548 synchronize_irq(pdev->irq);
1549 rtw89_core_napi_stop(rtwdev);
1550 }
1551
1552 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1553 {
1554 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1555 struct pci_dev *pdev = rtwpci->pdev;
1556
1557 if (pause) {
1558 rtw89_pci_disable_intr_lock(rtwdev);
1559 synchronize_irq(pdev->irq);
1560 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1561 napi_synchronize(&rtwdev->napi);
1562 } else {
1563 rtw89_pci_enable_intr_lock(rtwdev);
1564 rtw89_pci_tx_kick_off_pending(rtwdev);
1565 }
1566 }
1567
1568 static
1569 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1570 {
1571 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1572 const struct rtw89_pci_info *info = rtwdev->pci_info;
1573 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1574 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1575 struct rtw89_pci_tx_ring *tx_ring;
1576 struct rtw89_pci_rx_ring *rx_ring;
1577 int i;
1578
1579 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1580 return;
1581
1582 for (i = 0; i < RTW89_TXCH_NUM; i++) {
1583 tx_ring = &rtwpci->tx_rings[i];
1584 tx_ring->bd_ring.addr.idx = low_power ?
1585 bd_idx_addr->tx_bd_addrs[i] :
1586 dma_addr_set->tx[i].idx;
1587 }
1588
1589 for (i = 0; i < RTW89_RXCH_NUM; i++) {
1590 rx_ring = &rtwpci->rx_rings[i];
1591 rx_ring->bd_ring.addr.idx = low_power ?
1592 bd_idx_addr->rx_bd_addrs[i] :
1593 dma_addr_set->rx[i].idx;
1594 }
1595 }
1596
1597 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1598 {
1599 enum rtw89_pci_intr_mask_cfg cfg;
1600
1601 WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1602
1603 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1604 rtw89_chip_config_intr_mask(rtwdev, cfg);
1605 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1606 }
1607
1608 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1609
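/* CMAC registers can read back as RTW89_R32_DEAD (likely because the
 * CMAC clocks are gated); retry a bounded number of times, re-enabling
 * the clocks via R_AX_CK_EN between attempts.
 */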
1610 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1611 {
1612 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1613 #if defined(__linux__)
1614 u32 val = readl(rtwpci->mmap + addr);
1615 #elif defined(__FreeBSD__)
1616 u32 val;
1617
1618 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
1619 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
1620 #endif
1621 int count;
1622
1623 for (count = 0; ; count++) {
1624 if (val != RTW89_R32_DEAD)
1625 return val;
1626 if (count >= MAC_REG_POOL_COUNT) {
1627 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1628 return RTW89_R32_DEAD;
1629 }
1630 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1631 #if defined(__linux__)
1632 val = readl(rtwpci->mmap + addr);
1633 #elif defined(__FreeBSD__)
1634 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
1635 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
1636 #endif
1637 }
1638
1639 return val;
1640 }
1641
1642 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1643 {
1644 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1645 u32 addr32, val32, shift;
1646
1647 if (!ACCESS_CMAC(addr))
1648 #if defined(__linux__)
1649 return readb(rtwpci->mmap + addr);
1650 #elif defined(__FreeBSD__)
1651 {
1652 u8 val;
1653
1654 val = bus_read_1((struct resource *)rtwpci->mmap, addr);
1655 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
1656 return (val);
1657 }
1658 #endif
1659
1660 addr32 = addr & ~0x3;
1661 shift = (addr & 0x3) * 8;
1662 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1663 return val32 >> shift;
1664 }
1665
1666 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1667 {
1668 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1669 u32 addr32, val32, shift;
1670
1671 if (!ACCESS_CMAC(addr))
1672 #if defined(__linux__)
1673 return readw(rtwpci->mmap + addr);
1674 #elif defined(__FreeBSD__)
1675 {
1676 u16 val;
1677
1678 val = bus_read_2((struct resource *)rtwpci->mmap, addr);
1679 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
1680 return (val);
1681 }
1682 #endif
1683
1684 addr32 = addr & ~0x3;
1685 shift = (addr & 0x3) * 8;
1686 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1687 return val32 >> shift;
1688 }
1689
1690 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1691 {
1692 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1693
1694 if (!ACCESS_CMAC(addr))
1695 #if defined(__linux__)
1696 return readl(rtwpci->mmap + addr);
1697 #elif defined(__FreeBSD__)
1698 {
1699 u32 val;
1700
1701 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
1702 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
1703 return (val);
1704 }
1705 #endif
1706
1707 return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1708 }
1709
1710 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1711 {
1712 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1713
1714 #if defined(__linux__)
1715 writeb(data, rtwpci->mmap + addr);
1716 #elif defined(__FreeBSD__)
1717 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data);
1718 return (bus_write_1((struct resource *)rtwpci->mmap, addr, data));
1719 #endif
1720 }
1721
1722 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1723 {
1724 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1725
1726 #if defined(__linux__)
1727 writew(data, rtwpci->mmap + addr);
1728 #elif defined(__FreeBSD__)
1729 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data);
1730 return (bus_write_2((struct resource *)rtwpci->mmap, addr, data));
1731 #endif
1732 }
1733
1734 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1735 {
1736 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1737
1738 #if defined(__linux__)
1739 writel(data, rtwpci->mmap + addr);
1740 #elif defined(__FreeBSD__)
1741 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data);
1742 return (bus_write_4((struct resource *)rtwpci->mmap, addr, data));
1743 #endif
1744 }
1745
1746 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
1747 {
1748 const struct rtw89_pci_info *info = rtwdev->pci_info;
1749
1750 if (enable)
1751 rtw89_write32_set(rtwdev, info->init_cfg_reg,
1752 info->rxhci_en_bit | info->txhci_en_bit);
1753 else
1754 rtw89_write32_clr(rtwdev, info->init_cfg_reg,
1755 info->rxhci_en_bit | info->txhci_en_bit);
1756 }
1757
1758 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
1759 {
1760 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1761 u32 reg, mask;
1762
1763 if (chip_id == RTL8852C) {
1764 reg = R_AX_HAXI_INIT_CFG1;
1765 mask = B_AX_STOP_AXI_MST;
1766 } else {
1767 reg = R_AX_PCIE_DMA_STOP1;
1768 mask = B_AX_STOP_PCIEIO;
1769 }
1770
1771 if (enable)
1772 rtw89_write32_clr(rtwdev, reg, mask);
1773 else
1774 rtw89_write32_set(rtwdev, reg, mask);
1775 }
1776
1777 static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1778 {
1779 rtw89_pci_ctrl_dma_io(rtwdev, enable);
1780 rtw89_pci_ctrl_dma_trx(rtwdev, enable);
1781 }
1782
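/* Comment added for clarity (not in the original source): the MDIO
 * helpers below access the PCIe PHY registers indirectly. Select the
 * register address and PHY page for the given link speed, latch the
 * read/write flag, then poll until hardware clears it.
 */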
1783 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1784 {
1785 u16 val;
1786
1787 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1788
1789 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1790 switch (speed) {
1791 case PCIE_PHY_GEN1:
1792 if (addr < 0x20)
1793 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1794 else
1795 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1796 break;
1797 case PCIE_PHY_GEN2:
1798 if (addr < 0x20)
1799 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1800 else
1801 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1802 break;
1803 default:
1804 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
1805 return -EINVAL;
1806 }
1807 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1808 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1809
1810 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1811 false, rtwdev, R_AX_MDIO_CFG);
1812 }
1813
1814 static int
1815 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1816 {
1817 int ret;
1818
1819 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1820 if (ret) {
1821 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1822 return ret;
1823 }
1824 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1825
1826 return 0;
1827 }
1828
1829 static int
1830 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1831 {
1832 int ret;
1833
1834 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1835 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1836 if (ret) {
1837 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1838 return ret;
1839 }
1840
1841 return 0;
1842 }
1843
1844 static int
1845 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1846 {
1847 u32 shift;
1848 int ret;
1849 u16 val;
1850
1851 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1852 if (ret)
1853 return ret;
1854
1855 shift = __ffs(mask);
1856 val &= ~mask;
1857 val |= ((data << shift) & mask);
1858
1859 ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1860 if (ret)
1861 return ret;
1862
1863 return 0;
1864 }
1865
1866 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1867 {
1868 int ret;
1869 u16 val;
1870
1871 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1872 if (ret)
1873 return ret;
1874 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1875 if (ret)
1876 return ret;
1877
1878 return 0;
1879 }
1880
1881 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1882 {
1883 int ret;
1884 u16 val;
1885
1886 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1887 if (ret)
1888 return ret;
1889 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1890 if (ret)
1891 return ret;
1892
1893 return 0;
1894 }
1895
1896 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1897 u8 data)
1898 {
1899 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1900 struct pci_dev *pdev = rtwpci->pdev;
1901
1902 return pci_write_config_byte(pdev, addr, data);
1903 }
1904
1905 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1906 u8 *value)
1907 {
1908 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1909 struct pci_dev *pdev = rtwpci->pdev;
1910
1911 return pci_read_config_byte(pdev, addr, value);
1912 }
1913
1914 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
1915 u8 bit)
1916 {
1917 u8 value;
1918 int ret;
1919
1920 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1921 if (ret)
1922 return ret;
1923
1924 value |= bit;
1925 ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1926
1927 return ret;
1928 }
1929
1930 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
1931 u8 bit)
1932 {
1933 u8 value;
1934 int ret;
1935
1936 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1937 if (ret)
1938 return ret;
1939
1940 value &= ~bit;
1941 ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1942
1943 return ret;
1944 }
1945
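/* Comment added for clarity (not in the original source): pulse the
 * clock calibration counter and read back the 12-bit count used as the
 * reference-clock target; 0 or 0xFFF indicates the measurement failed.
 */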
1946 static int
1947 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
1948 {
1949 u16 val, tar;
1950 int ret;
1951
1952 /* Enable counter */
1953 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
1954 if (ret)
1955 return ret;
1956 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1957 phy_rate);
1958 if (ret)
1959 return ret;
1960 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
1961 phy_rate);
1962 if (ret)
1963 return ret;
1964
1965 fsleep(300);
1966
1967 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
1968 if (ret)
1969 return ret;
1970 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1971 phy_rate);
1972 if (ret)
1973 return ret;
1974
1975 tar = tar & 0x0FFF;
1976 if (tar == 0 || tar == 0x0FFF) {
1977 rtw89_err(rtwdev, "[ERR]Get target failed.\n");
1978 return -EINVAL;
1979 }
1980
1981 *target = tar;
1982
1983 return 0;
1984 }
1985
1986 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
1987 {
1988 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1989 int ret;
1990
1991 if (chip_id != RTL8852B && chip_id != RTL8851B)
1992 return 0;
1993
1994 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
1995 PCIE_AUTOK_4, PCIE_PHY_GEN1);
1996 return ret;
1997 }
1998
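/* Comment added for clarity (not in the original source): reference
 * clock auto-calibration for 8852B/8851B. Temporarily disable L1,
 * measure the target count, derive a divider and margin from it,
 * program RAC_SET_PPR_V1, enable calibration, then restore the L1
 * setting on exit.
 */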
1999 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2000 {
2001 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2002 enum rtw89_pcie_phy phy_rate;
2003 u16 val16, mgn_set, div_set, tar;
2004 u8 val8, bdr_ori;
2005 bool l1_flag = false;
2006 int ret = 0;
2007
2008 if (chip_id != RTL8852B && chip_id != RTL8851B)
2009 return 0;
2010
2011 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2012 if (ret) {
2013 rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2014 RTW89_PCIE_PHY_RATE);
2015 return ret;
2016 }
2017
2018 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2019 phy_rate = PCIE_PHY_GEN1;
2020 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2021 phy_rate = PCIE_PHY_GEN2;
2022 } else {
2023 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
2024 return -EOPNOTSUPP;
2025 }
2026 /* Disable L1BD */
2027 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2028 if (ret) {
2029 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2030 return ret;
2031 }
2032
2033 if (bdr_ori & RTW89_PCIE_BIT_L1) {
2034 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2035 bdr_ori & ~RTW89_PCIE_BIT_L1);
2036 if (ret) {
2037 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2038 RTW89_PCIE_L1_CTRL);
2039 return ret;
2040 }
2041 l1_flag = true;
2042 }
2043
2044 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2045 if (ret) {
2046 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2047 goto end;
2048 }
2049
2050 if (val16 & B_AX_CALIB_EN) {
2051 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2052 val16 & ~B_AX_CALIB_EN, phy_rate);
2053 if (ret) {
2054 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2055 goto end;
2056 }
2057 }
2058
2059 if (!autook_en)
2060 goto end;
2061 /* Set div */
2062 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2063 if (ret) {
2064 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2065 goto end;
2066 }
2067
2068 /* Obtain div and margin */
2069 ret = __get_target(rtwdev, &tar, phy_rate);
2070 if (ret) {
2071 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2072 goto end;
2073 }
2074
2075 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2076
2077 if (mgn_set >= 128) {
2078 div_set = 0x0003;
2079 mgn_set = 0x000F;
2080 } else if (mgn_set >= 64) {
2081 div_set = 0x0003;
2082 mgn_set >>= 3;
2083 } else if (mgn_set >= 32) {
2084 div_set = 0x0002;
2085 mgn_set >>= 2;
2086 } else if (mgn_set >= 16) {
2087 div_set = 0x0001;
2088 mgn_set >>= 1;
2089 } else if (mgn_set == 0) {
2090 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
2091 goto end;
2092 } else {
2093 div_set = 0x0000;
2094 }
2095
2096 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2097 if (ret) {
2098 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2099 goto end;
2100 }
2101
2102 val16 |= u16_encode_bits(div_set, B_AX_DIV);
2103
2104 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2105 if (ret) {
2106 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2107 goto end;
2108 }
2109
2110 ret = __get_target(rtwdev, &tar, phy_rate);
2111 if (ret) {
2112 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2113 goto end;
2114 }
2115
2116 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2117 tar, div_set, mgn_set);
2118 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2119 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2120 if (ret) {
2121 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2122 goto end;
2123 }
2124
2125 /* Enable function */
2126 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2127 if (ret) {
2128 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2129 goto end;
2130 }
2131
2132 /* CLK delay = 0 */
2133 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2134 PCIE_CLKDLY_HW_0);
2135
2136 end:
2137 /* Set L1BD to ori */
2138 if (l1_flag) {
2139 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2140 bdr_ori);
2141 if (ret) {
2142 rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2143 RTW89_PCIE_L1_CTRL);
2144 return ret;
2145 }
2146 }
2147
2148 return ret;
2149 }
2150
2151 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2152 {
2153 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2154 int ret;
2155
2156 if (chip_id == RTL8852A) {
2157 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2158 PCIE_PHY_GEN1);
2159 if (ret)
2160 return ret;
2161 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2162 PCIE_PHY_GEN2);
2163 if (ret)
2164 return ret;
2165 } else if (chip_id == RTL8852C) {
2166 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2167 B_AX_DEGLITCH);
2168 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2169 B_AX_DEGLITCH);
2170 }
2171
2172 return 0;
2173 }
2174
2175 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2176 {
2177 if (rtwdev->chip->chip_id != RTL8852A)
2178 return;
2179
2180 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2181 }
2182
2183 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2184 {
2185 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2186
2187 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2188 return;
2189
2190 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2191 }
2192
2193 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2194 {
2195 int ret;
2196
2197 if (rtwdev->chip->chip_id != RTL8852A)
2198 return 0;
2199
2200 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2201 PCIE_PHY_GEN1);
2202 if (ret)
2203 return ret;
2204
2205 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2206 PCIE_PHY_GEN2);
2207 if (ret)
2208 return ret;
2209
2210 return 0;
2211 }
2212
2213 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2214 {
2215 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2216
2217 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2218 return;
2219
2220 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2221 }
2222
2223 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2224 {
2225 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2226
2227 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
2228 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2229 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2230 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2231 B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2232 } else if (rtwdev->chip->chip_id == RTL8852C) {
2233 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2234 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2235 }
2236 }
2237
2238 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2239 {
2240 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2241
2242 if (chip_id != RTL8852B && chip_id != RTL8851B)
2243 return 0;
2244
2245 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2246 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2247 }
2248
2249 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
2250 {
2251 if (pwr_up)
2252 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2253 else
2254 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2255 }
2256
2257 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2258 {
2259 if (rtwdev->chip->chip_id != RTL8852C)
2260 return;
2261
2262 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2263 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2264 }
2265
2266 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2267 {
2268 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2269 return;
2270
2271 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2272 }
2273
2274 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2275 {
2276 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2277 return;
2278
2279 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2280 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2281 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2282 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2283 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2284 }
2285
2286 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2287 {
2288 if (rtwdev->chip->chip_id != RTL8852C)
2289 return;
2290
2291 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2292 }
2293
2294 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2295 {
2296 if (rtwdev->chip->chip_id != RTL8852C)
2297 return;
2298
2299 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2300 }
2301
2302 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2303 {
2304 if (rtwdev->chip->chip_id == RTL8852C)
2305 return;
2306
2307 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2308 B_AX_SIC_EN_FORCE_CLKREQ);
2309 }
2310
2311 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2312 {
2313 const struct rtw89_pci_info *info = rtwdev->pci_info;
2314 u32 lbc;
2315
2316 if (rtwdev->chip->chip_id == RTL8852C)
2317 return;
2318
2319 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2320 if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2321 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2322 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2323 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2324 } else {
2325 lbc &= ~B_AX_LBC_EN;
2326 }
2327 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2328 }
2329
2330 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2331 {
2332 const struct rtw89_pci_info *info = rtwdev->pci_info;
2333 u32 val32;
2334
2335 if (rtwdev->chip->chip_id != RTL8852C)
2336 return;
2337
2338 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2339 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2340 info->io_rcy_tmr);
2341 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2342 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2343 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2344
2345 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2346 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2347 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2348 } else {
2349 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2350 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2351 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2352 }
2353
2354 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2355 }
2356
2357 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2358 {
2359 if (rtwdev->chip->chip_id == RTL8852C)
2360 return;
2361
2362 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2363 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2364
2365 if (rtwdev->chip->chip_id == RTL8852A)
2366 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2367 B_AX_EN_CHKDSC_NO_RX_STUCK);
2368 }
2369
2370 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2371 {
2372 if (rtwdev->chip->chip_id == RTL8852C)
2373 return;
2374
2375 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2376 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2377 }
2378
2379 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
2380 {
2381 const struct rtw89_pci_info *info = rtwdev->pci_info;
2382 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2383 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2384 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2385 B_AX_CLR_CH12_IDX;
2386 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2387 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2388
2389 if (chip_id == RTL8852A || chip_id == RTL8852C)
2390 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2391 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2392 /* clear DMA indexes */
2393 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2394 if (chip_id == RTL8852A || chip_id == RTL8852C)
2395 rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2396 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2397 rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2398 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2399 }
2400
2401 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
2402 {
2403 const struct rtw89_pci_info *info = rtwdev->pci_info;
2404 u32 ret, check, dma_busy;
2405 u32 dma_busy1 = info->dma_busy1.addr;
2406 u32 dma_busy2 = info->dma_busy2_reg;
2407
2408 check = info->dma_busy1.mask;
2409
2410 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2411 10, 100, false, rtwdev, dma_busy1);
2412 if (ret)
2413 return ret;
2414
2415 if (!dma_busy2)
2416 return 0;
2417
2418 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2419
2420 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2421 10, 100, false, rtwdev, dma_busy2);
2422 if (ret)
2423 return ret;
2424
2425 return 0;
2426 }
2427
2428 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
2429 {
2430 const struct rtw89_pci_info *info = rtwdev->pci_info;
2431 u32 ret, check, dma_busy;
2432 u32 dma_busy3 = info->dma_busy3_reg;
2433
2434 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2435
2436 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2437 10, 100, false, rtwdev, dma_busy3);
2438 if (ret)
2439 return ret;
2440
2441 return 0;
2442 }
2443
2444 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2445 {
2446 u32 ret;
2447
2448 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
2449 if (ret) {
2450 rtw89_err(rtwdev, "txdma ch busy\n");
2451 return ret;
2452 }
2453
2454 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
2455 if (ret) {
2456 rtw89_err(rtwdev, "rxdma ch busy\n");
2457 return ret;
2458 }
2459
2460 return 0;
2461 }
2462
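/* Comment added for clarity (not in the original source): apply the
 * per-chip DMA operating mode described by rtw89_pci_info: BD
 * truncation, RXBD mode, TX/RX burst sizes, tag mode, multi-tag count
 * and WD DMA intervals.
 */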
2463 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2464 {
2465 const struct rtw89_pci_info *info = rtwdev->pci_info;
2466 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2467 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2468 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2469 enum mac_ax_tag_mode tag_mode = info->tag_mode;
2470 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2471 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2472 enum mac_ax_tx_burst tx_burst = info->tx_burst;
2473 enum mac_ax_rx_burst rx_burst = info->rx_burst;
2474 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2475 u8 cv = rtwdev->hal.cv;
2476 u32 val32;
2477
2478 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2479 if (chip_id == RTL8852A && cv == CHIP_CBV)
2480 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2481 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2482 if (chip_id == RTL8852A || chip_id == RTL8852B)
2483 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2484 }
2485
2486 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2487 if (chip_id == RTL8852A && cv == CHIP_CBV)
2488 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2489 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2490 if (chip_id == RTL8852A || chip_id == RTL8852B)
2491 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2492 }
2493
2494 if (rxbd_mode == MAC_AX_RXBD_PKT) {
2495 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2496 } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2497 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2498
2499 if (chip_id == RTL8852A || chip_id == RTL8852B)
2500 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2501 B_AX_PCIE_RX_APPLEN_MASK, 0);
2502 }
2503
2504 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2505 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2506 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2507 } else if (chip_id == RTL8852C) {
2508 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2509 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2510 }
2511
2512 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2513 if (tag_mode == MAC_AX_TAG_SGL) {
2514 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2515 ~B_AX_LATENCY_CONTROL;
2516 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2517 } else if (tag_mode == MAC_AX_TAG_MULTI) {
2518 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2519 B_AX_LATENCY_CONTROL;
2520 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2521 }
2522 }
2523
2524 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2525 info->multi_tag_num);
2526
2527 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2528 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2529 wd_dma_idle_intvl);
2530 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2531 wd_dma_act_intvl);
2532 } else if (chip_id == RTL8852C) {
2533 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2534 wd_dma_idle_intvl);
2535 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2536 wd_dma_act_intvl);
2537 }
2538
2539 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2540 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2541 B_AX_HOST_ADDR_INFO_8B_SEL);
2542 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2543 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2544 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2545 B_AX_HOST_ADDR_INFO_8B_SEL);
2546 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2547 }
2548
2549 return 0;
2550 }
2551
2552 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2553 {
2554 const struct rtw89_pci_info *info = rtwdev->pci_info;
2555
2556 if (rtwdev->chip->chip_id == RTL8852A) {
2557 /* ltr sw trigger */
2558 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2559 }
2560 info->ltr_set(rtwdev, false);
2561 rtw89_pci_ctrl_dma_all(rtwdev, false);
2562 rtw89_pci_clr_idx_all(rtwdev);
2563
2564 return 0;
2565 }
2566
2567 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
2568 {
2569 const struct rtw89_pci_info *info = rtwdev->pci_info;
2570 int ret;
2571
2572 rtw89_pci_rxdma_prefth(rtwdev);
2573 rtw89_pci_l1off_pwroff(rtwdev);
2574 rtw89_pci_deglitch_setting(rtwdev);
2575 ret = rtw89_pci_l2_rxen_lat(rtwdev);
2576 if (ret) {
2577 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2578 return ret;
2579 }
2580
2581 rtw89_pci_aphy_pwrcut(rtwdev);
2582 rtw89_pci_hci_ldo(rtwdev);
2583 rtw89_pci_dphy_delay(rtwdev);
2584
2585 ret = rtw89_pci_autok_x(rtwdev);
2586 if (ret) {
2587 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2588 return ret;
2589 }
2590
2591 ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2592 if (ret) {
2593 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2594 return ret;
2595 }
2596
2597 rtw89_pci_power_wake(rtwdev, true);
2598 rtw89_pci_autoload_hang(rtwdev);
2599 rtw89_pci_l12_vmain(rtwdev);
2600 rtw89_pci_gen2_force_ib(rtwdev);
2601 rtw89_pci_l1_ent_lat(rtwdev);
2602 rtw89_pci_wd_exit_l1(rtwdev);
2603 rtw89_pci_set_sic(rtwdev);
2604 rtw89_pci_set_lbc(rtwdev);
2605 rtw89_pci_set_io_rcy(rtwdev);
2606 rtw89_pci_set_dbg(rtwdev);
2607 rtw89_pci_set_keep_reg(rtwdev);
2608
2609 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2610
2611 /* stop DMA activities */
2612 rtw89_pci_ctrl_dma_all(rtwdev, false);
2613
2614 ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2615 if (ret) {
2616 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2617 return ret;
2618 }
2619
2620 rtw89_pci_clr_idx_all(rtwdev);
2621 rtw89_pci_mode_op(rtwdev);
2622
2623 /* fill TRX BD indexes */
2624 rtw89_pci_ops_reset(rtwdev);
2625
2626 ret = rtw89_pci_rst_bdram_pcie(rtwdev);
2627 if (ret) {
2628 rtw89_warn(rtwdev, "reset bdram busy\n");
2629 return ret;
2630 }
2631
2632 /* disable all channels except to FW CMD channel to download firmware */
2633 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
2634 rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
2635
2636 /* start DMA activities */
2637 rtw89_pci_ctrl_dma_all(rtwdev, true);
2638
2639 return 0;
2640 }
2641
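/* Comment added for clarity (not in the original source): configure
 * PCIe LTR (Latency Tolerance Reporting). The LTR registers are
 * sanity-checked first, then hardware LTR is enabled with the idle and
 * active latency values programmed below.
 */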
2642 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2643 {
2644 u32 val;
2645
2646 if (!en)
2647 return 0;
2648
2649 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2650 if (rtw89_pci_ltr_is_err_reg_val(val))
2651 return -EINVAL;
2652 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2653 if (rtw89_pci_ltr_is_err_reg_val(val))
2654 return -EINVAL;
2655 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2656 if (rtw89_pci_ltr_is_err_reg_val(val))
2657 return -EINVAL;
2658 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2659 if (rtw89_pci_ltr_is_err_reg_val(val))
2660 return -EINVAL;
2661
2662 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
2663 B_AX_LTR_WD_NOEMP_CHK);
2664 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2665 PCI_LTR_SPC_500US);
2666 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2667 PCI_LTR_IDLE_TIMER_3_2MS);
2668 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2669 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2670 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
2671 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2672
2673 return 0;
2674 }
2675 EXPORT_SYMBOL(rtw89_pci_ltr_set);
2676
2677 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2678 {
2679 u32 dec_ctrl;
2680 u32 val32;
2681
2682 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2683 if (rtw89_pci_ltr_is_err_reg_val(val32))
2684 return -EINVAL;
2685 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2686 if (rtw89_pci_ltr_is_err_reg_val(val32))
2687 return -EINVAL;
2688 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2689 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2690 return -EINVAL;
2691 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2692 if (rtw89_pci_ltr_is_err_reg_val(val32))
2693 return -EINVAL;
2694 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2695 if (rtw89_pci_ltr_is_err_reg_val(val32))
2696 return -EINVAL;
2697
2698 if (!en) {
2699 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2700 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2701 B_AX_LTR_REQ_DRV;
2702 } else {
2703 dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2704 }
2705
2706 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2707 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
2708
2709 if (en)
2710 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
2711 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
2712 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2713 PCI_LTR_IDLE_TIMER_3_2MS);
2714 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2715 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2716 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
2717 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
2718 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
2719
2720 return 0;
2721 }
2722 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
2723
2724 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
2725 {
2726 const struct rtw89_pci_info *info = rtwdev->pci_info;
2727 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2728 int ret;
2729
2730 ret = info->ltr_set(rtwdev, true);
2731 if (ret) {
2732 rtw89_err(rtwdev, "pci ltr set fail\n");
2733 return ret;
2734 }
2735 if (chip_id == RTL8852A) {
2736 /* ltr sw trigger */
2737 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
2738 }
2739 if (chip_id == RTL8852A || chip_id == RTL8852B) {
2740 /* ADDR info 8-byte mode */
2741 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2742 B_AX_HOST_ADDR_INFO_8B_SEL);
2743 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2744 }
2745
2746 /* enable DMA for all queues */
2747 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
2748
2749 /* Release PCI IO */
2750 rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
2751 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
2752
2753 return 0;
2754 }
2755
2756 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
2757 struct pci_dev *pdev)
2758 {
2759 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2760 int ret;
2761
2762 ret = pci_enable_device(pdev);
2763 if (ret) {
2764 rtw89_err(rtwdev, "failed to enable pci device\n");
2765 return ret;
2766 }
2767
2768 pci_set_master(pdev);
2769 pci_set_drvdata(pdev, rtwdev->hw);
2770
2771 rtwpci->pdev = pdev;
2772
2773 return 0;
2774 }
2775
2776 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
2777 struct pci_dev *pdev)
2778 {
2779 pci_disable_device(pdev);
2780 }
2781
2782 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
2783 struct pci_dev *pdev)
2784 {
2785 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2786 unsigned long resource_len;
2787 u8 bar_id = 2;
2788 int ret;
2789
2790 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2791 if (ret) {
2792 rtw89_err(rtwdev, "failed to request pci regions\n");
2793 goto err;
2794 }
2795
2796 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2797 if (ret) {
2798 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
2799 goto err_release_regions;
2800 }
2801
2802 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
2803 if (ret) {
2804 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
2805 goto err_release_regions;
2806 }
2807
2808 #if defined(__FreeBSD__)
2809 linuxkpi_pcim_want_to_use_bus_functions(pdev);
2810 #endif
2811 resource_len = pci_resource_len(pdev, bar_id);
2812 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
2813 if (!rtwpci->mmap) {
2814 rtw89_err(rtwdev, "failed to map pci io\n");
2815 ret = -EIO;
2816 goto err_release_regions;
2817 }
2818
2819 return 0;
2820
2821 err_release_regions:
2822 pci_release_regions(pdev);
2823 err:
2824 return ret;
2825 }
2826
2827 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
2828 struct pci_dev *pdev)
2829 {
2830 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2831
2832 if (rtwpci->mmap) {
2833 pci_iounmap(pdev, rtwpci->mmap);
2834 pci_release_regions(pdev);
2835 }
2836 }
2837
2838 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
2839 struct pci_dev *pdev,
2840 struct rtw89_pci_tx_ring *tx_ring)
2841 {
2842 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2843 u8 *head = wd_ring->head;
2844 dma_addr_t dma = wd_ring->dma;
2845 u32 page_size = wd_ring->page_size;
2846 u32 page_num = wd_ring->page_num;
2847 u32 ring_sz = page_size * page_num;
2848
2849 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2850 wd_ring->head = NULL;
2851 }
2852
2853 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
2854 struct pci_dev *pdev,
2855 struct rtw89_pci_tx_ring *tx_ring)
2856 {
2857 int ring_sz;
2858 u8 *head;
2859 dma_addr_t dma;
2860
2861 head = tx_ring->bd_ring.head;
2862 dma = tx_ring->bd_ring.dma;
2863 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
2864 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2865
2866 tx_ring->bd_ring.head = NULL;
2867 }
2868
2869 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
2870 struct pci_dev *pdev)
2871 {
2872 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2873 const struct rtw89_pci_info *info = rtwdev->pci_info;
2874 struct rtw89_pci_tx_ring *tx_ring;
2875 int i;
2876
2877 for (i = 0; i < RTW89_TXCH_NUM; i++) {
2878 if (info->tx_dma_ch_mask & BIT(i))
2879 continue;
2880 tx_ring = &rtwpci->tx_rings[i];
2881 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2882 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
2883 }
2884 }
2885
2886 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
2887 struct pci_dev *pdev,
2888 struct rtw89_pci_rx_ring *rx_ring)
2889 {
2890 struct rtw89_pci_rx_info *rx_info;
2891 struct sk_buff *skb;
2892 dma_addr_t dma;
2893 u32 buf_sz;
2894 u8 *head;
2895 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
2896 int i;
2897
2898 buf_sz = rx_ring->buf_sz;
2899 for (i = 0; i < rx_ring->bd_ring.len; i++) {
2900 skb = rx_ring->buf[i];
2901 if (!skb)
2902 continue;
2903
2904 rx_info = RTW89_PCI_RX_SKB_CB(skb);
2905 dma = rx_info->dma;
2906 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
2907 dev_kfree_skb(skb);
2908 rx_ring->buf[i] = NULL;
2909 }
2910
2911 head = rx_ring->bd_ring.head;
2912 dma = rx_ring->bd_ring.dma;
2913 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2914
2915 rx_ring->bd_ring.head = NULL;
2916 }
2917
2918 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
2919 struct pci_dev *pdev)
2920 {
2921 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2922 struct rtw89_pci_rx_ring *rx_ring;
2923 int i;
2924
2925 for (i = 0; i < RTW89_RXCH_NUM; i++) {
2926 rx_ring = &rtwpci->rx_rings[i];
2927 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
2928 }
2929 }
2930
2931 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
2932 struct pci_dev *pdev)
2933 {
2934 rtw89_pci_free_rx_rings(rtwdev, pdev);
2935 rtw89_pci_free_tx_rings(rtwdev, pdev);
2936 }
2937
2938 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
2939 struct rtw89_pci_rx_ring *rx_ring,
2940 struct sk_buff *skb, int buf_sz, u32 idx)
2941 {
2942 struct rtw89_pci_rx_info *rx_info;
2943 struct rtw89_pci_rx_bd_32 *rx_bd;
2944 dma_addr_t dma;
2945
2946 if (!skb)
2947 return -EINVAL;
2948
2949 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
2950 if (dma_mapping_error(&pdev->dev, dma))
2951 return -EBUSY;
2952
2953 rx_info = RTW89_PCI_RX_SKB_CB(skb);
2954 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
2955
2956 memset(rx_bd, 0, sizeof(*rx_bd));
2957 rx_bd->buf_size = cpu_to_le16(buf_sz);
2958 rx_bd->dma = cpu_to_le32(dma);
2959 rx_info->dma = dma;
2960
2961 return 0;
2962 }
2963
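/* Comment added for clarity (not in the original source): the TX WD
 * ring is one coherent DMA allocation carved into fixed-size pages;
 * each page backs a txwd entry that starts on the free list. The FW
 * CMD channel (CH12) does not use WD pages.
 */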
2964 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
2965 struct pci_dev *pdev,
2966 struct rtw89_pci_tx_ring *tx_ring,
2967 enum rtw89_tx_channel txch)
2968 {
2969 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2970 struct rtw89_pci_tx_wd *txwd;
2971 dma_addr_t dma;
2972 dma_addr_t cur_paddr;
2973 u8 *head;
2974 u8 *cur_vaddr;
2975 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
2976 u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
2977 u32 ring_sz = page_size * page_num;
2978 u32 page_offset;
2979 int i;
2980
2981 /* FWCMD queue doesn't use txwd as pages */
2982 if (txch == RTW89_TXCH_CH12)
2983 return 0;
2984
2985 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2986 if (!head)
2987 return -ENOMEM;
2988
2989 INIT_LIST_HEAD(&wd_ring->free_pages);
2990 wd_ring->head = head;
2991 wd_ring->dma = dma;
2992 wd_ring->page_size = page_size;
2993 wd_ring->page_num = page_num;
2994
2995 page_offset = 0;
2996 for (i = 0; i < page_num; i++) {
2997 txwd = &wd_ring->pages[i];
2998 cur_paddr = dma + page_offset;
2999 cur_vaddr = head + page_offset;
3000
3001 skb_queue_head_init(&txwd->queue);
3002 INIT_LIST_HEAD(&txwd->list);
3003 txwd->paddr = cur_paddr;
3004 txwd->vaddr = cur_vaddr;
3005 txwd->len = page_size;
3006 txwd->seq = i;
3007 rtw89_pci_enqueue_txwd(tx_ring, txwd);
3008
3009 page_offset += page_size;
3010 }
3011
3012 return 0;
3013 }
3014
3015 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3016 struct pci_dev *pdev,
3017 struct rtw89_pci_tx_ring *tx_ring,
3018 u32 desc_size, u32 len,
3019 enum rtw89_tx_channel txch)
3020 {
3021 const struct rtw89_pci_ch_dma_addr *txch_addr;
3022 int ring_sz = desc_size * len;
3023 u8 *head;
3024 dma_addr_t dma;
3025 int ret;
3026
3027 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3028 if (ret) {
3029 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3030 goto err;
3031 }
3032
3033 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
3034 if (ret) {
3035 rtw89_err(rtwdev, "failed to get address of txch %d", txch);
3036 goto err_free_wd_ring;
3037 }
3038
3039 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3040 if (!head) {
3041 ret = -ENOMEM;
3042 goto err_free_wd_ring;
3043 }
3044
3045 INIT_LIST_HEAD(&tx_ring->busy_pages);
3046 tx_ring->bd_ring.head = head;
3047 tx_ring->bd_ring.dma = dma;
3048 tx_ring->bd_ring.len = len;
3049 tx_ring->bd_ring.desc_size = desc_size;
3050 tx_ring->bd_ring.addr = *txch_addr;
3051 tx_ring->bd_ring.wp = 0;
3052 tx_ring->bd_ring.rp = 0;
3053 tx_ring->txch = txch;
3054
3055 return 0;
3056
3057 err_free_wd_ring:
3058 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3059 err:
3060 return ret;
3061 }
3062
3063 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3064 struct pci_dev *pdev)
3065 {
3066 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3067 const struct rtw89_pci_info *info = rtwdev->pci_info;
3068 struct rtw89_pci_tx_ring *tx_ring;
3069 u32 desc_size;
3070 u32 len;
3071 u32 i, tx_allocated;
3072 int ret;
3073
3074 for (i = 0; i < RTW89_TXCH_NUM; i++) {
3075 if (info->tx_dma_ch_mask & BIT(i))
3076 continue;
3077 tx_ring = &rtwpci->tx_rings[i];
3078 desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3079 len = RTW89_PCI_TXBD_NUM_MAX;
3080 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3081 desc_size, len, i);
3082 if (ret) {
3083 #if defined(__linux__)
3084 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3085 #elif defined(__FreeBSD__)
3086 rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret);
3087 #endif
3088 goto err_free;
3089 }
3090 }
3091
3092 return 0;
3093
3094 err_free:
3095 tx_allocated = i;
3096 for (i = 0; i < tx_allocated; i++) {
3097 tx_ring = &rtwpci->tx_rings[i];
3098 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3099 }
3100
3101 return ret;
3102 }
3103
3104 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3105 struct pci_dev *pdev,
3106 struct rtw89_pci_rx_ring *rx_ring,
3107 u32 desc_size, u32 len, u32 rxch)
3108 {
3109 const struct rtw89_pci_ch_dma_addr *rxch_addr;
3110 struct sk_buff *skb;
3111 u8 *head;
3112 dma_addr_t dma;
3113 int ring_sz = desc_size * len;
3114 int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3115 int i, allocated;
3116 int ret;
3117
3118 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3119 if (ret) {
3120 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
3121 return ret;
3122 }
3123
3124 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3125 if (!head) {
3126 ret = -ENOMEM;
3127 goto err;
3128 }
3129
3130 rx_ring->bd_ring.head = head;
3131 rx_ring->bd_ring.dma = dma;
3132 rx_ring->bd_ring.len = len;
3133 rx_ring->bd_ring.desc_size = desc_size;
3134 rx_ring->bd_ring.addr = *rxch_addr;
3135 rx_ring->bd_ring.wp = 0;
3136 rx_ring->bd_ring.rp = 0;
3137 rx_ring->buf_sz = buf_sz;
3138 rx_ring->diliver_skb = NULL;
3139 rx_ring->diliver_desc.ready = false;
3140
3141 for (i = 0; i < len; i++) {
3142 skb = dev_alloc_skb(buf_sz);
3143 if (!skb) {
3144 ret = -ENOMEM;
3145 goto err_free;
3146 }
3147
3148 memset(skb->data, 0, buf_sz);
3149 rx_ring->buf[i] = skb;
3150 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3151 buf_sz, i);
3152 if (ret) {
3153 #if defined(__linux__)
3154 rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3155 #elif defined(__FreeBSD__)
3156 rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret);
3157 #endif
3158 dev_kfree_skb_any(skb);
3159 rx_ring->buf[i] = NULL;
3160 goto err_free;
3161 }
3162 }
3163
3164 return 0;
3165
3166 err_free:
3167 allocated = i;
3168 for (i = 0; i < allocated; i++) {
3169 skb = rx_ring->buf[i];
3170 if (!skb)
3171 continue;
3172 dma = *((dma_addr_t *)skb->cb);
3173 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3174 dev_kfree_skb(skb);
3175 rx_ring->buf[i] = NULL;
3176 }
3177
3178 head = rx_ring->bd_ring.head;
3179 dma = rx_ring->bd_ring.dma;
3180 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3181
3182 rx_ring->bd_ring.head = NULL;
3183 err:
3184 return ret;
3185 }
3186
3187 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3188 struct pci_dev *pdev)
3189 {
3190 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3191 struct rtw89_pci_rx_ring *rx_ring;
3192 u32 desc_size;
3193 u32 len;
3194 int i, rx_allocated;
3195 int ret;
3196
3197 for (i = 0; i < RTW89_RXCH_NUM; i++) {
3198 rx_ring = &rtwpci->rx_rings[i];
3199 desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3200 len = RTW89_PCI_RXBD_NUM_MAX;
3201 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3202 desc_size, len, i);
3203 if (ret) {
3204 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3205 goto err_free;
3206 }
3207 }
3208
3209 return 0;
3210
3211 err_free:
3212 rx_allocated = i;
3213 for (i = 0; i < rx_allocated; i++) {
3214 rx_ring = &rtwpci->rx_rings[i];
3215 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3216 }
3217
3218 return ret;
3219 }
3220
3221 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3222 struct pci_dev *pdev)
3223 {
3224 int ret;
3225
3226 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3227 if (ret) {
3228 rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3229 goto err;
3230 }
3231
3232 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3233 if (ret) {
3234 rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3235 goto err_free_tx_rings;
3236 }
3237
3238 return 0;
3239
3240 err_free_tx_rings:
3241 rtw89_pci_free_tx_rings(rtwdev, pdev);
3242 err:
3243 return ret;
3244 }
3245
3246 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3247 struct rtw89_pci *rtwpci)
3248 {
3249 skb_queue_head_init(&rtwpci->h2c_queue);
3250 skb_queue_head_init(&rtwpci->h2c_release_queue);
3251 }
3252
3253 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3254 struct pci_dev *pdev)
3255 {
3256 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3257 int ret;
3258
3259 ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3260 if (ret) {
3261 rtw89_err(rtwdev, "failed to setup pci mapping\n");
3262 goto err;
3263 }
3264
3265 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3266 if (ret) {
3267 rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3268 goto err_pci_unmap;
3269 }
3270
3271 rtw89_pci_h2c_init(rtwdev, rtwpci);
3272
3273 spin_lock_init(&rtwpci->irq_lock);
3274 spin_lock_init(&rtwpci->trx_lock);
3275
3276 return 0;
3277
3278 err_pci_unmap:
3279 rtw89_pci_clear_mapping(rtwdev, pdev);
3280 err:
3281 return ret;
3282 }
3283
3284 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3285 struct pci_dev *pdev)
3286 {
3287 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3288
3289 rtw89_pci_free_trx_rings(rtwdev, pdev);
3290 rtw89_pci_clear_mapping(rtwdev, pdev);
3291 rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3292 skb_queue_len(&rtwpci->h2c_queue), true);
3293 }
3294
3295 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3296 {
3297 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3298 const struct rtw89_chip_info *chip = rtwdev->chip;
3299 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3300
3301 if (chip->chip_id == RTL8851B)
3302 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3303
3304 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
3305
3306 if (rtwpci->under_recovery) {
3307 rtwpci->intrs[0] = hs0isr_ind_int_en;
3308 rtwpci->intrs[1] = 0;
3309 } else {
3310 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3311 B_AX_RXDMA_INT_EN |
3312 B_AX_RXP1DMA_INT_EN |
3313 B_AX_RPQDMA_INT_EN |
3314 B_AX_RXDMA_STUCK_INT_EN |
3315 B_AX_RDU_INT_EN |
3316 B_AX_RPQBD_FULL_INT_EN |
3317 hs0isr_ind_int_en;
3318
3319 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3320 }
3321 }
3322 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3323
3324 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3325 {
3326 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3327
3328 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3329 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3330 rtwpci->intrs[0] = 0;
3331 rtwpci->intrs[1] = 0;
3332 }
3333
3334 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3335 {
3336 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3337
3338 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
3339 B_AX_HS1ISR_IND_INT_EN |
3340 B_AX_HS0ISR_IND_INT_EN;
3341 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3342 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3343 B_AX_RXDMA_INT_EN |
3344 B_AX_RXP1DMA_INT_EN |
3345 B_AX_RPQDMA_INT_EN |
3346 B_AX_RXDMA_STUCK_INT_EN |
3347 B_AX_RDU_INT_EN |
3348 B_AX_RPQBD_FULL_INT_EN;
3349 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3350 }
3351
3352 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
3353 {
3354 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3355
3356 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
3357 B_AX_HS0ISR_IND_INT_EN;
3358 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3359 rtwpci->intrs[0] = 0;
3360 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3361 }
3362
3363 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
3364 {
3365 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3366
3367 if (rtwpci->under_recovery)
3368 rtw89_pci_recovery_intr_mask_v1(rtwdev);
3369 else if (rtwpci->low_power)
3370 rtw89_pci_low_power_intr_mask_v1(rtwdev);
3371 else
3372 rtw89_pci_default_intr_mask_v1(rtwdev);
3373 }
3374 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
3375
3376 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
3377 struct pci_dev *pdev)
3378 {
3379 unsigned long flags = 0;
3380 int ret;
3381
3382 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
3383 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3384 if (ret < 0) {
3385 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
3386 goto err;
3387 }
3388
3389 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
3390 rtw89_pci_interrupt_handler,
3391 rtw89_pci_interrupt_threadfn,
3392 IRQF_SHARED, KBUILD_MODNAME, rtwdev);
3393 if (ret) {
3394 rtw89_err(rtwdev, "failed to request threaded irq\n");
3395 goto err_free_vector;
3396 }
3397
3398 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
3399
3400 return 0;
3401
3402 err_free_vector:
3403 pci_free_irq_vectors(pdev);
3404 err:
3405 return ret;
3406 }
3407
3408 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
3409 struct pci_dev *pdev)
3410 {
3411 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
3412 pci_free_irq_vectors(pdev);
3413 }
3414
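/* Helper for rtw89_pci_filter_out(): XOR each bit of the PHY readout with
 * the adjacent higher-order bit (the top bit is kept as-is) to decode the
 * Gray-coded register field into a plain binary value.
 */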
3415 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
3416 {
3417 u16 bin = 0, gray_bit;
3418 u32 bit_idx;
3419
3420 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
3421 gray_bit = (gray_code >> bit_idx) & 0x1;
3422 if (bit_num - bit_idx > 1)
3423 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
3424 bin |= (gray_bit << bit_idx);
3425 }
3426
3427 return bin;
3428 }
3429
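/* RTL8852C only: skip the fixup if the ASPM control field already selects
 * L1.  Otherwise, on a Gen2 link the Gray-coded EQ value is read from the
 * RAC PHY registers, converted to binary and written back as the
 * filter-out setting; for both Gen1 and Gen2 links the PHY power-save bit
 * is enabled afterwards.
 */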
3430 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
3431 {
3432 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3433 struct pci_dev *pdev = rtwpci->pdev;
3434 u16 val16, filter_out_val;
3435 u32 val, phy_offset;
3436 int ret;
3437
3438 if (rtwdev->chip->chip_id != RTL8852C)
3439 return 0;
3440
3441 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
3442 if (val == B_AX_ASPM_CTRL_L1)
3443 return 0;
3444
3445 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
3446 if (ret)
3447 return ret;
3448
3449 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
3450 if (val == RTW89_PCIE_GEN1_SPEED) {
3451 phy_offset = R_RAC_DIRECT_OFFSET_G1;
3452 } else if (val == RTW89_PCIE_GEN2_SPEED) {
3453 phy_offset = R_RAC_DIRECT_OFFSET_G2;
3454 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
3455 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
3456 val16 | B_PCIE_BIT_PINOUT_DIS);
3457 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
3458 val16 & ~B_PCIE_BIT_RD_SEL);
3459
3460 val16 = rtw89_read16_mask(rtwdev,
3461 phy_offset + RAC_ANA1F * RAC_MULT,
3462 FILTER_OUT_EQ_MASK);
3463 val16 = gray_code_to_bin(val16, hweight16(val16));
3464 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
3465 RAC_MULT);
3466 filter_out_val &= ~REG_FILTER_OUT_MASK;
3467 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
3468
3469 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
3470 filter_out_val);
3471 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
3472 B_BAC_EQ_SEL);
3473 rtw89_write16_set(rtwdev,
3474 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
3475 B_PCIE_BIT_PSAVE);
3476 } else {
3477 return -EOPNOTSUPP;
3478 }
3479 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
3480 B_PCIE_BIT_PSAVE);
3481
3482 return 0;
3483 }
3484
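/* Enable or disable HW CLKREQ handling.  The CLKREQ delay is programmed
 * to 30us first; RTL8852A/B and RTL8851B then toggle RTW89_PCIE_BIT_CLK
 * in the RTW89_PCIE_L1_CTRL config byte, while RTL8852C uses the MAC
 * registers R_AX_PCIE_LAT_CTRL and R_AX_L1_CLK_CTRL instead.
 */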
3485 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
3486 {
3487 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3488 int ret;
3489
3490 if (rtw89_pci_disable_clkreq)
3491 return;
3492
3493 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
3494 PCIE_CLKDLY_HW_30US);
3495 if (ret)
3496 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
3497
3498 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3499 if (enable)
3500 ret = rtw89_pci_config_byte_set(rtwdev,
3501 RTW89_PCIE_L1_CTRL,
3502 RTW89_PCIE_BIT_CLK);
3503 else
3504 ret = rtw89_pci_config_byte_clr(rtwdev,
3505 RTW89_PCIE_L1_CTRL,
3506 RTW89_PCIE_BIT_CLK);
3507 if (ret)
3508 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
3509 enable ? "set" : "unset", ret);
3510 } else if (chip_id == RTL8852C) {
3511 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
3512 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
3513 if (enable)
3514 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
3515 B_AX_CLK_REQ_N);
3516 else
3517 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
3518 B_AX_CLK_REQ_N);
3519 }
3520 }
3521
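/* Enable or disable device-side ASPM L1.  The L0s/L1 entry delays are set
 * to 4us/16us first; RTL8852A/B and RTL8851B then toggle RTW89_PCIE_BIT_L1
 * in RTW89_PCIE_L1_CTRL, while RTL8852C controls B_AX_ASPM_CTRL_L1 in
 * R_AX_PCIE_MIX_CFG_V1.
 */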
3522 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
3523 {
3524 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3525 u8 value = 0;
3526 int ret;
3527
3528 if (rtw89_pci_disable_aspm_l1)
3529 return;
3530
3531 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3532 if (ret)
3533 rtw89_err(rtwdev, "failed to read ASPM Delay\n");
3534
3535 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
3536 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
3537 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
3538
3539 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3540 if (ret)
3541 rtw89_err(rtwdev, "failed to read ASPM Delay\n");
3542
3543 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3544 if (enable)
3545 ret = rtw89_pci_config_byte_set(rtwdev,
3546 RTW89_PCIE_L1_CTRL,
3547 RTW89_PCIE_BIT_L1);
3548 else
3549 ret = rtw89_pci_config_byte_clr(rtwdev,
3550 RTW89_PCIE_L1_CTRL,
3551 RTW89_PCIE_BIT_L1);
3552 } else if (chip_id == RTL8852C) {
3553 if (enable)
3554 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3555 B_AX_ASPM_CTRL_L1);
3556 else
3557 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3558 B_AX_ASPM_CTRL_L1);
3559 }
3560 if (ret)
3561 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
3562 enable ? "set" : "unset", ret);
3563 }
3564
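/* Recalculate RX interrupt mitigation: when not scanning and either TX or
 * RX traffic is at least RTW89_TFC_HIGH, RX interrupts are batched by
 * counter (half of the RX BD ring) and by timer (2048us in 64us units);
 * otherwise mitigation is disabled.
 */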
3565 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
3566 {
3567 struct rtw89_traffic_stats *stats = &rtwdev->stats;
3568 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3569 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3570 u32 val = 0;
3571
3572 if (!rtwdev->scanning &&
3573 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
3574 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
3575 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
3576 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
3577 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
3578
3579 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
3580 }
3581
3582 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
3583 {
3584 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3585 struct pci_dev *pdev = rtwpci->pdev;
3586 u16 link_ctrl;
3587 int ret;
3588
3589 /* Although the standard PCIe configuration space provides a link
3590 * control register, by Realtek's design the driver must check that
3591 * the host supports CLKREQ/ASPM before enabling the HW module.
3592 *
3593 * These features involve two associated HW modules: one is
3594 * responsible for accessing the PCIe configuration space to follow
3595 * the host settings, and the other carries out the actual
3596 * CLKREQ/ASPM mechanisms and is disabled by default, because the
3597 * host may not support them, and wrong settings (e.g. CLKREQ# not
3598 * bi-directional) could lead to losing the device if the HW
3599 * misbehaves on the link.
3600 *
3601 * Hence the driver is designed to first check that the PCIe
3602 * configuration space is synced and enabled, and only then turn on
3603 * the other module that actually implements the mechanism.
3604 */
3605 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
3606 if (ret) {
3607 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
3608 return;
3609 }
3610
3611 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
3612 rtw89_pci_clkreq_set(rtwdev, true);
3613
3614 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
3615 rtw89_pci_aspm_set(rtwdev, true);
3616 }
3617
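/* Enable or disable device-side L1 substates.  RTL8852A/B and RTL8851B
 * toggle RTW89_PCIE_BIT_L1SUB in RTW89_PCIE_TIMER_CTRL; RTL8852C clears
 * the ASPM/PCI-PM L1.1 bits in RTW89_PCIE_L1SS_STS_V1 and then drives
 * B_AX_L1SUB_DISABLE in R_AX_PCIE_MIX_CFG_V1.
 */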
3618 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
3619 {
3620 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3621 int ret;
3622
3623 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3624 if (enable)
3625 ret = rtw89_pci_config_byte_set(rtwdev,
3626 RTW89_PCIE_TIMER_CTRL,
3627 RTW89_PCIE_BIT_L1SUB);
3628 else
3629 ret = rtw89_pci_config_byte_clr(rtwdev,
3630 RTW89_PCIE_TIMER_CTRL,
3631 RTW89_PCIE_BIT_L1SUB);
3632 if (ret)
3633 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
3634 enable ? "set" : "unset", ret);
3635 } else if (chip_id == RTL8852C) {
3636 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
3637 RTW89_PCIE_BIT_ASPM_L11 |
3638 RTW89_PCIE_BIT_PCI_L11);
3639 if (ret)
3640 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
3641 if (enable)
3642 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3643 B_AX_L1SUB_DISABLE);
3644 else
3645 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3646 B_AX_L1SUB_DISABLE);
3647 }
3648 }
3649
3650 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
3651 {
3652 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3653 struct pci_dev *pdev = rtwpci->pdev;
3654 u32 l1ss_cap_ptr, l1ss_ctrl;
3655
3656 if (rtw89_pci_disable_l1ss)
3657 return;
3658
3659 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3660 if (!l1ss_cap_ptr)
3661 return;
3662
3663 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
3664
3665 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
3666 rtw89_pci_l1ss_set(rtwdev, true);
3667 }
3668
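/* Poll R_AX_PCIE_DMA_BUSY1 until the PCIe IO/TX/RX busy bits clear, in
 * 10us steps for at most 1ms; dump the busy status and return -EINVAL on
 * timeout.
 */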
3669 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
3670 {
3671 int ret = 0;
3672 u32 sts;
3673 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
3674
3675 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
3676 10, 1000, false, rtwdev,
3677 R_AX_PCIE_DMA_BUSY1);
3678 if (ret) {
3679 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
3680 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
3681 return -EINVAL;
3682 }
3683 return ret;
3684 }
3685
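/* Level-1 recovery, step 1: stop all PCIe DMA and wait for IO to go idle.
 * If it stays busy, the stuck TX/RX HCI DMA paths are disabled according
 * to R_AX_DBG_ERR_FLAG, the HCI DMA engines are re-enabled and the idle
 * poll is retried.  RTL8852C is handled elsewhere, so this is a no-op for
 * it.
 */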
3686 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
3687 {
3688 u32 val;
3689 int ret;
3690
3691 if (rtwdev->chip->chip_id == RTL8852C)
3692 return 0;
3693
3694 rtw89_pci_ctrl_dma_all(rtwdev, false);
3695 ret = rtw89_pci_poll_io_idle(rtwdev);
3696 if (ret) {
3697 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
3698 rtw89_debug(rtwdev, RTW89_DBG_HCI,
3699 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
3700 R_AX_DBG_ERR_FLAG, val);
3701 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
3702 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
3703 if (val & B_AX_RX_STUCK)
3704 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
3705 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
3706 ret = rtw89_pci_poll_io_idle(rtwdev);
3707 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
3708 rtw89_debug(rtwdev, RTW89_DBG_HCI,
3709 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
3710 R_AX_DBG_ERR_FLAG, val);
3711 }
3712
3713 return ret;
3714 }
3715
3716
3718 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
3719 {
3720 int ret = 0;
3721 u32 val32, sts;
3722
3723 val32 = B_AX_RST_BDRAM;
3724 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
3725
3726 ret = read_poll_timeout_atomic(rtw89_read32, sts,
3727 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
3728 true, rtwdev, R_AX_PCIE_INIT_CFG1);
3729 return ret;
3730 }
3731
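/* Level-1 recovery, step 2: bounce the HCI DMA engines, clear all ring
 * indexes, reset the buffer descriptor RAM and re-enable PCIe DMA.
 * RTL8852C is again handled elsewhere.
 */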
3732 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
3733 {
3734 int ret;
3735
3736 if (rtwdev->chip->chip_id == RTL8852C)
3737 return 0;
3738
3739 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
3740 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
3741 rtw89_pci_clr_idx_all(rtwdev);
3742
3743 ret = rtw89_pci_rst_bdram(rtwdev);
3744 if (ret)
3745 return ret;
3746
3747 rtw89_pci_ctrl_dma_all(rtwdev, true);
3748 return ret;
3749 }
3750
3751 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
3752 enum rtw89_lv1_rcvy_step step)
3753 {
3754 int ret;
3755
3756 switch (step) {
3757 case RTW89_LV1_RCVY_STEP_1:
3758 ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
3759 if (ret)
3760 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
3761
3762 break;
3763
3764 case RTW89_LV1_RCVY_STEP_2:
3765 ret = rtw89_pci_lv1rst_start_dma(rtwdev);
3766 if (ret)
3767 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
3768 break;
3769
3770 default:
3771 return -EINVAL;
3772 }
3773
3774 return ret;
3775 }
3776
3777 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
3778 {
3779 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
3780 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
3781 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
3782 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
3783 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
3784 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
3785 }
3786
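/* NAPI poll callback: service the release (RPQ) ring first and, if budget
 * remains, the RX ring.  When all pending work fits in the budget, NAPI
 * is completed and the device interrupts are re-enabled under irq_lock.
 */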
3787 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
3788 {
3789 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
3790 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3791 unsigned long flags;
3792 int work_done;
3793
3794 rtwdev->napi_budget_countdown = budget;
3795
3796 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
3797 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
3798 if (work_done == budget)
3799 return budget;
3800
3801 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
3802 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
3803 if (work_done < budget && napi_complete_done(napi, work_done)) {
3804 spin_lock_irqsave(&rtwpci->irq_lock, flags);
3805 if (likely(rtwpci->running))
3806 rtw89_chip_enable_intr(rtwdev, rtwpci);
3807 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
3808 }
3809
3810 return work_done;
3811 }
3812
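/* PM suspend hook: B_AX_R_DIS_PRST is set (with B_AX_WLOCK_1C_BIT6
 * toggled around the write); RTL8852A/B and RTL8851B additionally keep
 * the PCIe PERST/train settings across suspend, while other chips drop
 * the CMAC/DMAC L1-exit enables.
 */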
3813 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
3814 {
3815 struct ieee80211_hw *hw = dev_get_drvdata(dev);
3816 struct rtw89_dev *rtwdev = hw->priv;
3817 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3818
3819 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
3820 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
3821 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
3822 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3823 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
3824 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
3825 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
3826 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
3827 } else {
3828 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
3829 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
3830 }
3831
3832 return 0;
3833 }
3834
3835 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
3836 {
3837 if (rtwdev->chip->chip_id == RTL8852C)
3838 return;
3839
3840 /* The hardware needs this register written twice for the setting to take effect */
3841 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
3842 RTW89_PCIE_BIT_CFG_RST_MSTATE);
3843 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
3844 RTW89_PCIE_BIT_CFG_RST_MSTATE);
3845 }
3846
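/* PM resume hook: undo the suspend-time settings and re-apply the HCI LDO
 * workaround, the filter-out fixup and the CLKREQ/ASPM/L1SS link
 * configuration, since these may have been lost across suspend.
 */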
3847 static int __maybe_unused rtw89_pci_resume(struct device *dev)
3848 {
3849 struct ieee80211_hw *hw = dev_get_drvdata(dev);
3850 struct rtw89_dev *rtwdev = hw->priv;
3851 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3852
3853 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
3854 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
3855 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
3856 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3857 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
3858 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
3859 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
3860 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
3861 } else {
3862 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
3863 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
3864 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
3865 B_AX_SEL_REQ_ENTR_L1);
3866 }
3867 rtw89_pci_l2_hci_ldo(rtwdev);
3868 rtw89_pci_filter_out(rtwdev);
3869 rtw89_pci_link_cfg(rtwdev);
3870 rtw89_pci_l1ss_cfg(rtwdev);
3871
3872 return 0;
3873 }
3874
3875 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
3876 EXPORT_SYMBOL(rtw89_pm_ops);
3877
3878 static const struct rtw89_hci_ops rtw89_pci_ops = {
3879 .tx_write = rtw89_pci_ops_tx_write,
3880 .tx_kick_off = rtw89_pci_ops_tx_kick_off,
3881 .flush_queues = rtw89_pci_ops_flush_queues,
3882 .reset = rtw89_pci_ops_reset,
3883 .start = rtw89_pci_ops_start,
3884 .stop = rtw89_pci_ops_stop,
3885 .pause = rtw89_pci_ops_pause,
3886 .switch_mode = rtw89_pci_ops_switch_mode,
3887 .recalc_int_mit = rtw89_pci_recalc_int_mit,
3888
3889 .read8 = rtw89_pci_ops_read8,
3890 .read16 = rtw89_pci_ops_read16,
3891 .read32 = rtw89_pci_ops_read32,
3892 .write8 = rtw89_pci_ops_write8,
3893 .write16 = rtw89_pci_ops_write16,
3894 .write32 = rtw89_pci_ops_write32,
3895
3896 .mac_pre_init = rtw89_pci_ops_mac_pre_init,
3897 .mac_post_init = rtw89_pci_ops_mac_post_init,
3898 .deinit = rtw89_pci_ops_deinit,
3899
3900 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
3901 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
3902 .dump_err_status = rtw89_pci_ops_dump_err_status,
3903 .napi_poll = rtw89_pci_napi_poll,
3904
3905 .recovery_start = rtw89_pci_ops_recovery_start,
3906 .recovery_complete = rtw89_pci_ops_recovery_complete,
3907
3908 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie,
3909 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
3910 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
3911 .poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie,
3912 .clr_idx_all = rtw89_pci_clr_idx_all,
3913 .clear = rtw89_pci_clear_resource,
3914 .disable_intr = rtw89_pci_disable_intr_lock,
3915 .enable_intr = rtw89_pci_enable_intr_lock,
3916 .rst_bdram = rtw89_pci_rst_bdram_pcie,
3917 };
3918
3919 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3920 {
3921 struct rtw89_dev *rtwdev;
3922 const struct rtw89_driver_info *info;
3923 const struct rtw89_pci_info *pci_info;
3924 int ret;
3925
3926 info = (const struct rtw89_driver_info *)id->driver_data;
3927
3928 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
3929 sizeof(struct rtw89_pci),
3930 info->chip);
3931 if (!rtwdev) {
3932 dev_err(&pdev->dev, "failed to allocate hw\n");
3933 return -ENOMEM;
3934 }
3935
3936 pci_info = info->bus.pci;
3937
3938 rtwdev->pci_info = info->bus.pci;
3939 rtwdev->hci.ops = &rtw89_pci_ops;
3940 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
3941 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
3942 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
3943
3944 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
3945
3946 ret = rtw89_core_init(rtwdev);
3947 if (ret) {
3948 rtw89_err(rtwdev, "failed to initialise core\n");
3949 goto err_release_hw;
3950 }
3951
3952 ret = rtw89_pci_claim_device(rtwdev, pdev);
3953 if (ret) {
3954 rtw89_err(rtwdev, "failed to claim pci device\n");
3955 goto err_core_deinit;
3956 }
3957
3958 ret = rtw89_pci_setup_resource(rtwdev, pdev);
3959 if (ret) {
3960 rtw89_err(rtwdev, "failed to setup pci resource\n");
3961 goto err_declaim_pci;
3962 }
3963
3964 ret = rtw89_chip_info_setup(rtwdev);
3965 if (ret) {
3966 rtw89_err(rtwdev, "failed to setup chip information\n");
3967 goto err_clear_resource;
3968 }
3969
3970 rtw89_pci_filter_out(rtwdev);
3971 rtw89_pci_link_cfg(rtwdev);
3972 rtw89_pci_l1ss_cfg(rtwdev);
3973
3974 rtw89_core_napi_init(rtwdev);
3975
3976 ret = rtw89_pci_request_irq(rtwdev, pdev);
3977 if (ret) {
3978 rtw89_err(rtwdev, "failed to request pci irq\n");
3979 goto err_deinit_napi;
3980 }
3981
3982 ret = rtw89_core_register(rtwdev);
3983 if (ret) {
3984 rtw89_err(rtwdev, "failed to register core\n");
3985 goto err_free_irq;
3986 }
3987
3988 return 0;
3989
3990 err_free_irq:
3991 rtw89_pci_free_irq(rtwdev, pdev);
3992 err_deinit_napi:
3993 rtw89_core_napi_deinit(rtwdev);
3994 err_clear_resource:
3995 rtw89_pci_clear_resource(rtwdev, pdev);
3996 err_declaim_pci:
3997 rtw89_pci_declaim_device(rtwdev, pdev);
3998 err_core_deinit:
3999 rtw89_core_deinit(rtwdev);
4000 err_release_hw:
4001 rtw89_free_ieee80211_hw(rtwdev);
4002
4003 return ret;
4004 }
4005 EXPORT_SYMBOL(rtw89_pci_probe);
4006
4007 void rtw89_pci_remove(struct pci_dev *pdev)
4008 {
4009 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4010 struct rtw89_dev *rtwdev;
4011
4012 rtwdev = hw->priv;
4013
4014 rtw89_pci_free_irq(rtwdev, pdev);
4015 rtw89_core_napi_deinit(rtwdev);
4016 rtw89_core_unregister(rtwdev);
4017 rtw89_pci_clear_resource(rtwdev, pdev);
4018 rtw89_pci_declaim_device(rtwdev, pdev);
4019 rtw89_core_deinit(rtwdev);
4020 rtw89_free_ieee80211_hw(rtwdev);
4021 }
4022 EXPORT_SYMBOL(rtw89_pci_remove);
4023
4024 MODULE_AUTHOR("Realtek Corporation");
4025 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
4026 MODULE_LICENSE("Dual BSD/GPL");
4027 #if defined(__FreeBSD__)
4028 MODULE_VERSION(rtw89_pci, 1);
4029 MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1);
4030 MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1);
4031 #ifdef CONFIG_RTW89_DEBUGFS
4032 MODULE_DEPEND(rtw89_pci, lindebugfs, 1, 1, 1);
4033 #endif
4034 #endif
4035