// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#if defined(__FreeBSD__)
#define LINUXKPI_PARAM_PREFIX rtw88_pci_
#endif

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"
#if defined(__FreeBSD__)
#include <sys/rman.h>
#include <linux/pm.h>
#endif

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};
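/* Each index register in the table above packs a ring's two indices: judging
 * by the accesses in this file, the host write index lives in the low 16 bits
 * (written by rtw_pci_tx_kick_off_queue()) and the hardware read index in the
 * high 16 bits (read by __pci_get_hw_tx_ring_rp() and rtw_pci_tx_isr()).
 */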

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
			      enum rtw_tx_queue_type queue)
{
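	/* Fixed-function queues use dedicated QSEL values; data queues fall
	 * through to skb->priority, which mac80211 sets to the frame's TID.
	 */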
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}
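
/* MMIO register accessors: on Linux, rtwpci->mmap is the pci_iomap()ed BAR
 * and the plain readb()/writeb() family applies; on FreeBSD it is a
 * struct resource * under LinuxKPI and accesses go through the
 * bus_read/bus_write_N(9) functions, with optional I/O tracing via
 * RTW_DBG_IO_RW.
 */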

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	return readb(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u8 val;

	val = bus_read_1((struct resource *)rtwpci->mmap, addr);
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
	return (val);
#endif
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	return readw(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u16 val;

	val = bus_read_2((struct resource *)rtwpci->mmap, addr);
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
	return (val);
#endif
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	return readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u32 val;

	val = bus_read_4((struct resource *)rtwpci->mmap, addr);
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
	return (val);
#endif
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	writeb(val, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val);
	return (bus_write_1((struct resource *)rtwpci->mmap, addr, val));
#endif
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	writew(val, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val);
	return (bus_write_2((struct resource *)rtwpci->mmap, addr, val));
#endif
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	writel(val, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val);
	return (bus_write_4((struct resource *)rtwpci->mmap, addr, val));
#endif
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx queue */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}
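
/* Ring allocation.  Ring indices are programmed into hardware registers that
 * are only TRX_BD_IDX_MASK wide, so a ring may not exceed TRX_BD_IDX_MASK
 * entries; lengths and indices are masked with TRX_BD_IDX_MASK wherever they
 * are written to the hardware below.
 */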

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}
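
/* Re-arm an RX buffer descriptor after the CPU is done with the buffer:
 * sync the buffer back to the device and rewrite buf_size/dma, which
 * effectively returns ownership of the ring slot to the hardware.
 */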

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	const struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

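	/* irq_mask[0], [1] and [3] are written to HIMR0/1/3 respectively by
	 * rtw_pci_enable_interrupt(); index 2 is unused in this driver.
	 */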
	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      IMR_C2HCMD |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	if (!rtw_chip_wcpu_11n(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write pointers */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset the H2C queue indices in a single write */
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci, bool exclude_rx)
{
	unsigned long flags;
	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_synchronize(&rtwpci->napi);
	napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_start(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = true;
	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = false;
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	synchronize_irq(pdev->irq);
	rtw_pci_napi_stop(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;
	bool tx_empty = true;

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		goto enter_deep_ps;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue is rsvd page, which does not have a DMA interrupt;
		 * H2C queue is managed by firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if any skb is still being DMAed */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}
enter_deep_ps:
	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
#if defined(__linux__)
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);
#elif defined(__FreeBSD__)
	lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock);
#endif

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(prev);
}
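
/* With BIT_RX_TAG_EN set (see rtw_pci_dma_reset()), the hardware tags each
 * completed RX buffer descriptor with an incrementing counter, read back here
 * through the total_pkt_size field; the driver mirrors it in rtwpci->rx_tag,
 * so a mismatch suggests lost or stalled DMA writes on the bus.
 */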

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
	u32 cur_rp;
	u8 i;

	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
	 * bit dynamic, it's hard to define a reasonable fixed total timeout
	 * for the read_poll_timeout* helpers. Instead, bound the number of
	 * polls with a plain for loop and udelay.
	 */
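	/* 30 polls of udelay(1) bound the busy-wait at roughly 30 us plus
	 * the latency of each register read.
	 */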
	for (i = 0; i < 30; i++) {
		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
		if (cur_rp == ring->r.wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
				   bool drop)
{
	u8 q;

	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
		/* Unnecessary to flush BCN, H2C and HI tx queues. */
		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
		    q == RTW_TX_QUEUE_HI0)
			continue;

		if (pci_queues & BIT(q))
			__pci_flush_queue(rtwdev, q, drop);
	}
}

static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 pci_queues = 0;
	u8 i;

	/* If all of the hardware queues are requested to flush,
	 * flush all of the pci queues.
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1) {
		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
	} else {
		for (i = 0; i < rtwdev->hw->queues; i++)
			if (queues & BIT(i))
				pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
	}

	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
				      enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	enum rtw_tx_queue_type queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb,
				 enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
		return -EBUSY;

	/* after this point the DMA mapping is committed; there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
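	/* psb_len is the total packet length in 128-byte units, rounded up;
	 * for the beacon queue the descriptor OWN bit is set as well so the
	 * hardware will fetch it. buf_desc[0] maps the tx_pkt_desc header,
	 * buf_desc[1] the frame payload that follows it.
	 */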
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
#if defined(__FreeBSD__)
		dev_kfree_skb_any(skb);
#endif
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through the beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
#if defined(__FreeBSD__)
		dev_kfree_skb_any(skb);
#endif
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp, rp_idx;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
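	/* bd_idx packs both ring indices: the host write index lives in the
	 * low 16 bits, the hardware read index in the high 16 bits.
	 */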
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	rp_idx = ring->r.rp;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		if (++rp_idx >= ring->r.len)
			rp_idx = 0;

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always report ACK for the rest, so they won't be marked
		 * as dropped
		 */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct napi_struct *napi = &rtwpci->napi;

	napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	struct rtw_pci_rx_ring *ring;
	int count = 0;
	u32 tmp, cur_wp;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
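	/* Number of buffers the hardware has filled since we last looked,
	 * accounting for ring wrap: e.g. len = 512, r.wp = 510, cur_wp = 2
	 * gives count = 512 - (510 - 2) = 4.
	 */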
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	return count;
}

static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue, u32 limit)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct napi_struct *napi = &rtwpci->napi;
	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_rp = ring->r.rp;
	u32 count, rx_done = 0;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
	count = min(count, limit);

	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
			rx_done++;
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	/* 'rp', the last position we have read, is seen as the previous
	 * position of 'wp' that is used to calculate 'count' next time.
	 */
	ring->r.wp = cur_rp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

	return rx_done;
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_11ac(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* disable RTW PCI interrupts to avoid more interrupts arriving
	 * before the threaded handler has run to completion
	 *
	 * disable HIMR here to also avoid a new HISR flag being raised
	 * before the HISRs have been Write-1-cleared for MSI. If not all of
	 * the HISRs are cleared, the edge-triggered interrupt will not be
	 * generated when a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];
	bool rx = false;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK) {
		rtw_pci_rx_isr(rtwdev);
		rx = true;
	}
	if (unlikely(irq_status[0] & IMR_C2HCMD))
		rtw_fw_c2h_cmd_isr(rtwdev);

	/* all of the jobs for this interrupt have been done */
	if (rtwpci->running)
		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

#if defined(__FreeBSD__)
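	/* The buffer descriptors in this driver carry DMA addresses as
	 * 32-bit little-endian values (cpu_to_le32() throughout), hence
	 * the 32-bit DMA masks here.
	 */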
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		rtw_err(rtwdev, "failed to set dma mask to 32-bit\n");
		goto err_release_regions;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
		goto err_release_regions;
	}
#endif

	len = pci_resource_len(pdev, bar_id);
#if defined(__FreeBSD__)
	linuxkpi_pcim_want_to_use_bus_functions(pdev);
#endif
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
#if defined(__FreeBSD__)
err_release_regions:
	pci_release_regions(pdev);
	return ret;
#endif
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}
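
/* DBI: indirect access to the device's PCIe configuration/DBI space. The
 * target address and byte-enable are latched into REG_DBI_FLAG_V1, the
 * write or read flag is raised, and the driver polls up to
 * RTW_PCI_WR_RETRY_CNT times (10 us apart) for the hardware to clear it.
 */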

static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}
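
/* MDIO: indirect access to paged PCIe PHY registers. Judging by the
 * RTW_PCI_MDIO_PG_OFFS_G1/G2 names, g1 selects the parameter pages used at
 * PCIe Gen1 link rate and g2 those for Gen2.
 */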

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value &= ~BIT_CLKREQ_N_PAD;
	else
		value |= BIT_CLKREQ_N_PAD;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when the driver/firmware
	 * enters power save mode, without heavy traffic, because we have
	 * experienced inter-operability issues where the link enters L1 on
	 * the fly even while the driver is sustaining high throughput. This
	 * is probably because ASPM behavior varies slightly between SoCs.
	 */
	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
		return;

	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has enabled REFCLK auto calibration, so it does not need
	 * added clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Although the standard PCIe configuration space has a link control
	 * register, by Realtek's design the driver should check whether the
	 * host supports CLKREQ/ASPM before enabling the HW module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * is responsible for accessing PCIe configuration space to follow
	 * the host settings, and the other is in charge of the actual
	 * CLKREQ/ASPM mechanisms and is disabled by default, because
	 * sometimes the host does not support them, and wrong settings
	 * (e.g. CLKREQ# not bi-directional) could lead to device loss if
	 * the HW misbehaves on the link.
	 *
	 * Hence the driver is designed to first check that the PCIe
	 * configuration space is sync'ed and enabled, and only then turn on
	 * the module that actually operates the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct pci_dev *pdev = rtwpci->pdev;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;
	int ret;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);

	/* Disable 8821ce completion timeout by default */
	if (chip->id == RTW_CHIP_TYPE_8821C) {
		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		if (ret)
			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
				ret);
	}
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, true);
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, false);
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.flush_queues = rtw_pci_flush_queues,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
					      priv);
	int work_done = 0;

	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, false);

	while (work_done < budget) {
		u32 work_done_once;

		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
						 budget - work_done);
		if (work_done_once == 0)
			break;
		work_done += work_done_once;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If an interrupt arrives during polling, before
		 * napi_complete(), and no further data is received, data on
		 * the DMA ring will not be processed immediately. Check
		 * whether the DMA ring is empty and call napi_schedule()
		 * accordingly.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
	}
	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, true);

	return work_done;
}

static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	init_dummy_netdev(&rtwpci->netdev);
	netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
}

static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_stop(rtwdev);
	netif_napi_del(&rtwpci->napi);
}

int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	atomic_set(&rtwpci->link_usage, 1);

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	rtw_pci_napi_init(rtwdev);

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
		rtwpci->rx_no_aspm = true;

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	const struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);

	pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
MODULE_VERSION(rtw_pci, 1);
MODULE_DEPEND(rtw_pci, linuxkpi, 1, 1, 1);
MODULE_DEPEND(rtw_pci, linuxkpi_wlan, 1, 1, 1);
#ifdef CONFIG_RTW88_DEBUGFS
MODULE_DEPEND(rtw_pci, lindebugfs, 1, 1, 1);
#endif
#endif