1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5
6 #include <linux/dma-mapping.h>
7 #include "mt76.h"
8 #include "dma.h"
9
10 #if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
11
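/*
 * Queue register helpers: rings owned by the WED (Wireless Ethernet
 * Dispatch) block are only reachable through the WED register window, so
 * Q_READ/Q_WRITE dispatch on MT_QFLAG_WED and fall back to plain MMIO
 * readl/writel otherwise.
 */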
12 #define Q_READ(_q, _field) ({ \
13 u32 _offset = offsetof(struct mt76_queue_regs, _field); \
14 u32 _val; \
15 if ((_q)->flags & MT_QFLAG_WED) \
16 _val = mtk_wed_device_reg_read((_q)->wed, \
17 ((_q)->wed_regs + \
18 _offset)); \
19 else \
20 _val = readl(&(_q)->regs->_field); \
21 _val; \
22 })
23
24 #define Q_WRITE(_q, _field, _val) do { \
25 u32 _offset = offsetof(struct mt76_queue_regs, _field); \
26 if ((_q)->flags & MT_QFLAG_WED) \
27 mtk_wed_device_reg_write((_q)->wed, \
28 ((_q)->wed_regs + _offset), \
29 _val); \
30 else \
31 writel(_val, &(_q)->regs->_field); \
32 } while (0)
33
34 #else
35
36 #define Q_READ(_q, _field) readl(&(_q)->regs->_field)
37 #define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
38
39 #endif
40
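/*
 * Allocate a TX descriptor buffer (txwi) together with its cache entry.
 * The mt76_txwi_cache struct lives right behind the txwi data in the same
 * allocation; only the txwi portion is DMA-mapped towards the device.
 */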
41 static struct mt76_txwi_cache *
42 mt76_alloc_txwi(struct mt76_dev *dev)
43 {
44 struct mt76_txwi_cache *t;
45 dma_addr_t addr;
46 u8 *txwi;
47 int size;
48
49 size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
50 txwi = kzalloc(size, GFP_ATOMIC);
51 if (!txwi)
52 return NULL;
53
54 addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
55 DMA_TO_DEVICE);
56 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
57 kfree(txwi);
58 return NULL;
59 }
60
61 t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
62 t->dma_addr = addr;
63
64 return t;
65 }
66
67 static struct mt76_txwi_cache *
68 mt76_alloc_rxwi(struct mt76_dev *dev)
69 {
70 struct mt76_txwi_cache *t;
71
72 t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
73 if (!t)
74 return NULL;
75
76 t->ptr = NULL;
77 return t;
78 }
79
80 static struct mt76_txwi_cache *
81 __mt76_get_txwi(struct mt76_dev *dev)
82 {
83 struct mt76_txwi_cache *t = NULL;
84
85 spin_lock(&dev->lock);
86 if (!list_empty(&dev->txwi_cache)) {
87 t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
88 list);
89 list_del(&t->list);
90 }
91 spin_unlock(&dev->lock);
92
93 return t;
94 }
95
96 static struct mt76_txwi_cache *
97 __mt76_get_rxwi(struct mt76_dev *dev)
98 {
99 struct mt76_txwi_cache *t = NULL;
100
101 spin_lock_bh(&dev->wed_lock);
102 if (!list_empty(&dev->rxwi_cache)) {
103 t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
104 list);
105 list_del(&t->list);
106 }
107 spin_unlock_bh(&dev->wed_lock);
108
109 return t;
110 }
111
112 static struct mt76_txwi_cache *
113 mt76_get_txwi(struct mt76_dev *dev)
114 {
115 struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
116
117 if (t)
118 return t;
119
120 return mt76_alloc_txwi(dev);
121 }
122
123 struct mt76_txwi_cache *
124 mt76_get_rxwi(struct mt76_dev *dev)
125 {
126 struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
127
128 if (t)
129 return t;
130
131 return mt76_alloc_rxwi(dev);
132 }
133 EXPORT_SYMBOL_GPL(mt76_get_rxwi);
134
135 void
136 mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
137 {
138 if (!t)
139 return;
140
141 spin_lock(&dev->lock);
142 list_add(&t->list, &dev->txwi_cache);
143 spin_unlock(&dev->lock);
144 }
145 EXPORT_SYMBOL_GPL(mt76_put_txwi);
146
147 void
148 mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
149 {
150 if (!t)
151 return;
152
153 spin_lock_bh(&dev->wed_lock);
154 list_add(&t->list, &dev->rxwi_cache);
155 spin_unlock_bh(&dev->wed_lock);
156 }
157 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
158
159 static void
160 mt76_free_pending_txwi(struct mt76_dev *dev)
161 {
162 struct mt76_txwi_cache *t;
163
164 local_bh_disable();
165 while ((t = __mt76_get_txwi(dev)) != NULL) {
166 dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
167 DMA_TO_DEVICE);
168 kfree(mt76_get_txwi_ptr(dev, t));
169 }
170 local_bh_enable();
171 }
172
173 void
174 mt76_free_pending_rxwi(struct mt76_dev *dev)
175 {
176 struct mt76_txwi_cache *t;
177
178 local_bh_disable();
179 while ((t = __mt76_get_rxwi(dev)) != NULL) {
180 if (t->ptr)
181 mt76_put_page_pool_buf(t->ptr, false);
182 kfree(t);
183 }
184 local_bh_enable();
185 }
186 EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
187
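/*
 * Re-program the descriptor base and ring size registers and resync the
 * software head/tail pointers with the hardware dma_idx.
 */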
188 static void
189 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
190 {
191 Q_WRITE(q, desc_base, q->desc_dma);
192 if (q->flags & MT_QFLAG_WED_RRO_EN)
193 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
194 else
195 Q_WRITE(q, ring_size, q->ndesc);
196 q->head = Q_READ(q, dma_idx);
197 q->tail = q->head;
198 }
199
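/*
 * Reset a queue: mark every descriptor as done (WED RRO indication rings
 * use their own descriptor layout and are skipped), optionally clear the
 * hardware cpu/dma indices, and resync the software state.
 */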
200 void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
201 bool reset_idx)
202 {
203 if (!q || !q->ndesc)
204 return;
205
206 if (!mt76_queue_is_wed_rro_ind(q)) {
207 int i;
208
209 /* clear descriptors */
210 for (i = 0; i < q->ndesc; i++)
211 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
212 }
213
214 if (reset_idx) {
215 Q_WRITE(q, cpu_idx, 0);
216 Q_WRITE(q, dma_idx, 0);
217 }
218 mt76_dma_sync_idx(dev, q);
219 }
220
221 void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
222 {
223 __mt76_dma_queue_reset(dev, q, true);
224 }
225
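/*
 * Attach a single RX buffer to the ring slot at q->head. On WED-managed RX
 * queues the buffer is registered in the rx token table and the token is
 * encoded into buf1 so the buffer can be looked up again on completion;
 * WED RRO indication rings skip the descriptor setup entirely.
 */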
226 static int
227 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
228 struct mt76_queue_buf *buf, void *data)
229 {
230 struct mt76_queue_entry *entry = &q->entry[q->head];
231 struct mt76_txwi_cache *txwi = NULL;
232 struct mt76_desc *desc;
233 int idx = q->head;
234 u32 buf1 = 0, ctrl;
235 int rx_token;
236
237 if (mt76_queue_is_wed_rro_ind(q)) {
238 struct mt76_wed_rro_desc *rro_desc;
239
240 rro_desc = (struct mt76_wed_rro_desc *)q->desc;
241 data = &rro_desc[q->head];
242 goto done;
243 }
244
245 desc = &q->desc[q->head];
246 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
247 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
248 buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
249 #endif
250
251 if (mt76_queue_is_wed_rx(q)) {
252 txwi = mt76_get_rxwi(dev);
253 if (!txwi)
254 return -ENOMEM;
255
256 rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
257 if (rx_token < 0) {
258 mt76_put_rxwi(dev, txwi);
259 return -ENOMEM;
260 }
261
262 buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
263 ctrl |= MT_DMA_CTL_TO_HOST;
264 }
265
266 WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
267 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
268 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
269 WRITE_ONCE(desc->info, 0);
270
271 done:
272 entry->dma_addr[0] = buf->addr;
273 entry->dma_len[0] = buf->len;
274 entry->txwi = txwi;
275 entry->buf = data;
276 entry->wcid = 0xffff;
277 entry->skip_buf1 = true;
278 q->head = (q->head + 1) % q->ndesc;
279 q->queued++;
280
281 return idx;
282 }
283
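/*
 * Chain up to two buffers per descriptor for TX. The final segment is
 * flagged with MT_DMA_CTL_LAST_SEC0/1, and the skb and txwi are attached
 * to the last entry so TX completion can unmap and free them.
 */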
284 static int
285 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
286 struct mt76_queue_buf *buf, int nbufs, u32 info,
287 struct sk_buff *skb, void *txwi)
288 {
289 struct mt76_queue_entry *entry;
290 struct mt76_desc *desc;
291 int i, idx = -1;
292 u32 ctrl, next;
293
294 if (txwi) {
295 q->entry[q->head].txwi = DMA_DUMMY_DATA;
296 q->entry[q->head].skip_buf0 = true;
297 }
298
299 for (i = 0; i < nbufs; i += 2, buf += 2) {
300 u32 buf0 = buf[0].addr, buf1 = 0;
301
302 idx = q->head;
303 next = (q->head + 1) % q->ndesc;
304
305 desc = &q->desc[idx];
306 entry = &q->entry[idx];
307
308 if (buf[0].skip_unmap)
309 entry->skip_buf0 = true;
310 entry->skip_buf1 = i == nbufs - 1;
311
312 entry->dma_addr[0] = buf[0].addr;
313 entry->dma_len[0] = buf[0].len;
314
315 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
316 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
317 info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
318 #endif
319 if (i < nbufs - 1) {
320 entry->dma_addr[1] = buf[1].addr;
321 entry->dma_len[1] = buf[1].len;
322 buf1 = buf[1].addr;
323 ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
324 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
325 info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
326 buf[1].addr >> 32);
327 #endif
328 if (buf[1].skip_unmap)
329 entry->skip_buf1 = true;
330 }
331
332 if (i == nbufs - 1)
333 ctrl |= MT_DMA_CTL_LAST_SEC0;
334 else if (i == nbufs - 2)
335 ctrl |= MT_DMA_CTL_LAST_SEC1;
336
337 WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
338 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
339 WRITE_ONCE(desc->info, cpu_to_le32(info));
340 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
341
342 q->head = next;
343 q->queued++;
344 }
345
346 q->entry[idx].txwi = txwi;
347 q->entry[idx].skb = skb;
348 q->entry[idx].wcid = 0xffff;
349
350 return idx;
351 }
352
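/*
 * Unmap the DMA buffers of a completed TX entry, hand a copy of the entry
 * back to the caller and clear the slot for reuse.
 */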
353 static void
354 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
355 struct mt76_queue_entry *prev_e)
356 {
357 struct mt76_queue_entry *e = &q->entry[idx];
358
359 if (!e->skip_buf0)
360 dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
361 DMA_TO_DEVICE);
362
363 if (!e->skip_buf1)
364 dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
365 DMA_TO_DEVICE);
366
367 if (e->txwi == DMA_DUMMY_DATA)
368 e->txwi = NULL;
369
370 *prev_e = *e;
371 memset(e, 0, sizeof(*e));
372 }
373
374 static void
375 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
376 {
377 wmb();
378 Q_WRITE(q, cpu_idx, q->head);
379 }
380
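/*
 * Reap completed TX descriptors up to the hardware dma_idx (or the whole
 * ring when flushing), release their txwi entries and wake up tx_wait
 * once the queue has drained.
 */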
381 static void
382 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
383 {
384 struct mt76_queue_entry entry;
385 int last;
386
387 if (!q || !q->ndesc)
388 return;
389
390 spin_lock_bh(&q->cleanup_lock);
391 if (flush)
392 last = -1;
393 else
394 last = Q_READ(q, dma_idx);
395
396 while (q->queued > 0 && q->tail != last) {
397 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
398 mt76_queue_tx_complete(dev, q, &entry);
399
400 if (entry.txwi) {
401 if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
402 mt76_put_txwi(dev, entry.txwi);
403 }
404
405 if (!flush && q->tail == last)
406 last = Q_READ(q, dma_idx);
407 }
408 spin_unlock_bh(&q->cleanup_lock);
409
410 if (flush) {
411 spin_lock_bh(&q->lock);
412 mt76_dma_sync_idx(dev, q);
413 mt76_dma_kick_queue(dev, q);
414 spin_unlock_bh(&q->lock);
415 }
416
417 if (!q->queued)
418 wake_up(&dev->tx_wait);
419 }
420
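/*
 * Extract the buffer behind a completed RX descriptor and sync it for CPU
 * access; WED RX queues recover it through the rx token table rather than
 * the queue entry itself.
 */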
421 static void *
422 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
423 int *len, u32 *info, bool *more, bool *drop)
424 {
425 struct mt76_queue_entry *e = &q->entry[idx];
426 struct mt76_desc *desc = &q->desc[idx];
427 u32 ctrl, desc_info, buf1;
428 void *buf = e->buf;
429
430 if (mt76_queue_is_wed_rro_ind(q))
431 goto done;
432
433 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
434 if (len) {
435 *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
436 *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
437 }
438
439 desc_info = le32_to_cpu(desc->info);
440 if (info)
441 *info = desc_info;
442
443 buf1 = le32_to_cpu(desc->buf1);
444 mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
445
446 if (mt76_queue_is_wed_rx(q)) {
447 u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
448 struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
449
450 if (!t)
451 return NULL;
452
453 dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
454 SKB_WITH_OVERHEAD(q->buf_size),
455 page_pool_get_dma_dir(q->page_pool));
456
457 buf = t->ptr;
458 t->dma_addr = 0;
459 t->ptr = NULL;
460
461 mt76_put_rxwi(dev, t);
462 if (drop)
463 *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
464 } else {
465 dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
466 SKB_WITH_OVERHEAD(q->buf_size),
467 page_pool_get_dma_dir(q->page_pool));
468 }
469
470 done:
471 e->buf = NULL;
472 return buf;
473 }
474
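/*
 * Pop the next completed descriptor from the ring tail. When flushing,
 * descriptors are forcibly marked as done so every pending buffer can be
 * reclaimed.
 */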
475 static void *
476 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
477 int *len, u32 *info, bool *more, bool *drop)
478 {
479 int idx = q->tail;
480
481 *more = false;
482 if (!q->queued)
483 return NULL;
484
485 if (mt76_queue_is_wed_rro_data(q))
486 return NULL;
487
488 if (!mt76_queue_is_wed_rro_ind(q)) {
489 if (flush)
490 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
491 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
492 return NULL;
493 }
494
495 q->tail = (q->tail + 1) % q->ndesc;
496 q->queued--;
497
498 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
499 }
500
501 static int
502 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
503 struct sk_buff *skb, u32 tx_info)
504 {
505 struct mt76_queue_buf buf = {};
506 dma_addr_t addr;
507
508 if (test_bit(MT76_MCU_RESET, &dev->phy.state))
509 goto error;
510
511 if (q->queued + 1 >= q->ndesc - 1)
512 goto error;
513
514 addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
515 DMA_TO_DEVICE);
516 if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
517 goto error;
518
519 buf.addr = addr;
520 buf.len = skb->len;
521
522 spin_lock_bh(&q->lock);
523 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
524 mt76_dma_kick_queue(dev, q);
525 spin_unlock_bh(&q->lock);
526
527 return 0;
528
529 error:
530 dev_kfree_skb(skb);
531 return -ENOMEM;
532 }
533
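/*
 * Map an skb (head plus fragments) for TX, let the driver build the txwi
 * via tx_prepare_skb() and push the resulting buffer list to the ring. On
 * failure the skb is completed back to mac80211 with an empty status.
 */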
534 static int
535 mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
536 enum mt76_txq_id qid, struct sk_buff *skb,
537 struct mt76_wcid *wcid, struct ieee80211_sta *sta)
538 {
539 struct ieee80211_tx_status status = {
540 .sta = sta,
541 };
542 struct mt76_tx_info tx_info = {
543 .skb = skb,
544 };
545 struct mt76_dev *dev = phy->dev;
546 struct ieee80211_hw *hw;
547 int len, n = 0, ret = -ENOMEM;
548 struct mt76_txwi_cache *t;
549 struct sk_buff *iter;
550 dma_addr_t addr;
551 u8 *txwi;
552
553 if (test_bit(MT76_RESET, &phy->state))
554 goto free_skb;
555
556 t = mt76_get_txwi(dev);
557 if (!t)
558 goto free_skb;
559
560 txwi = mt76_get_txwi_ptr(dev, t);
561
562 skb->prev = skb->next = NULL;
563 if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
564 mt76_insert_hdr_pad(skb);
565
566 len = skb_headlen(skb);
567 addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
568 if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
569 goto free;
570
571 tx_info.buf[n].addr = t->dma_addr;
572 tx_info.buf[n++].len = dev->drv->txwi_size;
573 tx_info.buf[n].addr = addr;
574 tx_info.buf[n++].len = len;
575
576 skb_walk_frags(skb, iter) {
577 if (n == ARRAY_SIZE(tx_info.buf))
578 goto unmap;
579
580 addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
581 DMA_TO_DEVICE);
582 if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
583 goto unmap;
584
585 tx_info.buf[n].addr = addr;
586 tx_info.buf[n++].len = iter->len;
587 }
588 tx_info.nbuf = n;
589
590 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
591 ret = -ENOMEM;
592 goto unmap;
593 }
594
595 dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
596 DMA_TO_DEVICE);
597 ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
598 dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
599 DMA_TO_DEVICE);
600 if (ret < 0)
601 goto unmap;
602
603 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
604 tx_info.info, tx_info.skb, t);
605
606 unmap:
607 for (n--; n > 0; n--)
608 dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
609 tx_info.buf[n].len, DMA_TO_DEVICE);
610
611 free:
612 #ifdef CONFIG_NL80211_TESTMODE
613 /* fix tx_done accounting on queue overflow */
614 if (mt76_is_testmode_skb(dev, skb, &hw)) {
615 struct mt76_phy *phy = hw->priv;
616
617 if (tx_info.skb == phy->test.tx_skb)
618 phy->test.tx_done--;
619 }
620 #endif
621
622 mt76_put_txwi(dev, t);
623
624 free_skb:
625 status.skb = tx_info.skb;
626 hw = mt76_tx_status_get_hw(dev, tx_info.skb);
627 spin_lock_bh(&dev->rx_lock);
628 ieee80211_tx_status_ext(hw, &status);
629 spin_unlock_bh(&dev->rx_lock);
630
631 return ret;
632 }
633
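/*
 * Refill an RX ring with page pool buffers until only one free slot is
 * left; WED RRO indication rings are populated with ring entries only and
 * no page pool buffers are attached to them.
 */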
634 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
635 bool allow_direct)
636 {
637 int len = SKB_WITH_OVERHEAD(q->buf_size);
638 int frames = 0;
639
640 if (!q->ndesc)
641 return 0;
642
643 spin_lock_bh(&q->lock);
644
645 while (q->queued < q->ndesc - 1) {
646 struct mt76_queue_buf qbuf = {};
647 enum dma_data_direction dir;
648 dma_addr_t addr;
649 int offset;
650 void *buf = NULL;
651
652 if (mt76_queue_is_wed_rro_ind(q))
653 goto done;
654
655 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
656 if (!buf)
657 break;
658
659 addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
660 dir = page_pool_get_dma_dir(q->page_pool);
661 dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
662
663 qbuf.addr = addr + q->buf_offset;
664 done:
665 qbuf.len = len - q->buf_offset;
666 qbuf.skip_unmap = false;
667 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
668 mt76_put_page_pool_buf(buf, allow_direct);
669 break;
670 }
671 frames++;
672 }
673
674 if (frames || mt76_queue_is_wed_rx(q))
675 mt76_dma_kick_queue(dev, q);
676
677 spin_unlock_bh(&q->lock);
678
679 return frames;
680 }
681
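/*
 * Allocate the descriptor ring, entry array and page pool of a queue and
 * hook it up to WED where needed; rings fully owned by WED (RRO with rx
 * offload capability, or the tx-free ring) skip the initial reset here.
 */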
682 static int
683 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
684 int idx, int n_desc, int bufsize,
685 u32 ring_base)
686 {
687 int ret, size;
688
689 spin_lock_init(&q->lock);
690 spin_lock_init(&q->cleanup_lock);
691
692 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
693 q->ndesc = n_desc;
694 q->buf_size = bufsize;
695 q->hw_idx = idx;
696
697 size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
698 : sizeof(struct mt76_desc);
699 q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
700 &q->desc_dma, GFP_KERNEL);
701 if (!q->desc)
702 return -ENOMEM;
703
704 if (mt76_queue_is_wed_rro_ind(q)) {
705 struct mt76_wed_rro_desc *rro_desc;
706 int i;
707
708 rro_desc = (struct mt76_wed_rro_desc *)q->desc;
709 for (i = 0; i < q->ndesc; i++) {
710 struct mt76_wed_rro_ind *cmd;
711
712 cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
713 cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
714 }
715 }
716
717 size = q->ndesc * sizeof(*q->entry);
718 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
719 if (!q->entry)
720 return -ENOMEM;
721
722 ret = mt76_create_page_pool(dev, q);
723 if (ret)
724 return ret;
725
726 ret = mt76_wed_dma_setup(dev, q, false);
727 if (ret)
728 return ret;
729
730 if (mtk_wed_device_active(&dev->mmio.wed)) {
731 if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
732 mt76_queue_is_wed_tx_free(q))
733 return 0;
734 }
735
736 mt76_dma_queue_reset(dev, q);
737
738 return 0;
739 }
740
741 static void
742 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
743 {
744 void *buf;
745 bool more;
746
747 if (!q->ndesc)
748 return;
749
750 do {
751 spin_lock_bh(&q->lock);
752 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
753 spin_unlock_bh(&q->lock);
754
755 if (!buf)
756 break;
757
758 if (!mt76_queue_is_wed_rro(q))
759 mt76_put_page_pool_buf(buf, false);
760 } while (1);
761
762 spin_lock_bh(&q->lock);
763 if (q->rx_head) {
764 dev_kfree_skb(q->rx_head);
765 q->rx_head = NULL;
766 }
767
768 spin_unlock_bh(&q->lock);
769 }
770
771 static void
772 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
773 {
774 struct mt76_queue *q = &dev->q_rx[qid];
775
776 if (!q->ndesc)
777 return;
778
779 if (!mt76_queue_is_wed_rro_ind(q)) {
780 int i;
781
782 for (i = 0; i < q->ndesc; i++)
783 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
784 }
785
786 mt76_dma_rx_cleanup(dev, q);
787
788 /* reset WED rx queues */
789 mt76_wed_dma_setup(dev, q, true);
790
791 if (mt76_queue_is_wed_tx_free(q))
792 return;
793
794 if (mtk_wed_device_active(&dev->mmio.wed) &&
795 mt76_queue_is_wed_rro(q))
796 return;
797
798 mt76_dma_sync_idx(dev, q);
799 mt76_dma_rx_fill(dev, q, false);
800 }
801
802 static void
803 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
804 int len, bool more, u32 info, bool allow_direct)
805 {
806 struct sk_buff *skb = q->rx_head;
807 struct skb_shared_info *shinfo = skb_shinfo(skb);
808 int nr_frags = shinfo->nr_frags;
809
810 if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
811 struct page *page = virt_to_head_page(data);
812 int offset = data - page_address(page) + q->buf_offset;
813
814 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
815 } else {
816 mt76_put_page_pool_buf(data, allow_direct);
817 }
818
819 if (more)
820 return;
821
822 q->rx_head = NULL;
823 if (nr_frags < ARRAY_SIZE(shinfo->frags))
824 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
825 else
826 dev_kfree_skb(skb);
827 }
828
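/*
 * NAPI RX processing: pull completed buffers off the ring, reassemble
 * multi-buffer frames via q->rx_head, build skbs and pass them to the
 * driver's rx_skb() handler, then refill the ring. WED tx-free rings are
 * bounded by the hardware dma_idx rather than the DMA done bit.
 */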
829 static int
830 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
831 {
832 int len, data_len, done = 0, dma_idx;
833 struct sk_buff *skb;
834 unsigned char *data;
835 bool check_ddone = false;
836 bool allow_direct = !mt76_queue_is_wed_rx(q);
837 bool more;
838
839 if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
840 mt76_queue_is_wed_tx_free(q)) {
841 dma_idx = Q_READ(q, dma_idx);
842 check_ddone = true;
843 }
844
845 while (done < budget) {
846 bool drop = false;
847 u32 info;
848
849 if (check_ddone) {
850 if (q->tail == dma_idx)
851 dma_idx = Q_READ(q, dma_idx);
852
853 if (q->tail == dma_idx)
854 break;
855 }
856
857 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
858 &drop);
859 if (!data)
860 break;
861
862 if (drop)
863 goto free_frag;
864
865 if (q->rx_head)
866 data_len = q->buf_size;
867 else
868 data_len = SKB_WITH_OVERHEAD(q->buf_size);
869
870 if (data_len < len + q->buf_offset) {
871 dev_kfree_skb(q->rx_head);
872 q->rx_head = NULL;
873 goto free_frag;
874 }
875
876 if (q->rx_head) {
877 mt76_add_fragment(dev, q, data, len, more, info,
878 allow_direct);
879 continue;
880 }
881
882 if (!more && dev->drv->rx_check &&
883 !(dev->drv->rx_check(dev, data, len)))
884 goto free_frag;
885
886 skb = napi_build_skb(data, q->buf_size);
887 if (!skb)
888 goto free_frag;
889
890 skb_reserve(skb, q->buf_offset);
891 skb_mark_for_recycle(skb);
892
893 *(u32 *)skb->cb = info;
894
895 __skb_put(skb, len);
896 done++;
897
898 if (more) {
899 q->rx_head = skb;
900 continue;
901 }
902
903 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
904 continue;
905
906 free_frag:
907 mt76_put_page_pool_buf(data, allow_direct);
908 }
909
910 mt76_dma_rx_fill(dev, q, true);
911 return done;
912 }
913
914 int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
915 {
916 struct mt76_dev *dev;
917 int qid, done = 0, cur;
918
919 dev = mt76_priv(napi->dev);
920 qid = napi - dev->napi;
921
922 rcu_read_lock();
923
924 do {
925 cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
926 mt76_rx_poll_complete(dev, qid, napi);
927 done += cur;
928 } while (cur && done < budget);
929
930 rcu_read_unlock();
931
932 if (done < budget && napi_complete(napi))
933 dev->drv->rx_poll_complete(dev, qid);
934
935 return done;
936 }
937 EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
938
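/*
 * Allocate the dummy netdevs backing the RX and TX NAPI contexts, point
 * their private data back at the mt76_dev, and register and enable one RX
 * NAPI instance per RX queue.
 */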
939 static int
940 mt76_dma_init(struct mt76_dev *dev,
941 int (*poll)(struct napi_struct *napi, int budget))
942 {
943 struct mt76_dev **priv;
944 int i;
945
946 dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
947 if (!dev->napi_dev)
948 return -ENOMEM;
949
950 /* napi_dev private data points to the mt76_dev parent, so mt76_dev
951 * can be retrieved from a given napi_dev
952 */
953 priv = netdev_priv(dev->napi_dev);
954 *priv = dev;
955
956 dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
957 if (!dev->tx_napi_dev) {
958 free_netdev(dev->napi_dev);
959 return -ENOMEM;
960 }
961 priv = netdev_priv(dev->tx_napi_dev);
962 *priv = dev;
963
964 snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
965 wiphy_name(dev->hw->wiphy));
966 dev->napi_dev->threaded = 1;
967 init_completion(&dev->mmio.wed_reset);
968 init_completion(&dev->mmio.wed_reset_complete);
969
970 mt76_for_each_q_rx(dev, i) {
971 netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
972 mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
973 napi_enable(&dev->napi[i]);
974 }
975
976 return 0;
977 }
978
979 static const struct mt76_queue_ops mt76_dma_ops = {
980 .init = mt76_dma_init,
981 .alloc = mt76_dma_alloc_queue,
982 .reset_q = mt76_dma_queue_reset,
983 .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
984 .tx_queue_skb = mt76_dma_tx_queue_skb,
985 .tx_cleanup = mt76_dma_tx_cleanup,
986 .rx_cleanup = mt76_dma_rx_cleanup,
987 .rx_reset = mt76_dma_rx_reset,
988 .kick = mt76_dma_kick_queue,
989 };
990
991 void mt76_dma_attach(struct mt76_dev *dev)
992 {
993 dev->queue_ops = &mt76_dma_ops;
994 }
995 EXPORT_SYMBOL_GPL(mt76_dma_attach);
996
997 void mt76_dma_cleanup(struct mt76_dev *dev)
998 {
999 int i;
1000
1001 mt76_worker_disable(&dev->tx_worker);
1002 netif_napi_del(&dev->tx_napi);
1003
1004 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
1005 struct mt76_phy *phy = dev->phys[i];
1006 int j;
1007
1008 if (!phy)
1009 continue;
1010
1011 for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
1012 mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
1013 }
1014
1015 for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
1016 mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
1017
1018 mt76_for_each_q_rx(dev, i) {
1019 struct mt76_queue *q = &dev->q_rx[i];
1020
1021 if (mtk_wed_device_active(&dev->mmio.wed) &&
1022 mt76_queue_is_wed_rro(q))
1023 continue;
1024
1025 netif_napi_del(&dev->napi[i]);
1026 mt76_dma_rx_cleanup(dev, q);
1027
1028 page_pool_destroy(q->page_pool);
1029 }
1030
1031 if (mtk_wed_device_active(&dev->mmio.wed))
1032 mtk_wed_device_detach(&dev->mmio.wed);
1033
1034 if (mtk_wed_device_active(&dev->mmio.wed_hif2))
1035 mtk_wed_device_detach(&dev->mmio.wed_hif2);
1036
1037 mt76_free_pending_txwi(dev);
1038 mt76_free_pending_rxwi(dev);
1039 free_netdev(dev->napi_dev);
1040 free_netdev(dev->tx_napi_dev);
1041 }
1042 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
1043