/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"

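/*
 * Pre-TBTT tasklet: runs shortly before each beacon interval to refresh the
 * beacon templates of all active interfaces and, unless a channel switch is
 * pending, queue any buffered broadcast/multicast frames on the PSD queue so
 * they go out right after the beacon.
 */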
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
				  NULL);
	}
	spin_unlock_bh(&q->lock);
}

static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

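/*
 * Beacon setup for the MMIO bus variant: register the beacon ops (8 beacon
 * slots of 1024 bytes each) and program the pre-TBTT and GP timers, leaving
 * the timer interrupts disabled until beaconing is enabled.
 */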
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

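/*
 * Allocate a hardware TX ring, attach it to the software queue and enable
 * the corresponding TX-done interrupt.
 */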
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
		      int idx, int n_desc)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	INIT_LIST_HEAD(&q->swq);
	q->q = hwq;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

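/* Drain buffered TX status events and report them to mac80211 */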
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mt76);
}

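/*
 * TX NAPI poll: reap completed frames on all TX queues. The cleanup pass is
 * repeated after re-enabling the TX-done interrupt to catch frames that
 * completed while the interrupt was still masked.
 */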
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	tasklet_schedule(&dev->mt76.tx_tasklet);

	return 0;
}

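/*
 * Set up all DMA rings: one TX ring per AC plus the management (PSD) and MCU
 * TX rings, and the MCU and main RX rings. Also initializes the TX status
 * FIFO, the TX and pre-TBTT tasklets and the TX NAPI context.
 */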
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
		     (unsigned long)dev);
	tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
			  NAPI_POLL_WEIGHT);
	napi_enable(&dev->tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

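/* Called once an RX NAPI poll completes: re-arm the RX-done interrupt */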
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

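/*
 * Top-half interrupt handler: acknowledge the pending interrupt sources and
 * hand the work off to NAPI (RX and TX completion) or tasklets (pre-TBTT,
 * DFS GP timer).
 */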
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		napi_schedule(&dev->tx_napi);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

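/* Enable the TX/RX DMA engines once WPDMA reports idle */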
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->mt76.tx_tasklet);
	netif_napi_del(&dev->tx_napi);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

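/*
 * Heuristic TX hang check: a queue with pending frames whose hardware DMA
 * index has not moved since the last watchdog run is considered stalled.
 */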
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

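/*
 * Key iterator used during watchdog recovery: re-sync the PN/IV state of
 * station keys programmed into the hardware WCID table.
 */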
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

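/*
 * Drop all software state (stations, keys, vif and beacon masks) ahead of a
 * full firmware restart so mac80211 can reprogram the device from scratch.
 */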
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->vif_mask = 0;
	dev->mt76.beacon_mask = 0;
}

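/*
 * Full watchdog recovery: stop queues, tasklets and NAPI, reset the MAC and
 * DMA engines (optionally restarting the MCU firmware), then bring everything
 * back up and restore beaconing and interrupt state.
 */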
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	tasklet_disable(&dev->mt76.tx_tasklet);
	napi_disable(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->tx_napi);
	napi_schedule(&dev->tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mt76);
	}
}

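/*
 * Trigger a watchdog reset once a TX stall has persisted for MT_TX_HANG_TH
 * consecutive checks, or immediately after an MCU command timeout.
 */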
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	mutex_lock(&dev->mt76.mmio.mcu.mutex);
	dev->mcu_timeout = 0;
	mutex_unlock(&dev->mt76.mmio.mcu.mutex);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

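/* Periodic watchdog: check for TX hangs and re-arm the delayed work */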
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}