Lines Matching refs:dev

in mt76x02_pre_tbtt_tasklet():
   16  struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);  (local)
   17  struct mt76_dev *mdev = &dev->mt76;
   18  struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
   23  if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
   26  mt76x02_resync_beacon_timer(dev);
   29  mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
   30  dev->beacon_data_count = 0;
   32  ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
   34          mt76x02_update_beacon_iter, dev);
   36  mt76_wr(dev, MT_BCN_BYPASS_MASK,
   37          0xff00 | ~(0xff00 >> dev->beacon_data_count));
   44  mt76x02_enqueue_buffered_bc(dev, &data, 8);
   62  mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid,
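
The MT_BCN_BYPASS_MASK write at lines 36-37 encodes how many beacon slots were actually filled this interval. A standalone C sketch of the arithmetic (not driver code; it assumes the register consumes the low 16 bits and that a set bit skips a beacon slot):

#include <stdio.h>

int main(void)
{
	/* mirror of: 0xff00 | ~(0xff00 >> dev->beacon_data_count) */
	for (unsigned int count = 0; count <= 8; count++) {
		unsigned int mask = 0xff00 | ~(0xff00 >> count);
		printf("beacon_data_count=%u -> mask=0x%04x\n",
		       count, mask & 0xffff);
	}
	return 0;
}

With count == 0 every slot stays bypassed (0xffff); each additional beacon clears one more bit starting from bit 7 downward, so only slots holding fresh beacon data are transmitted.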

in mt76x02e_pre_tbtt_enable():
   68  static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)  (argument)
   71  tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
   73  tasklet_disable(&dev->mt76.pre_tbtt_tasklet);

in mt76x02e_beacon_enable():
   76  static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)  (argument)
   78  mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
   80  mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
   82  mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);

in mt76x02e_init_beacon_config():
   85  void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)  (argument)
   94  dev->beacon_ops = &beacon_ops;
   97  mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
   99  mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
  101  mt76_wr(dev, MT_INT_TIMER_EN, 0);
  103  mt76x02_init_beacon_config(dev);

in mt76x02_init_rx_queue():
  108  mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,  (argument)
  113  err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
  118  mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

in mt76x02_process_tx_status_fifo():
  123  static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)  (argument)
  128  while (kfifo_get(&dev->txstatus_fifo, &stat))
  129          mt76x02_send_tx_status(dev, &stat, &update);
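
The loop at lines 128-129 drains TX status events that the interrupt path queued into a kfifo. A minimal sketch of that produce/drain pattern with a made-up element type (not the driver's actual status struct):

#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/types.h>

struct demo_stat {
	u16 wcid;
	u8 retry;
};

/* 16-entry FIFO; kfifo sizes must be a power of two */
static DEFINE_KFIFO(demo_fifo, struct demo_stat, 16);

/* producer side, e.g. an interrupt handler */
static void demo_push(struct demo_stat st)
{
	kfifo_put(&demo_fifo, st);	/* silently drops the element if full */
}

/* consumer side, e.g. a worker or NAPI poll */
static void demo_drain(void)
{
	struct demo_stat st;

	while (kfifo_get(&demo_fifo, &st))
		pr_info("wcid %u, %u retries\n", st.wcid, st.retry);
}

A single-producer/single-consumer kfifo needs no locking; the driver still initializes txstatus_fifo_lock (line 185), presumably because status words can be pushed from more than one context.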

in mt76x02_tx_worker():
  134  struct mt76x02_dev *dev;  (local)
  136  dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);
  138  mt76x02_mac_poll_tx_status(dev, false);
  139  mt76x02_process_tx_status_fifo(dev);
  141  mt76_txq_schedule_all(&dev->mphy);

in mt76x02_poll_tx():
  146  struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,  (local)
  150  mt76x02_mac_poll_tx_status(dev, false);
  152  mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
  154  mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
  157  mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
  159  mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
  161  mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
  163  mt76_worker_schedule(&dev->mt76.tx_worker);
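
The cleanup passes on both sides of the mt76x02_irq_enable() call at line 157 are the usual NAPI race-avoidance shape: reap, complete NAPI and re-arm the interrupt, then reap once more so completions that raced with the re-arm are not stranded until the next interrupt. A sketch of that shape (demo_* names are placeholders; the napi_complete_done() branch is an assumption, since the surrounding lines are not in this listing):

#include <linux/netdevice.h>
#include <linux/types.h>

struct demo_dev {
	struct napi_struct tx_napi;
};

/* stubs standing in for the driver's reap/irq/worker helpers */
static void demo_reap_tx(struct demo_dev *dev) { }
static void demo_irq_enable(struct demo_dev *dev, u32 mask) { }
static void demo_worker_schedule(struct demo_dev *dev) { }

#define DEMO_INT_TX_DONE BIT(0)

static int demo_poll_tx(struct napi_struct *napi, int budget)
{
	struct demo_dev *dev = container_of(napi, struct demo_dev, tx_napi);

	demo_reap_tx(dev);			/* first reap pass */

	/* re-enable the TX-done interrupt only once NAPI truly idles */
	if (napi_complete_done(napi, 0))
		demo_irq_enable(dev, DEMO_INT_TX_DONE);

	/* second pass catches completions that raced with the re-arm */
	demo_reap_tx(dev);
	demo_worker_schedule(dev);

	return 0;
}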

in mt76x02_dma_init():
  168  int mt76x02_dma_init(struct mt76x02_dev *dev)  (argument)
  178  status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
  182  dev->mt76.tx_worker.fn = mt76x02_tx_worker;
  183  tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet);
  185  spin_lock_init(&dev->txstatus_fifo_lock);
  186  kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
  188  mt76_dma_attach(&dev->mt76);
  190  mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
  193  ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
  200  ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
  205  ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
  210  mt76x02_irq_enable(dev,
  218  ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
  223  q = &dev->mt76.q_rx[MT_RXQ_MAIN];
  225  ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
  230  ret = mt76_init_queues(dev, mt76_dma_rx_poll);
  234  netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
  236  napi_enable(&dev->mt76.tx_napi);
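
tasklet_setup() at line 183 pairs with from_tasklet() at line 16: the callback receives only the tasklet_struct and recovers the enclosing device with container_of() semantics. A minimal sketch using placeholder demo_* names:

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_intervals;
};

static void demo_pre_tbtt(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the member name */
	struct demo_dev *dev = from_tasklet(dev, t, pre_tbtt_tasklet);

	dev->beacon_intervals++;
}

static void demo_init(struct demo_dev *dev)
{
	tasklet_setup(&dev->pre_tbtt_tasklet, demo_pre_tbtt);
	/* later, typically from an IRQ: tasklet_schedule(&dev->pre_tbtt_tasklet); */
}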

in mt76x02_rx_poll_complete():
  244  struct mt76x02_dev *dev;  (local)
  246  dev = container_of(mdev, struct mt76x02_dev, mt76);
  247  mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));

in mt76x02_irq_handler():
  253  struct mt76x02_dev *dev = dev_instance;  (local)
  256  intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
  257  intr &= dev->mt76.mmio.irqmask;
  258  mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
  260  if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
  263  trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
  269  mt76x02_irq_disable(dev, mask);
  272  napi_schedule(&dev->mt76.napi[0]);
  275  napi_schedule(&dev->mt76.napi[1]);
  278  tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
  282  if (dev->mt76.csa_complete)
  283          mt76_csa_finish(&dev->mt76);
  285  mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
  289  mt76x02_mac_poll_tx_status(dev, true);
  292  napi_schedule(&dev->mt76.tx_napi);
  295  tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
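
Lines 256-258 are the classic MMIO interrupt dance: read the pending causes, keep only the sources software has armed, and write the result back to acknowledge exactly what will be handled (assuming write-one-to-clear semantics, as the write-back of intr suggests). A condensed sketch with assumed demo_* registers and helpers:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

#define DEMO_INT_SOURCE		0x0200
#define DEMO_INT_RX_DONE	BIT(0)

struct demo_dev {
	void __iomem *regs;
	u32 irqmask;
	struct napi_struct napi;
};

static void demo_irq_disable(struct demo_dev *dev, u32 mask)
{
	dev->irqmask &= ~mask;	/* a real driver also updates the HW mask */
}

static irqreturn_t demo_irq_handler(int irq, void *dev_instance)
{
	struct demo_dev *dev = dev_instance;
	u32 intr;

	intr = readl(dev->regs + DEMO_INT_SOURCE);	/* pending causes */
	intr &= dev->irqmask;				/* only armed sources */
	writel(intr, dev->regs + DEMO_INT_SOURCE);	/* ack what we handle */

	if (intr & DEMO_INT_RX_DONE) {
		/* keep the source masked until the NAPI poll re-arms it */
		demo_irq_disable(dev, DEMO_INT_RX_DONE);
		napi_schedule(&dev->napi);
	}

	return IRQ_HANDLED;
}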

in mt76x02_dma_enable():
  301  static void mt76x02_dma_enable(struct mt76x02_dev *dev)  (argument)
  305  mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
  306  mt76x02_wait_for_wpdma(&dev->mt76, 1000);
  312  mt76_set(dev, MT_WPDMA_GLO_CFG, val);
  313  mt76_clear(dev, MT_WPDMA_GLO_CFG,

in mt76x02_dma_disable():
  317  void mt76x02_dma_disable(struct mt76x02_dev *dev)  (argument)
  319  u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
  325  mt76_wr(dev, MT_WPDMA_GLO_CFG, val);

in mt76x02_mac_start():
  329  void mt76x02_mac_start(struct mt76x02_dev *dev)  (argument)
  331  mt76x02_mac_reset_counters(dev);
  332  mt76x02_dma_enable(dev);
  333  mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
  334  mt76_wr(dev, MT_MAC_SYS_CTRL,
  337  mt76x02_irq_enable(dev,

in mt76x02_tx_hang():
  343  static bool mt76x02_tx_hang(struct mt76x02_dev *dev)  (argument)
  350  q = dev->mphy.q_tx[i];
  352  prev_dma_idx = dev->mt76.tx_dma_idx[i];
  354  dev->mt76.tx_dma_idx[i] = dma_idx;
  357  dev->tx_hang_check[i] = 0;
  361  if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
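
mt76x02_tx_hang() detects stalls by snapshotting each TX queue's hardware DMA index: an index frozen across checks while frames are still queued bumps a per-queue strike counter, and MT_TX_HANG_TH consecutive strikes declare a hang. A self-contained sketch of that logic (demo_* names, the threshold value, and the exact reset conditions are assumptions):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_TX_HANG_TH 10	/* strike threshold, value assumed */

struct demo_queue_state {
	uint16_t last_dma_idx;	/* snapshot from the previous check */
	uint8_t hang_check;	/* consecutive no-progress strikes */
};

static bool demo_tx_hang(struct demo_queue_state *st,
			 uint16_t dma_idx, int queued)
{
	bool progressed = dma_idx != st->last_dma_idx;

	st->last_dma_idx = dma_idx;

	/* an empty queue or a moving index resets the counter */
	if (!queued || progressed) {
		st->hang_check = 0;
		return false;
	}

	return ++st->hang_check >= DEMO_TX_HANG_TH;
}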

in mt76x02_key_sync():
  372  struct mt76x02_dev *dev = hw->priv;  (local)
  383  mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);

in mt76x02_reset_state():
  386  static void mt76x02_reset_state(struct mt76x02_dev *dev)  (argument)
  390  lockdep_assert_held(&dev->mt76.mutex);
  392  clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
  395  ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
  405  wcid = rcu_dereference_protected(dev->mt76.wcid[i],
  406          lockdep_is_held(&dev->mt76.mutex));
  410  rcu_assign_pointer(dev->mt76.wcid[i], NULL);
  418  __mt76_sta_remove(&dev->mt76, vif, sta);
  422  dev->mt76.vif_mask = 0;
  423  dev->mt76.beacon_mask = 0;
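
Lines 405-410 are the standard RCU detach pattern: read the pointer under the writer-side lock (so rcu_dereference_protected() needs no rcu_read_lock()), publish NULL, and release the entry only once readers can no longer see it. A generic sketch with placeholder demo names; note the driver itself hands the entry to __mt76_sta_remove() rather than freeing directly:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
	int idx;
};

struct demo_table {
	struct mutex mutex;			/* writer-side lock */
	struct demo_entry __rcu *slots[8];
};

static void demo_remove(struct demo_table *tbl, int i)
{
	struct demo_entry *e;

	/* safe without rcu_read_lock(): we hold the writer-side mutex */
	e = rcu_dereference_protected(tbl->slots[i],
				      lockdep_is_held(&tbl->mutex));
	if (!e)
		return;

	rcu_assign_pointer(tbl->slots[i], NULL);
	synchronize_rcu();	/* wait out in-flight readers */
	kfree(e);
}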

in mt76x02_watchdog_reset():
  426  static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)  (argument)
  428  u32 mask = dev->mt76.mmio.irqmask;
  429  bool restart = dev->mt76.mcu_ops->mcu_restart;
  432  ieee80211_stop_queues(dev->mt76.hw);
  433  set_bit(MT76_RESET, &dev->mphy.state);
  435  tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
  436  mt76_worker_disable(&dev->mt76.tx_worker);
  437  napi_disable(&dev->mt76.tx_napi);
  439  mt76_for_each_q_rx(&dev->mt76, i) {
  440          napi_disable(&dev->mt76.napi[i]);
  443  mutex_lock(&dev->mt76.mutex);
  445  dev->mcu_timeout = 0;
  447  mt76x02_reset_state(dev);
  449  if (dev->mt76.beacon_mask)
  450          mt76_clear(dev, MT_BEACON_TIME_CFG,
  454  mt76x02_irq_disable(dev, mask);
  457  mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
  458  mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
  459  mt76_clear(dev, MT_WPDMA_GLO_CFG,
  462  mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);
  465  mt76_set(dev, 0x734, 0x3);
  468  mt76_mcu_restart(dev);
  470  mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
  472  mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
  474  mt76_for_each_q_rx(&dev->mt76, i) {
  475          mt76_queue_rx_reset(dev, i);
  478  mt76_tx_status_check(&dev->mt76, true);
  480  mt76x02_mac_start(dev);
  482  if (dev->ed_monitor)
  483          mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
  485  if (dev->mt76.beacon_mask && !restart)
  486          mt76_set(dev, MT_BEACON_TIME_CFG,
  490  mt76x02_irq_enable(dev, mask);
  492  mutex_unlock(&dev->mt76.mutex);
  494  clear_bit(MT76_RESET, &dev->mphy.state);
  496  mt76_worker_enable(&dev->mt76.tx_worker);
  497  tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
  500  napi_enable(&dev->mt76.tx_napi);
  501  napi_schedule(&dev->mt76.tx_napi);
  503  mt76_for_each_q_rx(&dev->mt76, i) {
  504          napi_enable(&dev->mt76.napi[i]);
  505          napi_schedule(&dev->mt76.napi[i]);
  510  set_bit(MT76_RESTART, &dev->mphy.state);
  511  mt76x02_mcu_function_select(dev, Q_SELECT, 1);
  512  ieee80211_restart_hw(dev->mt76.hw);
  514  ieee80211_wake_queues(dev->mt76.hw);
  515  mt76_txq_schedule_all(&dev->mphy);
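
Read top to bottom, mt76x02_watchdog_reset() follows the usual quiesce/reset/resume shape: stop the mac80211 queues and set MT76_RESET; disable the bottom halves (pre-TBTT tasklet, TX worker, TX and RX NAPI); then, under the device mutex, tear down software state, silence and acknowledge interrupts, halt the MAC and DMA, optionally restart the MCU, and flush every TX and RX ring before restarting the MAC; finally re-enable everything in roughly the reverse order. When the MCU was restarted it escalates to a full ieee80211_restart_hw(); otherwise it simply wakes the queues and reschedules transmission.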

in mt76x02_reconfig_complete():
  522  struct mt76x02_dev *dev = hw->priv;  (local)
  527  clear_bit(MT76_RESTART, &dev->mphy.state);

in mt76x02_check_tx_hang():
  531  static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)  (argument)
  533  if (test_bit(MT76_RESTART, &dev->mphy.state))
  536  if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
  539  mt76x02_watchdog_reset(dev);
  541  dev->tx_hang_reset++;
  542  memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
  543  memset(dev->mt76.tx_dma_idx, 0xff,
  544          sizeof(dev->mt76.tx_dma_idx));

in mt76x02_wdt_work():
  549  struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,  (local)
  552  mt76x02_check_tx_hang(dev);
  554  ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
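
The work item re-queues itself at line 554, giving a periodic watchdog without a dedicated timer; the driver routes it through ieee80211_queue_delayed_work() so mac80211 can hold it off while the hardware is down. The same self-rearming pattern with the plain workqueue API (demo_* names and the period are placeholders):

#include <linux/workqueue.h>

#define DEMO_WATCHDOG_PERIOD (HZ / 10)

struct demo_dev {
	struct delayed_work wdt_work;
};

static void demo_wdt_work(struct work_struct *work)
{
	/* delayed_work embeds a work_struct as its .work member */
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    wdt_work.work);

	/* run hang/timeout checks here, then re-arm */
	schedule_delayed_work(&dev->wdt_work, DEMO_WATCHDOG_PERIOD);
}

static void demo_wdt_start(struct demo_dev *dev)
{
	INIT_DELAYED_WORK(&dev->wdt_work, demo_wdt_work);
	schedule_delayed_work(&dev->wdt_work, DEMO_WATCHDOG_PERIOD);
}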