Lines Matching refs:dev

72 u32 (*rr)(struct mt76_dev *dev, u32 offset);
73 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
74 u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
75 void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
77 void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
79 int (*wr_rp)(struct mt76_dev *dev, u32 base,
81 int (*rd_rp)(struct mt76_dev *dev, u32 base,
86 #define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB) argument
87 #define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO) argument
88 #define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO) argument
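The function pointers at lines 72-81 belong to struct mt76_bus_ops, the per-bus register access backend, and the three predicates at lines 86-88 test which backend a device uses via its type member. A minimal sketch of how an MMIO-style backend might fill the table, modeled on what mt76_mmio_init() (line 1047) installs; the mt76_foo_* names are hypothetical:

static u32 mt76_foo_rr(struct mt76_dev *dev, u32 offset)
{
	return readl(dev->mmio.regs + offset);
}

static void mt76_foo_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	writel(val, dev->mmio.regs + offset);
}

static u32 mt76_foo_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	/* clear the mask bits, then OR in the new value */
	val |= mt76_foo_rr(dev, offset) & ~mask;
	mt76_foo_wr(dev, offset, val);
	return val;
}

static const struct mt76_bus_ops mt76_foo_ops = {
	.rr = mt76_foo_rr,
	.wr = mt76_foo_wr,
	.rmw = mt76_foo_rmw,
	.type = MT76_BUS_MMIO,
};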
220 int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
222 int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
224 int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
226 u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
227 void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
228 int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
230 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
232 int (*mcu_restart)(struct mt76_dev *dev);
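Lines 220-232 are the struct mt76_mcu_ops callbacks through which the core talks to device firmware; the mt76_mcu_* helpers near line 1512 route through them. A hedged sketch of a driver-side table; the handler names and the descriptor type behind headroom are hypothetical, but headroom itself is a real member (line 1526 reads it):

static const struct mt76_mcu_ops mt76_foo_mcu_ops = {
	.headroom = sizeof(struct mt76_foo_mcu_txd),	/* hypothetical descriptor */
	.mcu_skb_send_msg = mt76_foo_mcu_send_message,
	.mcu_parse_response = mt76_foo_mcu_parse_response,
};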
236 int (*init)(struct mt76_dev *dev,
239 int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
243 int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
247 int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
250 void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
253 void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
255 void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
258 void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
260 void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
262 void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
373 struct mt76_dev *dev; member
456 int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
461 void (*tx_complete_skb)(struct mt76_dev *dev,
464 bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
466 bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
468 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
471 void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
473 void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
476 int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
479 void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
482 void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
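Lines 456-482 belong to struct mt76_driver_ops: per-chip hooks for TX descriptor preparation, RX dispatch, and station lifecycle. A sketch of the shape of such a table, using only members visible in this listing plus txwi_size (read at line 1158); the mt76_foo_* handlers and the size are illustrative:

static const struct mt76_driver_ops mt76_foo_drv_ops = {
	.txwi_size = 64,	/* illustrative size only */
	.tx_prepare_skb = mt76_foo_tx_prepare_skb,
	.tx_complete_skb = mt76_foo_tx_complete_skb,
	.rx_skb = mt76_foo_queue_rx_skb,
	.sta_add = mt76_foo_sta_add,
	.sta_remove = mt76_foo_sta_remove,
};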
601 int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
723 struct mt76_dev *dev; member
795 struct device *dev; member
1002 #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) argument
1003 #define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__) argument
1004 #define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__) argument
1005 #define __mt76_wr_copy(dev, ...) (dev)->bus->write_copy((dev), __VA_ARGS__) argument
1006 #define __mt76_rr_copy(dev, ...) (dev)->bus->read_copy((dev), __VA_ARGS__) argument
1008 #define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val) argument
1009 #define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0) argument
1011 #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__) argument
1012 #define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__) argument
1013 #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__) argument
1014 #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__) argument
1015 #define mt76_rr_copy(dev, ...) (dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__) argument
1016 #define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) argument
1017 #define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) argument
1020 #define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76)) argument
1022 #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val) argument
1023 #define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0) argument
1026 FIELD_GET(_field, mt76_rr(dev, _reg))
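Two parallel macro families: the __mt76_* variants (lines 1002-1009) take a struct mt76_dev * directly, while the mt76_* variants (lines 1011-1023) take a driver device that embeds it as ->mt76. mt76_set and mt76_clear are read-modify-write specializations, and mt76_get_field (body at line 1026) extracts a bitfield with FIELD_GET. Usage sketch; the register and field names are hypothetical:

u32 v = mt76_rr(dev, MT_FOO_CFG);	/* plain read */
mt76_set(dev, MT_FOO_CFG, BIT(0));	/* rmw: clear nothing, set bit 0 */
mt76_clear(dev, MT_FOO_CFG, BIT(1));	/* rmw: clear bit 1, set nothing */
v = mt76_get_field(dev, MT_FOO_CFG, MT_FOO_CFG_MODE);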
1034 #define mt76_hw(dev) (dev)->mphy.hw argument
1036 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
1039 #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__) argument
1041 bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
1044 #define mt76_poll_msec(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10) argument
1045 #define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__) argument
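__mt76_poll busy-waits between reads; ____mt76_poll_msec sleeps instead, and the two wrappers differ only in the polling tick: mt76_poll_msec (line 1044) appends a default 10 ms tick through the trailing ", 10", while mt76_poll_msec_tick lets the caller pass it explicitly. Typical wait-for-ready sketch; the register and mask names are hypothetical:

if (!mt76_poll_msec(dev, MT_MCU_STATE, MT_MCU_READY, MT_MCU_READY, 1000))
	return -ETIMEDOUT;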
1047 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
1050 static inline u16 mt76_chip(struct mt76_dev *dev) in mt76_chip() argument
1052 return dev->rev >> 16; in mt76_chip()
1055 static inline u16 mt76_rev(struct mt76_dev *dev) in mt76_rev() argument
1057 return dev->rev & 0xffff; in mt76_rev()
1060 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) argument
1061 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) argument
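dev->rev packs the chip id in the upper 16 bits and the hardware revision in the lower 16; the mt76xx_* wrappers just resolve the embedded ->mt76 first. Worked example with an illustrative revision value:

/* dev->mt76.rev == 0x76630010 (illustrative) */
u16 chip = mt76xx_chip(dev);	/* 0x7663 */
u16 hw_rev = mt76xx_rev(dev);	/* 0x0010 */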
1063 #define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__) argument
1064 #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) argument
1065 #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) argument
1066 #define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) argument
1067 #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) argument
1068 #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) argument
1069 #define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__) argument
1070 #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) argument
1071 #define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__) argument
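These wrappers (lines 1063-1071) forward to the struct mt76_queue_ops callbacks declared at lines 236-262. A common TX-path pattern is to enqueue a batch of frames, kick the hardware once, and reap completed descriptors later; a sketch, with dev the driver device and q assumed in scope:

mt76_queue_kick(dev, q);		/* ring the doorbell once per batch */
/* ... later, from the TX completion path: */
mt76_queue_tx_cleanup(dev, q, false);	/* flush == false: reap only what completed */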
1073 #define mt76_for_each_q_rx(dev, i) \ argument
1074 for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
1075 if ((dev)->q_rx[i].ndesc)
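mt76_for_each_q_rx iterates only over RX queues that were actually allocated (nonzero ndesc). Usage sketch; note the macro takes the struct mt76_dev * while the queue wrappers above take the driver device embedding it:

struct mt76_dev *mdev = &dev->mt76;
int i;

mt76_for_each_q_rx(mdev, i)
	mt76_queue_rx_cleanup(dev, &mdev->q_rx[i]);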
1080 int mt76_register_device(struct mt76_dev *dev, bool vht,
1082 void mt76_unregister_device(struct mt76_dev *dev);
1083 void mt76_free_device(struct mt76_dev *dev);
1086 struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
1094 static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev) in mt76_register_debugfs() argument
1096 return mt76_register_debugfs_fops(&dev->phy, NULL); in mt76_register_debugfs()
1103 int mt76_eeprom_init(struct mt76_dev *dev, int len);
1105 int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
1108 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1116 q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags); in mt76_init_tx_queue()
1125 static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx, in mt76_init_mcu_queue() argument
1130 q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0); in mt76_init_mcu_queue()
1134 dev->q_mcu[qid] = q; in mt76_init_mcu_queue()
1140 mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx) in mt76_dev_phy() argument
1142 if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) || in mt76_dev_phy()
1143 (phy_idx == MT_BAND2 && dev->phys[phy_idx])) in mt76_dev_phy()
1144 return dev->phys[phy_idx]; in mt76_dev_phy()
1146 return &dev->phy; in mt76_dev_phy()
1150 mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx) in mt76_phy_hw() argument
1152 return mt76_dev_phy(dev, phy_idx)->hw; in mt76_phy_hw()
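mt76_dev_phy returns the extension phy for MT_BAND1 or MT_BAND2 when one was allocated and otherwise falls back to the primary &dev->phy, so mt76_phy_hw always yields a valid ieee80211_hw:

/* Safe even on single-band hardware: falls back to dev->phy.hw. */
struct ieee80211_hw *hw = mt76_phy_hw(dev, MT_BAND1);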
1156 mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t) in mt76_get_txwi_ptr() argument
1158 return (u8 *)t - dev->drv->txwi_size; in mt76_get_txwi_ptr()
1259 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev, in mt76_is_testmode_skb() argument
1266 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { in mt76_is_testmode_skb()
1267 struct mt76_phy *phy = dev->phys[i]; in mt76_is_testmode_skb()
1270 *hw = dev->phys[i]->hw; in mt76_is_testmode_skb()
1280 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
1281 void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
1289 void mt76_tx_worker_run(struct mt76_dev *dev);
1305 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
1307 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
1309 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1312 void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
1313 __acquires(&dev->status_lock);
1314 void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
1315 __releases(&dev->status_lock);
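The __acquires/__releases annotations at lines 1313 and 1315 are sparse hints that this pair takes and drops dev->status_lock. Callers bracket status-list operations with them, as mt76_packet_id_flush (line 1627 below) does; a sketch of the same pattern, with wcid and pktid assumed in scope:

struct sk_buff_head list;
struct sk_buff *skb;

mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);	/* pktid == -1 flushes all */
mt76_tx_status_unlock(dev, &list);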
1317 int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
1319 struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
1322 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
1324 void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
1327 mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb) in mt76_tx_complete_skb() argument
1329 __mt76_tx_complete_skb(dev, wcid, skb, NULL); in mt76_tx_complete_skb()
1332 void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
1337 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1342 int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
1352 void mt76_csa_check(struct mt76_dev *dev);
1353 void mt76_csa_finish(struct mt76_dev *dev);
1358 int mt76_get_rate(struct mt76_dev *dev,
1388 mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb) in mt76_tx_status_get_hw() argument
1392 struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx); in mt76_tx_status_get_hw()
1399 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
1400 void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
1401 struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
1402 void mt76_free_pending_rxwi(struct mt76_dev *dev);
1403 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1405 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1409 void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
1429 mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, in mt76u_bulk_msg() argument
1435 struct usb_interface *uintf = to_usb_interface(dev->dev); in mt76u_bulk_msg()
1437 struct mt76_usb *usb = &dev->usb; in mt76u_bulk_msg()
1449 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
1453 int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
1455 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
1458 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
1460 void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
1462 u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
1463 void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
1465 int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
1467 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
1468 int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
1469 int mt76u_alloc_queues(struct mt76_dev *dev);
1470 void mt76u_stop_tx(struct mt76_dev *dev);
1471 void mt76u_stop_rx(struct mt76_dev *dev);
1472 int mt76u_resume_rx(struct mt76_dev *dev);
1473 void mt76u_queues_deinit(struct mt76_dev *dev);
1475 int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
1477 int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
1478 int mt76s_alloc_tx(struct mt76_dev *dev);
1479 void mt76s_deinit(struct mt76_dev *dev);
1482 bool mt76s_txqs_empty(struct mt76_dev *dev);
1483 int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
1485 u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
1486 void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
1487 u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
1488 u32 mt76s_read_pcr(struct mt76_dev *dev);
1489 void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
1491 void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
1493 int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
1496 int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
1500 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
1503 mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, in mt76_mcu_msg_alloc() argument
1506 return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL); in mt76_mcu_msg_alloc()
1509 void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
1510 struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
1512 int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
1514 int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
1517 int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
1523 mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, in mt76_mcu_send_firmware() argument
1526 int max_len = 4096 - dev->mcu_ops->headroom; in mt76_mcu_send_firmware()
1528 return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len); in mt76_mcu_send_firmware()
1532 mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len, in mt76_mcu_send_msg() argument
1535 return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL); in mt76_mcu_send_msg()
1539 mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, in mt76_mcu_skb_send_msg() argument
1542 return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL); in mt76_mcu_skb_send_msg()
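mt76_mcu_send_msg and mt76_mcu_skb_send_msg are thin wrappers that pass NULL for the response, so the reply skb is discarded; use the *_and_get_msg forms when the reply payload matters. A typical command looks like the following sketch, where mdev is the struct mt76_dev * and the command id, request layout, and addr/val inputs are hypothetical:

struct {
	__le32 addr;
	__le32 val;
} __packed req = {
	.addr = cpu_to_le32(addr),
	.val = cpu_to_le32(val),
};

return mt76_mcu_send_msg(mdev, MCU_CMD_FOO_SET, &req, sizeof(req), true);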
1545 void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
1559 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
1560 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
1561 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
1562 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
1563 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
1565 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
1589 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) in mt76_set_tx_blocked() argument
1591 spin_lock_bh(&dev->token_lock); in mt76_set_tx_blocked()
1592 __mt76_set_tx_blocked(dev, blocked); in mt76_set_tx_blocked()
1593 spin_unlock_bh(&dev->token_lock); in mt76_set_tx_blocked()
1597 mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) in mt76_token_get() argument
1601 spin_lock_bh(&dev->token_lock); in mt76_token_get()
1602 token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC); in mt76_token_get()
1603 spin_unlock_bh(&dev->token_lock); in mt76_token_get()
1609 mt76_token_put(struct mt76_dev *dev, int token) in mt76_token_put() argument
1613 spin_lock_bh(&dev->token_lock); in mt76_token_put()
1614 txwi = idr_remove(&dev->token, token); in mt76_token_put()
1615 spin_unlock_bh(&dev->token_lock); in mt76_token_put()
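mt76_token_get and mt76_token_put bracket a pending TX descriptor: the token is an idr slot (dev->token, bounded by dev->token_size) that holds the txwi cache entry until the completion path looks it up. Sketch, with t a struct mt76_txwi_cache * obtained elsewhere and error handling abbreviated:

int token = mt76_token_get(dev, &t);

if (token < 0)
	return token;
/* ... write token into the TX descriptor and hand the frame to hardware ... */

/* completion path, keyed by the token the hardware reports back: */
t = mt76_token_put(dev, token);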
1627 mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid) in mt76_packet_id_flush() argument
1631 mt76_tx_status_lock(dev, &list); in mt76_packet_id_flush()
1632 mt76_tx_status_skb_get(dev, wcid, -1, &list); in mt76_packet_id_flush()
1633 mt76_tx_status_unlock(dev, &list); in mt76_packet_id_flush()