1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright(c) 2018-2019 Realtek Corporation
3 */
4
5 #ifndef __RTW_HCI_H__
6 #define __RTW_HCI_H__
7
8 /* ops for PCI, USB and SDIO */
/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
	/* hand one skb, with its prepared pkt_info, to the bus TX path */
	int (*tx_write)(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb);
	/* start actual transmission of frames previously tx_write()'n */
	void (*tx_kick_off)(struct rtw_dev *rtwdev);
	/* flush (and optionally drop) the TX queues in @queues;
	 * optional - wrappers below check it for NULL before calling
	 */
	void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop);
	/* one-time bus setup */
	int (*setup)(struct rtw_dev *rtwdev);
	/* bring the bus interface up / down */
	int (*start)(struct rtw_dev *rtwdev);
	void (*stop)(struct rtw_dev *rtwdev);
	/* enter/leave deep power-save (@enter selects direction) */
	void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
	/* enter/leave link power-save (@enter selects direction) */
	void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
	/* bus-specific interface configuration */
	void (*interface_cfg)(struct rtw_dev *rtwdev);
	/* toggle RX aggregation at runtime; optional - wrapper below
	 * checks it for NULL before calling
	 */
	void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);

	/* download @size bytes from @buf to the reserved page area */
	int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
	/* send an H2C (host-to-chip) data block of @size bytes */
	int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);

	/* raw register accessors of 8/16/32-bit width */
	u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
	u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
	u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
	void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
	void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
	void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};
33
rtw_hci_tx_write(struct rtw_dev * rtwdev,struct rtw_tx_pkt_info * pkt_info,struct sk_buff * skb)34 static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
35 struct rtw_tx_pkt_info *pkt_info,
36 struct sk_buff *skb)
37 {
38 return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
39 }
40
rtw_hci_tx_kick_off(struct rtw_dev * rtwdev)41 static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
42 {
43 return rtwdev->hci.ops->tx_kick_off(rtwdev);
44 }
45
rtw_hci_setup(struct rtw_dev * rtwdev)46 static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
47 {
48 return rtwdev->hci.ops->setup(rtwdev);
49 }
50
rtw_hci_start(struct rtw_dev * rtwdev)51 static inline int rtw_hci_start(struct rtw_dev *rtwdev)
52 {
53 return rtwdev->hci.ops->start(rtwdev);
54 }
55
rtw_hci_stop(struct rtw_dev * rtwdev)56 static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
57 {
58 rtwdev->hci.ops->stop(rtwdev);
59 }
60
/* Enter (@enter == true) or leave deep power-save on the bus. */
static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	hci_ops->deep_ps(rtwdev, enter);
}
65
/* Enter (@enter == true) or leave link power-save on the bus. */
static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	hci_ops->link_ps(rtwdev, enter);
}
70
rtw_hci_interface_cfg(struct rtw_dev * rtwdev)71 static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
72 {
73 rtwdev->hci.ops->interface_cfg(rtwdev);
74 }
75
/* Toggle RX aggregation; a no-op when the bus does not implement it. */
static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	/* optional op - not every bus backend provides it */
	if (!hci_ops->dynamic_rx_agg)
		return;

	hci_ops->dynamic_rx_agg(rtwdev, enable);
}
81
82 static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev * rtwdev,u8 * buf,u32 size)83 rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
84 {
85 return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
86 }
87
88 static inline int
rtw_hci_write_data_h2c(struct rtw_dev * rtwdev,u8 * buf,u32 size)89 rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
90 {
91 return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
92 }
93
rtw_read8(struct rtw_dev * rtwdev,u32 addr)94 static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
95 {
96 return rtwdev->hci.ops->read8(rtwdev, addr);
97 }
98
rtw_read16(struct rtw_dev * rtwdev,u32 addr)99 static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
100 {
101 return rtwdev->hci.ops->read16(rtwdev, addr);
102 }
103
/* Read a 32-bit register at @addr via the bus backend. */
static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	return hci_ops->read32(rtwdev, addr);
}
108
/* Write the 8-bit value @val to the register at @addr. */
static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	hci_ops->write8(rtwdev, addr, val);
}
113
/* Write the 16-bit value @val to the register at @addr. */
static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	hci_ops->write16(rtwdev, addr, val);
}
118
/* Write the 32-bit value @val to the register at @addr. */
static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	hci_ops->write32(rtwdev, addr, val);
}
123
/* Read-modify-write: set the bits of @bit in the 8-bit register @addr. */
static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	rtw_write8(rtwdev, addr, rtw_read8(rtwdev, addr) | bit);
}
131
/* Read-modify-write: set the bits of @bit in the 16-bit register @addr. */
static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	rtw_write16(rtwdev, addr, rtw_read16(rtwdev, addr) | bit);
}
139
/* Read-modify-write: set the bits of @bit in the 32-bit register @addr. */
static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	rtw_write32(rtwdev, addr, rtw_read32(rtwdev, addr) | bit);
}
147
/* Read-modify-write: clear the bits of @bit in the 8-bit register @addr. */
static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	rtw_write8(rtwdev, addr, rtw_read8(rtwdev, addr) & ~bit);
}
155
/* Read-modify-write: clear the bits of @bit in the 16-bit register @addr. */
static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	rtw_write16(rtwdev, addr, rtw_read16(rtwdev, addr) & ~bit);
}
163
/* Read-modify-write: clear the bits of @bit in the 32-bit register @addr. */
static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	rtw_write32(rtwdev, addr, rtw_read32(rtwdev, addr) & ~bit);
}
171
172 static inline u32
rtw_read_rf(struct rtw_dev * rtwdev,enum rtw_rf_path rf_path,u32 addr,u32 mask)173 rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
174 u32 addr, u32 mask)
175 {
176 u32 val;
177
178 lockdep_assert_held(&rtwdev->mutex);
179
180 val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
181
182 return val;
183 }
184
/* Write @data into the @mask bits of RF register @addr on @rf_path,
 * through the chip-specific write_rf op.
 * Caller must hold rtwdev->mutex (enforced by lockdep).
 */
static inline void
rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	     u32 addr, u32 mask, u32 data)
{
	lockdep_assert_held(&rtwdev->mutex);

	rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
}
193
194 static inline u32
rtw_read32_mask(struct rtw_dev * rtwdev,u32 addr,u32 mask)195 rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
196 {
197 u32 shift = __ffs(mask);
198 u32 orig;
199 u32 ret;
200
201 orig = rtw_read32(rtwdev, addr);
202 ret = (orig & mask) >> shift;
203
204 return ret;
205 }
206
207 static inline u16
rtw_read16_mask(struct rtw_dev * rtwdev,u32 addr,u32 mask)208 rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
209 {
210 u32 shift = __ffs(mask);
211 u32 orig;
212 u32 ret;
213
214 orig = rtw_read16(rtwdev, addr);
215 ret = (orig & mask) >> shift;
216
217 return ret;
218 }
219
220 static inline u8
rtw_read8_mask(struct rtw_dev * rtwdev,u32 addr,u32 mask)221 rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
222 {
223 u32 shift = __ffs(mask);
224 u32 orig;
225 u32 ret;
226
227 orig = rtw_read8(rtwdev, addr);
228 ret = (orig & mask) >> shift;
229
230 return ret;
231 }
232
233 static inline void
rtw_write32_mask(struct rtw_dev * rtwdev,u32 addr,u32 mask,u32 data)234 rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
235 {
236 u32 shift = __ffs(mask);
237 u32 orig;
238 u32 set;
239
240 WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);
241
242 orig = rtw_read32(rtwdev, addr);
243 set = (orig & ~mask) | ((data << shift) & mask);
244 rtw_write32(rtwdev, addr, set);
245 }
246
247 static inline void
rtw_write8_mask(struct rtw_dev * rtwdev,u32 addr,u32 mask,u8 data)248 rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
249 {
250 u32 shift;
251 u8 orig, set;
252
253 mask &= 0xff;
254 shift = __ffs(mask);
255
256 orig = rtw_read8(rtwdev, addr);
257 set = (orig & ~mask) | ((data << shift) & mask);
258 rtw_write8(rtwdev, addr, set);
259 }
260
/* Return which host interface type (hci.type) this device is attached
 * through.
 */
static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.type;
}
265
/* Flush (or drop, if @drop) the TX queues selected by the @queues
 * bitmap; a no-op when the bus does not implement flush_queues.
 */
static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues,
					bool drop)
{
	const struct rtw_hci_ops *hci_ops = rtwdev->hci.ops;

	/* optional op - not every bus backend provides it */
	if (!hci_ops->flush_queues)
		return;

	hci_ops->flush_queues(rtwdev, queues, drop);
}
272
/* Flush (or drop, if @drop) every TX queue registered with mac80211. */
static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
{
	/* Delegate to rtw_hci_flush_queues() instead of open-coding the
	 * call, so the NULL check for the optional flush_queues op lives
	 * in exactly one place.  BIT(n) - 1 builds an all-ones bitmap
	 * covering the hw->queues hardware queues.
	 */
	rtw_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop);
}
280
281 #endif
282