xref: /openbsd/sys/dev/ic/qwxvar.h (revision 4bdff4be)
1 /*	$OpenBSD: qwxvar.h,v 1.5 2024/01/25 17:00:21 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2018-2019 The Linux Foundation.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted (subject to the limitations in the disclaimer
9  * below) provided that the following conditions are met:
10  *
11  *  * Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  *  * Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  *  * Neither the name of [Owner Organization] nor the names of its
19  *    contributors may be used to endorse or promote products derived from
20  *    this software without specific prior written permission.
21  *
22  * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
23  * THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
24  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
25  * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
26  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
27  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
28  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
29  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
30  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
/*
 * Debug printf wrappers.  DPRINTF() prints whenever qwx_debug is non-zero;
 * DNPRINTF() prints only when the given QWX_D_* class bit is set in
 * qwx_debug.  Both compile to nothing unless built with QWX_DEBUG.
 */
#ifdef QWX_DEBUG
#define DPRINTF(x...)		do { if (qwx_debug) printf(x); } while(0)
#define DNPRINTF(n,x...)	do { if (qwx_debug & n) printf(x); } while(0)
/* Debug message classes (bit flags in qwx_debug). */
#define	QWX_D_MISC		0x00000001
#define	QWX_D_MHI		0x00000002
#define	QWX_D_QMI		0x00000004
#define	QWX_D_WMI		0x00000008
#define	QWX_D_HTC		0x00000010
#define	QWX_D_HTT		0x00000020
#define	QWX_D_MAC		0x00000040
#define	QWX_D_MGMT		0x00000080
#define	QWX_D_CE		0x00000100
extern uint32_t	qwx_debug;
#else
#define DPRINTF(x...)
#define DNPRINTF(n,x...)
#endif
53 
struct qwx_softc;	/* forward declaration */

/* Maximum number of external (DP) interrupt groups. */
#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
57 
/*
 * Per-chip mapping of DP ring classes onto the external interrupt groups.
 * Each array entry is a bitmask of rings of that class serviced by the
 * corresponding interrupt group.
 */
struct ath11k_hw_ring_mask {
	uint8_t tx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t rx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t rx_err[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t rx_wbm_rel[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t reo_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t rxdma2host[ATH11K_EXT_IRQ_GRP_NUM_MAX];
	uint8_t host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
68 
/* Subdirectory and base names of firmware images loaded from disk. */
#define ATH11K_FW_DIR			"qwx"

#define ATH11K_BOARD_MAGIC		"QCA-ATH11K-BOARD"
#define ATH11K_BOARD_API2_FILE		"board-2"
#define ATH11K_DEFAULT_BOARD_FILE	"board"
#define ATH11K_DEFAULT_CAL_FILE		"caldata"
#define ATH11K_AMSS_FILE		"amss"
#define ATH11K_M3_FILE			"m3"
#define ATH11K_REGDB_FILE		"regdb"

/* Marker string preceding the firmware build id inside the amss image. */
#define QWX_FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
80 
/* Mapping of a TCL Tx ring to its WBM completion ring and RBM id. */
struct ath11k_hw_tcl2wbm_rbm_map {
	uint8_t tcl_ring_num;
	uint8_t wbm_ring_num;
	uint8_t rbm_id;
};
86 
/**
 * enum hal_rx_buf_return_buf_manager
 *
 * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
 * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
 *	descriptor list.
 * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
 * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
 * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
 * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
 * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
 * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
 *	(NOTE(review): entry was missing from this comment; semantics
 *	presumed analogous to SW0-SW2 -- confirm against HAL users)
 */

enum hal_rx_buf_return_buf_manager {
	HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
	HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
	HAL_RX_BUF_RBM_FW_BM,
	HAL_RX_BUF_RBM_SW0_BM,
	HAL_RX_BUF_RBM_SW1_BM,
	HAL_RX_BUF_RBM_SW2_BM,
	HAL_RX_BUF_RBM_SW3_BM,
	HAL_RX_BUF_RBM_SW4_BM,
};
110 
/* Per-chip HAL parameters: default Rx return-buffer manager and the
 * TCL-to-WBM ring mapping table. */
struct ath11k_hw_hal_params {
	enum hal_rx_buf_return_buf_manager rx_buf_rbm;
	const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
};
115 
/*
 * Static description of one supported chip revision: firmware paths,
 * register layout, copy-engine configuration, and feature flags.
 * Sections wrapped in "#if notyet" are carried over from Linux ath11k
 * but not used by this driver yet.
 */
struct ath11k_hw_params {
	const char *name;		/* human-readable chip name */
	uint16_t hw_rev;
	uint8_t max_radios;
	uint32_t bdf_addr;		/* board data file load address */

	struct {
		const char *dir;	/* firmware subdirectory */
		size_t board_size;
		size_t cal_offset;
	} fw;

	const struct ath11k_hw_ops *hw_ops;
	const struct ath11k_hw_ring_mask *ring_mask;

	bool internal_sleep_clock;

	const struct ath11k_hw_regs *regs;
	uint32_t qmi_service_ins_id;
	/* Copy-engine configuration (host and target views). */
	const struct ce_attr *host_ce_config;
	uint32_t ce_count;
	const struct ce_pipe_config *target_ce_config;
	uint32_t target_ce_count;
	const struct service_to_pipe *svc_to_ce_map;
	uint32_t svc_to_ce_map_len;

	bool single_pdev_only;

	bool rxdma1_enable;
	int num_rxmda_per_pdev;	/* sic: "rxmda" typo inherited from Linux */
	bool rx_mac_buf_ring;
	bool vdev_start_delay;
	bool htt_peer_map_v2;
#if notyet
	struct {
		uint8_t fft_sz;
		uint8_t fft_pad_sz;
		uint8_t summary_pad_sz;
		uint8_t fft_hdr_len;
		uint16_t max_fft_bins;
		bool fragment_160mhz;
	} spectral;

	uint16_t interface_modes;
	bool supports_monitor;
	bool full_monitor_mode;
#endif
	bool supports_shadow_regs;
	bool idle_ps;
	bool supports_sta_ps;
	bool cold_boot_calib;
	bool cbcal_restart_fw;
	int fw_mem_mode;
	uint32_t num_vdevs;
	uint32_t num_peers;
	bool supports_suspend;
	uint32_t hal_desc_sz;
	bool supports_regdb;
	bool fix_l1ss;
	bool credit_flow;
	uint8_t max_tx_ring;
	const struct ath11k_hw_hal_params *hal_params;
#if notyet
	bool supports_dynamic_smps_6ghz;
	bool alloc_cacheable_memory;
	bool supports_rssi_stats;
#endif
	bool fw_wmi_diag_event;
	bool current_cc_support;
	bool dbr_debug_support;
	bool global_reset;
#ifdef notyet
	const struct cfg80211_sar_capa *bios_sar_capa;
#endif
	bool m3_fw_support;
	bool fixed_bdf_addr;
	bool fixed_mem_region;
	bool static_window_map;
	bool hybrid_bus_type;
	bool fixed_fw_mem;
#if notyet
	bool support_off_channel_tx;
	bool supports_multi_bssid;

	struct {
		uint32_t start;
		uint32_t end;
	} sram_dump;

	bool tcl_ring_retry;
#endif
	uint32_t tx_ring_size;
	bool smp2p_wow_exit;
};
210 
/*
 * Per-chip operations.  Only the WMI config and mac/pdev id translation
 * hooks are in use; the Rx-descriptor accessors inside "#if notyet" are
 * retained from Linux ath11k for future porting.
 */
struct ath11k_hw_ops {
#if notyet
	uint8_t (*get_hw_mac_from_pdev_id)(int pdev_id);
#endif
	void (*wmi_init_config)(struct qwx_softc *sc,
	    struct target_resource_config *config);
	int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
	int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
#if notyet
	void (*tx_mesh_enable)(struct ath11k_base *ab,
			       struct hal_tcl_data_cmd *tcl_cmd);
	bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
	bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
	uint8_t *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
	bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
	uint32_t (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
	bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
	bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
	bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
	uint16_t (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
	uint16_t (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
	uint32_t (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
	uint8_t (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
	uint16_t (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
	void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc,
					  struct hal_rx_desc *ldesc);
	uint32_t (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
	uint32_t (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
	void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, uint16_t len);
	struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
	uint8_t *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
	void (*reo_setup)(struct ath11k_base *ab);
	uint16_t (*mpdu_info_get_peerid)(uint8_t *tlv_data);
	bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
	uint8_t* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
	uint32_t (*get_ring_selector)(struct sk_buff *skb);
#endif
};
257 
/* Per-chip hw_ops and interrupt ring-mask tables, defined in qwx.c. */
extern const struct ath11k_hw_ops ipq8074_ops;
extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ops wcn6855_ops;
extern const struct ath11k_hw_ops wcn6750_ops;

extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
269 
/*
 * Chip-specific register offsets.  The HAL addresses TCL, REO, CE, WBM
 * and PCIe registers through this table so the same code can drive
 * different chip revisions.
 */
struct ath11k_hw_regs {
	uint32_t hal_tcl1_ring_base_lsb;
	uint32_t hal_tcl1_ring_base_msb;
	uint32_t hal_tcl1_ring_id;
	uint32_t hal_tcl1_ring_misc;
	uint32_t hal_tcl1_ring_tp_addr_lsb;
	uint32_t hal_tcl1_ring_tp_addr_msb;
	uint32_t hal_tcl1_ring_consumer_int_setup_ix0;
	uint32_t hal_tcl1_ring_consumer_int_setup_ix1;
	uint32_t hal_tcl1_ring_msi1_base_lsb;
	uint32_t hal_tcl1_ring_msi1_base_msb;
	uint32_t hal_tcl1_ring_msi1_data;
	uint32_t hal_tcl2_ring_base_lsb;
	uint32_t hal_tcl_ring_base_lsb;

	uint32_t hal_tcl_status_ring_base_lsb;

	uint32_t hal_reo1_ring_base_lsb;
	uint32_t hal_reo1_ring_base_msb;
	uint32_t hal_reo1_ring_id;
	uint32_t hal_reo1_ring_misc;
	uint32_t hal_reo1_ring_hp_addr_lsb;
	uint32_t hal_reo1_ring_hp_addr_msb;
	uint32_t hal_reo1_ring_producer_int_setup;
	uint32_t hal_reo1_ring_msi1_base_lsb;
	uint32_t hal_reo1_ring_msi1_base_msb;
	uint32_t hal_reo1_ring_msi1_data;
	uint32_t hal_reo2_ring_base_lsb;
	uint32_t hal_reo1_aging_thresh_ix_0;
	uint32_t hal_reo1_aging_thresh_ix_1;
	uint32_t hal_reo1_aging_thresh_ix_2;
	uint32_t hal_reo1_aging_thresh_ix_3;

	uint32_t hal_reo1_ring_hp;
	uint32_t hal_reo1_ring_tp;
	uint32_t hal_reo2_ring_hp;

	uint32_t hal_reo_tcl_ring_base_lsb;
	uint32_t hal_reo_tcl_ring_hp;

	uint32_t hal_reo_status_ring_base_lsb;
	uint32_t hal_reo_status_hp;

	uint32_t hal_reo_cmd_ring_base_lsb;
	uint32_t hal_reo_cmd_ring_hp;

	uint32_t hal_sw2reo_ring_base_lsb;
	uint32_t hal_sw2reo_ring_hp;

	uint32_t hal_seq_wcss_umac_ce0_src_reg;
	uint32_t hal_seq_wcss_umac_ce0_dst_reg;
	uint32_t hal_seq_wcss_umac_ce1_src_reg;
	uint32_t hal_seq_wcss_umac_ce1_dst_reg;

	uint32_t hal_wbm_idle_link_ring_base_lsb;
	uint32_t hal_wbm_idle_link_ring_misc;

	uint32_t hal_wbm_release_ring_base_lsb;

	uint32_t hal_wbm0_release_ring_base_lsb;
	uint32_t hal_wbm1_release_ring_base_lsb;

	uint32_t pcie_qserdes_sysclk_en_sel;
	uint32_t pcie_pcs_osc_dtct_config_base;

	uint32_t hal_shadow_base_addr;
	uint32_t hal_reo1_misc_ctl;
};
338 
/* Per-chip register offset tables, defined in qwx.c. */
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs;
extern const struct ath11k_hw_regs wcn6750_regs;
344 
/* Device state flags; presumably used as bit positions in a flags word
 * of the softc -- confirm against users. */
enum ath11k_dev_flags {
	ATH11K_CAC_RUNNING,
	ATH11K_FLAG_CORE_REGISTERED,
	ATH11K_FLAG_CRASH_FLUSH,
	ATH11K_FLAG_RAW_MODE,
	ATH11K_FLAG_HW_CRYPTO_DISABLED,
	ATH11K_FLAG_BTCOEX,
	ATH11K_FLAG_RECOVERY,
	ATH11K_FLAG_UNREGISTERING,
	ATH11K_FLAG_REGISTERED,
	ATH11K_FLAG_QMI_FAIL,
	ATH11K_FLAG_HTC_SUSPEND_COMPLETE,
	ATH11K_FLAG_CE_IRQ_ENABLED,
	ATH11K_FLAG_EXT_IRQ_ENABLED,
	ATH11K_FLAG_FIXED_MEM_RGN,
	ATH11K_FLAG_DEVICE_INIT_DONE,
	ATH11K_FLAG_MULTI_MSI_VECTORS,
};
363 
/* Scan state machine: IDLE -> STARTING -> RUNNING -> (ABORTING) -> IDLE. */
enum ath11k_scan_state {
	ATH11K_SCAN_IDLE,
	ATH11K_SCAN_STARTING,
	ATH11K_SCAN_RUNNING,
	ATH11K_SCAN_ABORTING,
};
370 
/* State of the 802.11d (country detection) scan. */
enum ath11k_11d_state {
	ATH11K_11D_IDLE,
	ATH11K_11D_PREPARING,
	ATH11K_11D_RUNNING,
};
376 
/* enum ath11k_spectral_mode:
 *
 * @ATH11K_SPECTRAL_DISABLED: spectral mode is disabled
 * @ATH11K_SPECTRAL_BACKGROUND: hardware sends samples when it is not busy
 *	with something else.
 * @ATH11K_SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
 *	is performed manually.
 */
enum ath11k_spectral_mode {
	ATH11K_SPECTRAL_DISABLED = 0,
	ATH11K_SPECTRAL_BACKGROUND,
	ATH11K_SPECTRAL_MANUAL,
};
390 
/* 11d scan interval; presumably milliseconds (600000 = 10 min) -- confirm. */
#define QWX_SCAN_11D_INTERVAL		600000
#define QWX_11D_INVALID_VDEV_ID		0xFFFF
393 
/*
 * Operations implemented by the bus attachment glue (e.g. PCI);
 * the chip-independent code calls the hardware through this table.
 */
struct qwx_ops {
	uint32_t	(*read32)(struct qwx_softc *, uint32_t);
	void		(*write32)(struct qwx_softc *, uint32_t, uint32_t);
	int		(*start)(struct qwx_softc *);
	void		(*stop)(struct qwx_softc *);
	int		(*power_up)(struct qwx_softc *);
	void		(*power_down)(struct qwx_softc *);
	int		(*submit_xfer)(struct qwx_softc *, struct mbuf *);
	void		(*irq_enable)(struct qwx_softc *sc);
	void		(*irq_disable)(struct qwx_softc *sc);
	int		(*map_service_to_pipe)(struct qwx_softc *, uint16_t,
			    uint8_t *, uint8_t *);
	int		(*get_user_msi_vector)(struct qwx_softc *, char *,
			    int *, uint32_t *, uint32_t *);
};
409 
/* A single contiguous DMA-able memory allocation. */
struct qwx_dmamem {
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
	size_t			size;
	caddr_t			kva;	/* kernel virtual address */
};

struct qwx_dmamem *qwx_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t);
void qwx_dmamem_free(bus_dma_tag_t, struct qwx_dmamem *);

/* Accessors for map, length, device (DMA) address and kernel address. */
#define QWX_DMA_MAP(_adm)	((_adm)->map)
#define QWX_DMA_LEN(_adm)	((_adm)->size)
#define QWX_DMA_DVA(_adm)	((_adm)->map->dm_segs[0].ds_addr)
#define QWX_DMA_KVA(_adm)	((void *)(_adm)->kva)
424 
/* Parameters used when setting up an SRNG (see struct hal_srng below). */
struct hal_srng_params {
	bus_addr_t ring_base_paddr;
	uint32_t *ring_base_vaddr;
	int num_entries;
	uint32_t intr_batch_cntr_thres_entries;
	uint32_t intr_timer_thres_us;
	uint32_t flags;			/* HAL_SRNG_FLAGS_* */
	uint32_t max_buffer_len;
	uint32_t low_threshold;
	uint64_t msi_addr;
	uint32_t msi_data;

	/* Add more params as needed */
};
439 
/* Ring direction as seen by software: source (SW produces) or
 * destination (HW produces). */
enum hal_srng_dir {
	HAL_SRNG_DIR_SRC,
	HAL_SRNG_DIR_DST
};
444 
/* srng flags, kept in struct hal_srng_params/hal_srng 'flags'. */
#define HAL_SRNG_FLAGS_MSI_SWAP			0x00000008
#define HAL_SRNG_FLAGS_RING_PTR_SWAP		0x00000010
#define HAL_SRNG_FLAGS_DATA_TLV_SWAP		0x00000020
#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN	0x00010000
#define HAL_SRNG_FLAGS_MSI_INTR			0x00020000
#define HAL_SRNG_FLAGS_REMAP_CE_RING		0x10000000
#define HAL_SRNG_FLAGS_CACHED			0x20000000
#define HAL_SRNG_FLAGS_LMAC_RING		0x80000000

/* Tag and length fields of an SRNG TLV header word. */
#define HAL_SRNG_TLV_HDR_TAG		GENMASK(9, 1)
#define HAL_SRNG_TLV_HDR_LEN		GENMASK(25, 10)
457 
/* Common SRNG ring structure for source and destination rings */
struct hal_srng {
	/* Unique SRNG ring ID */
	uint8_t ring_id;

	/* Ring initialization done */
	uint8_t initialized;

	/* Interrupt/MSI value assigned to this ring */
	int irq;

	/* Physical base address of the ring */
	bus_addr_t ring_base_paddr;

	/* Virtual base address of the ring */
	uint32_t *ring_base_vaddr;

	/* Number of entries in ring */
	uint32_t num_entries;

	/* Ring size */
	uint32_t ring_size;

	/* Ring size mask */
	uint32_t ring_size_mask;

	/* Size of ring entry */
	uint32_t entry_size;

	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;

	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;

	/* MSI Address */
	bus_addr_t msi_addr;

	/* MSI data */
	uint32_t msi_data;

	/* Misc flags */
	uint32_t flags;
#ifdef notyet
	/* Lock for serializing ring index updates */
	spinlock_t lock;
#endif
	/* Start offset of SRNG register groups for this ring
	 * TBD: See if this is required - register address can be derived
	 * from ring ID
	 */
	uint32_t hwreg_base[HAL_SRNG_NUM_REG_GRP];

	/* Time of last ring access; units unspecified here -- see users. */
	uint64_t timestamp;

	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;

	/* Direction-specific state; which member is valid depends on
	 * ring_dir above. */
	union {
		struct {
			/* SW tail pointer */
			uint32_t tp;

			/* Shadow head pointer location to be updated by HW */
			volatile uint32_t *hp_addr;

			/* Cached head pointer */
			uint32_t cached_hp;

			/* Tail pointer location to be updated by SW - This
			 * will be a register address and need not be
			 * accessed through SW structure
			 */
			uint32_t *tp_addr;

			/* Current SW loop cnt */
			uint32_t loop_cnt;

			/* max transfer size */
			uint16_t max_buffer_length;

			/* head pointer at access end */
			uint32_t last_hp;
		} dst_ring;

		struct {
			/* SW head pointer */
			uint32_t hp;

			/* SW reap head pointer */
			uint32_t reap_hp;

			/* Shadow tail pointer location to be updated by HW */
			uint32_t *tp_addr;

			/* Cached tail pointer */
			uint32_t cached_tp;

			/* Head pointer location to be updated by SW - This
			 * will be a register address and need not be accessed
			 * through SW structure
			 */
			uint32_t *hp_addr;

			/* Low threshold - in number of ring entries */
			uint32_t low_threshold;

			/* tail pointer at access end */
			uint32_t last_tp;
		} src_ring;
	} u;
};
570 
/* All SRNG ring classes known to the HAL. */
enum hal_ring_type {
	HAL_REO_DST,
	HAL_REO_EXCEPTION,
	HAL_REO_REINJECT,
	HAL_REO_CMD,
	HAL_REO_STATUS,
	HAL_TCL_DATA,
	HAL_TCL_CMD,
	HAL_TCL_STATUS,
	HAL_CE_SRC,
	HAL_CE_DST,
	HAL_CE_DST_STATUS,
	HAL_WBM_IDLE_LINK,
	HAL_SW2WBM_RELEASE,
	HAL_WBM2SW_RELEASE,
	HAL_RXDMA_BUF,
	HAL_RXDMA_DST,
	HAL_RXDMA_MONITOR_BUF,
	HAL_RXDMA_MONITOR_STATUS,
	HAL_RXDMA_MONITOR_DST,
	HAL_RXDMA_MONITOR_DESC,
	HAL_RXDMA_DIR_BUF,
	HAL_MAX_RING_TYPES,
};
595 
/* HW SRNG configuration table: one entry per hal_ring_type. */
struct hal_srng_config {
	int start_ring_id;
	uint16_t max_rings;
	uint16_t entry_size;	/* in 32-bit words */
	uint32_t reg_start[HAL_SRNG_NUM_REG_GRP];
	uint16_t reg_size[HAL_SRNG_NUM_REG_GRP];
	uint8_t lmac_ring;
	enum hal_srng_dir ring_dir;
	uint32_t max_size;
};

/* Number of entries in ath11k_hal srng_config[]. */
#define QWX_NUM_SRNG_CFG	21
609 
/* HAL context to be used to access SRNG APIs (currently used by data path
 * and transport (CE) modules)
 */
struct ath11k_hal {
	/* HAL internal state for all SRNG rings.
	 */
	struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];

	/* SRNG configuration table */
	struct hal_srng_config srng_config[QWX_NUM_SRNG_CFG];

	/* Remote pointer memory for HW/FW updates */
	struct qwx_dmamem *rdpmem;	/* backing DMA allocation for rdp */
	struct {
		uint32_t *vaddr;
		bus_addr_t paddr;
	} rdp;

	/* Shared memory for ring pointer updates from host to FW */
	struct qwx_dmamem *wrpmem;	/* backing DMA allocation for wrp */
	struct {
		uint32_t *vaddr;
		bus_addr_t paddr;
	} wrp;

	/* Available REO blocking resources bitmap */
	uint8_t avail_blk_resource;

	uint8_t current_blk_index;

	/* shadow register configuration */
	uint32_t shadow_reg_addr[HAL_SHADOW_NUM_REGS];
	int num_shadow_reg_configured;
#ifdef notyet
	struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
#endif
};
647 
/* Packet-number (replay counter) check type for REO queues. */
enum hal_pn_type {
	HAL_PN_TYPE_NONE,
	HAL_PN_TYPE_WPA,
	HAL_PN_TYPE_WAPI_EVEN,
	HAL_PN_TYPE_WAPI_UNEVEN,
};

/* Copy-engine descriptor kind. */
enum hal_ce_desc {
	HAL_CE_DESC_SRC,
	HAL_CE_DESC_DST,
	HAL_CE_DESC_DST_STATUS,
};
660 
/* Byte-swap a CE buffer in place; len in bytes. */
void qwx_ce_byte_swap(void *mem, uint32_t len);

/* Interrupt-enable register addresses for one CE. */
struct ce_ie_addr {
	uint32_t ie1_reg_addr;
	uint32_t ie2_reg_addr;
	uint32_t ie3_reg_addr;
};

/* A remapped CE register window. */
struct ce_remap {
	uint32_t base;
	uint32_t size;
};
673 
/* Static attributes of one copy-engine pipe. */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;

	/* Completion callbacks for received and sent mbufs. */
	void (*recv_cb)(struct qwx_softc *, struct mbuf *);
	void (*send_cb)(struct qwx_softc *, struct mbuf *);
};
693 
/* Alignment requirement for CE descriptor rings, in bytes. */
#define CE_DESC_RING_ALIGN 8

/* Per-buffer software state for a received frame. */
struct qwx_rx_data {
	struct mbuf	*m;
	bus_dmamap_t	map;
	int is_first_msdu;
	int is_last_msdu;
	int is_continuation;
	int is_mcbc;
	int is_eapol;
	struct hal_rx_desc *rx_desc;
	uint8_t err_rel_src;
	uint8_t err_code;
	uint8_t mac_id;
	uint8_t unmapped;
	uint8_t is_frag;
	uint8_t tid;
	uint16_t peer_id;
	uint16_t seq_no;
};
714 
/* Per-buffer software state for a transmitted frame. */
struct qwx_tx_data {
	struct mbuf	*m;
	bus_dmamap_t	map;
	uint8_t eid;	/* HTC endpoint id */
	uint8_t flags;
	uint32_t cipher;
} __packed;
722 
723 struct qwx_ce_ring {
724 	/* Number of entries in this ring; must be power of 2 */
725 	unsigned int nentries;
726 	unsigned int nentries_mask;
727 
728 	/* For dest ring, this is the next index to be processed
729 	 * by software after it was/is received into.
730 	 *
731 	 * For src ring, this is the last descriptor that was sent
732 	 * and completion processed by software.
733 	 *
734 	 * Regardless of src or dest ring, this is an invariant
735 	 * (modulo ring size):
736 	 *     write index >= read index >= sw_index
737 	 */
738 	unsigned int sw_index;
739 	/* cached copy */
740 	unsigned int write_index;
741 
742 	/* Start of DMA-coherent area reserved for descriptors */
743 	/* Host address space */
744 	caddr_t base_addr;
745 
746 	/* DMA map for Tx/Rx descriptors. */
747 	bus_dmamap_t		dmap;
748 	bus_dma_segment_t	dsegs;
749 	int			nsegs;
750 	size_t			desc_sz;
751 
752 	/* HAL ring id */
753 	uint32_t hal_ring_id;
754 
755 	/*
756 	 * Per-transfer data.
757 	 * Size and type of this data depends on how the ring is used.
758 	 *
759 	 * For transfers using DMA, the context contains pointers to
760 	 * struct qwx_rx_data if this ring is a dest ring, or struct
761 	 * qwx_tx_data if this ring is a src ring. DMA maps are allocated
762 	 * when the device is started via sc->ops.start, and will be used
763 	 * to load mbufs for DMA transfers.
764 	 * In this case, the pointers MUST NOT be cleared until the device
765 	 * is stopped. Otherwise we'd lose track of our DMA mappings!
766 	 * The Linux ath11k driver works differently because it can store
767 	 * DMA mapping information in a Linux socket buffer structure, which
768 	 * is not possible with mbufs.
769 	 *
770 	 * Keep last.
771 	 */
772 	void *per_transfer_context[0];
773 };
774 
/* HTC/HTT completion handlers; consume the given mbuf. */
void qwx_htc_tx_completion_handler(struct qwx_softc *, struct mbuf *);
void qwx_htc_rx_completion_handler(struct qwx_softc *, struct mbuf *);
void qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *, struct mbuf *);

struct qwx_dp;
780 
/* Data-path (DP) sizing parameters and ring sizes. */
#define DP_NUM_CLIENTS_MAX 64
#define DP_AVG_TIDS_PER_CLIENT 2
#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
#define DP_AVG_MSDUS_PER_FLOW 128
#define DP_AVG_FLOWS_PER_TID 2
#define DP_AVG_MPDUS_PER_TID_MAX 128
#define DP_AVG_MSDUS_PER_MPDU 4

#define DP_RX_HASH_ENABLE	1 /* Enable hash based Rx steering */

#define DP_BA_WIN_SZ_MAX	256

#define DP_TCL_NUM_RING_MAX	3
#define DP_TCL_NUM_RING_MAX_QCA6390	1

#define DP_IDLE_SCATTER_BUFS_MAX 16

/* Ring sizes, in entries. */
#define DP_WBM_RELEASE_RING_SIZE	64
#define DP_TCL_DATA_RING_SIZE		512
#define DP_TCL_DATA_RING_SIZE_WCN6750	2048
#define DP_TX_COMP_RING_SIZE		32768
#define DP_TX_IDR_SIZE			DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE		32
#define DP_TCL_STATUS_RING_SIZE		32
#define DP_REO_DST_RING_MAX		4
#define DP_REO_DST_RING_SIZE		2048
#define DP_REO_REINJECT_RING_SIZE	32
#define DP_RX_RELEASE_RING_SIZE		1024
#define DP_REO_EXCEPTION_RING_SIZE	128
#define DP_REO_CMD_RING_SIZE		256
#define DP_REO_STATUS_RING_SIZE		2048
#define DP_RXDMA_BUF_RING_SIZE		4096
#define DP_RXDMA_REFILL_RING_SIZE	2048
#define DP_RXDMA_ERR_DST_RING_SIZE	1024
#define DP_RXDMA_MON_STATUS_RING_SIZE	1024
#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
#define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
#define DP_RXDMA_MONITOR_DESC_RING_SIZE	4096

#define DP_RX_RELEASE_RING_NUM	3

/* Rx buffer sizes, in bytes. */
#define DP_RX_BUFFER_SIZE	2048
#define	DP_RX_BUFFER_SIZE_LITE  1024
#define DP_RX_BUFFER_ALIGN_SIZE	128

/* Fields of the RXDMA buffer cookie. */
#define DP_RXDMA_BUF_COOKIE_BUF_ID	GENMASK(17, 0)
#define DP_RXDMA_BUF_COOKIE_PDEV_ID	GENMASK(20, 18)

/* Translate between hardware (1-based) and software (0-based) mac ids. */
#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)

/* Fields of a Tx descriptor id. */
#define DP_TX_DESC_ID_MAC_ID  GENMASK(1, 0)
#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
835 
/* Deferred head-pointer update timer for shadow-register mode. */
struct qwx_hp_update_timer {
	struct timeout timer;
	int started;
	int init;
	uint32_t tx_num;
	uint32_t timer_tx_num;
	uint32_t ring_id;
	uint32_t interval;	/* timer interval; units per users -- see
				 * ATH11K_SHADOW_*_TIMER_INTERVAL below */
	struct qwx_softc *sc;
};
846 
/* Per-TID Rx reorder queue state, including its REO queue DMA memory. */
struct dp_rx_tid {
	uint8_t tid;
	struct qwx_dmamem *mem;	/* backing allocation for vaddr/paddr */
	uint32_t *vaddr;
	uint64_t paddr;
	uint32_t size;
	uint32_t ba_win_sz;	/* block-ack window size */
	int active;

	/* Info related to rx fragments */
	uint32_t cur_sn;
	uint16_t last_frag_no;
	uint16_t rx_frag_bitmap;
#if 0
	struct sk_buff_head rx_frags;
	struct hal_reo_dest_ring *dst_ring_desc;

	/* Timer info related to fragments */
	struct timer_list frag_timer;
	struct ath11k_base *ab;
#endif
};
869 
/* Thresholds and timeouts for REO descriptor cache maintenance. */
#define DP_REO_DESC_FREE_THRESHOLD  64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
#define DP_MON_PURGE_TIMEOUT_MS     100
#define DP_MON_SERVICE_BUDGET       128

/* A deferred REO cache flush request, queued with a timestamp. */
struct dp_reo_cache_flush_elem {
	TAILQ_ENTRY(dp_reo_cache_flush_elem) entry;
	struct dp_rx_tid data;
	unsigned long ts;
};

TAILQ_HEAD(dp_reo_cmd_cache_flush_head, dp_reo_cache_flush_elem);
882 
/* An outstanding REO command awaiting its status callback. */
struct dp_reo_cmd {
	TAILQ_ENTRY(dp_reo_cmd) entry;
	struct dp_rx_tid data;
	int cmd_num;
	void (*handler)(struct qwx_dp *, void *,
	    enum hal_reo_cmd_status status);
};

TAILQ_HEAD(dp_reo_cmd_head, dp_reo_cmd);
892 
/* DMA memory backing one data-path SRNG. */
struct dp_srng {
	struct qwx_dmamem *mem;	/* backing allocation for vaddr/paddr */
	uint32_t *vaddr;
	bus_addr_t paddr;
	int size;		/* in bytes */
	uint32_t ring_id;
	uint8_t cached;
};
901 
/* One Tx ring pair: TCL data ring plus its WBM completion ring. */
struct dp_tx_ring {
	uint8_t tcl_data_ring_id;
	struct dp_srng tcl_data_ring;
	struct dp_srng tcl_comp_ring;
#if 0
	struct idr txbuf_idr;
	/* Protects txbuf_idr and num_pending */
	spinlock_t tx_idr_lock;
#endif
	/* Circular queue of pending Tx status descriptors. */
	struct hal_wbm_release_ring *tx_status;
	int tx_status_head;
	int tx_status_tail;
};
915 
916 
/* One bank of DMA memory for hardware link descriptors. */
struct dp_link_desc_bank {
	struct qwx_dmamem *mem;	/* backing allocation for vaddr/paddr */
	caddr_t *vaddr;
	bus_addr_t paddr;
	uint32_t size;		/* in bytes */
};
923 
/* Size to enforce scatter idle list mode */
#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
#define DP_LINK_DESC_BANKS_MAX 8

/* One scatter buffer of the WBM idle link descriptor list. */
struct hal_wbm_idle_scatter_list {
	struct qwx_dmamem *mem;	/* backing allocation for vaddr/paddr */
	bus_addr_t paddr;
	struct hal_wbm_link_desc *vaddr;
};
933 
/* Top-level data-path state: all DP rings, link descriptor banks, and
 * the REO command bookkeeping lists. */
struct qwx_dp {
	struct qwx_softc *sc;
	enum ath11k_htc_ep_id eid;	/* HTT endpoint */
	int htt_tgt_version_received;
	uint8_t htt_tgt_ver_major;
	uint8_t htt_tgt_ver_minor;
	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
	struct dp_srng wbm_idle_ring;
	struct dp_srng wbm_desc_rel_ring;
	struct dp_srng tcl_cmd_ring;
	struct dp_srng tcl_status_ring;
	struct dp_srng reo_reinject_ring;
	struct dp_srng rx_rel_ring;
	struct dp_srng reo_except_ring;
	struct dp_srng reo_cmd_ring;
	struct dp_srng reo_status_ring;
	struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
	struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
	struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
	struct dp_reo_cmd_head reo_cmd_list;
	struct dp_reo_cmd_cache_flush_head reo_cmd_cache_flush_list;
#if 0
	struct list_head dp_full_mon_mpdu_list;
#endif
	uint32_t reo_cmd_cache_flush_count;
#if 0
	/**
	 * protects access to below fields,
	 * - reo_cmd_list
	 * - reo_cmd_cache_flush_list
	 * - reo_cmd_cache_flush_count
	 */
	spinlock_t reo_cmd_lock;
#endif
	struct qwx_hp_update_timer reo_cmd_timer;
	struct qwx_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
};
971 
/* Shadow-register update timer intervals; presumably milliseconds --
 * confirm against qwx_hp_update_timer users. */
#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20
#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10
974 
/* One copy-engine pipe: up to a source, destination, and status ring,
 * with its send/receive completion callbacks. */
struct qwx_ce_pipe {
	struct qwx_softc *sc;
	uint16_t pipe_num;
	unsigned int attr_flags;	/* CE_ATTR_* */
	unsigned int buf_sz;
	unsigned int rx_buf_needed;

	void (*send_cb)(struct qwx_softc *, struct mbuf *);
	void (*recv_cb)(struct qwx_softc *, struct mbuf *);

#ifdef notyet
	struct tasklet_struct intr_tq;
#endif
	struct qwx_ce_ring *src_ring;
	struct qwx_ce_ring *dest_ring;
	struct qwx_ce_ring *status_ring;
	uint64_t timestamp;	/* time of last access; units per users */
};
993 
/* All copy-engine pipes plus their head-pointer update timers. */
struct qwx_ce {
	struct qwx_ce_pipe ce_pipe[CE_COUNT_MAX];
#ifdef notyet
	/* Protects rings of all ce pipes */
	spinlock_t ce_lock;
#endif
	struct qwx_hp_update_timer hp_timer[CE_COUNT_MAX];
};
1002 
1003 
/* XXX This may be non-zero on AHB but is always zero on PCI. */
#define ATH11K_CE_OFFSET(sc)	(0)

/* Shadow register configuration sent to firmware via QMI. */
struct qwx_qmi_ce_cfg {
	const uint8_t *shadow_reg;
	int shadow_reg_len;
	uint32_t *shadow_reg_v2;
	uint32_t shadow_reg_v2_len;
};
1013 
/* Target identification reported by firmware over QMI. */
struct qwx_qmi_target_info {
	uint32_t chip_id;
	uint32_t chip_family;
	uint32_t board_id;
	uint32_t soc_id;
	uint32_t fw_version;
	uint32_t eeprom_caldata;
	char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
	char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
	char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
};
1025 
/* How to search board-2 data: by default id or by bus and board name. */
enum ath11k_bdf_search {
	ATH11K_BDF_SEARCH_DEFAULT,
	ATH11K_BDF_SEARCH_BUS_AND_BOARD,
};
1030 
/* PCI-style device identification used to select board data. */
struct qwx_device_id {
	enum ath11k_bdf_search bdf_search;
	uint32_t vendor;
	uint32_t device;
	uint32_t subsystem_vendor;
	uint32_t subsystem_device;
};
1038 
struct qwx_wmi_base;

/* Per-pdev WMI channel; eid selects the HTC endpoint used for commands. */
struct qwx_pdev_wmi {
	struct qwx_wmi_base *wmi;	/* back-pointer to shared WMI state */
	enum ath11k_htc_ep_id eid;
	const struct wmi_peer_flags_map *peer_flags;
	uint32_t rx_decap_mode;
	int tx_ce_desc;
};

#define QWX_MAX_RADIOS 3

/* WMI (wireless module interface) state shared across all radios. */
struct qwx_wmi_base {
	struct qwx_softc *sc;
	struct qwx_pdev_wmi wmi[QWX_MAX_RADIOS];
	enum ath11k_htc_ep_id wmi_endpoint_id[QWX_MAX_RADIOS];
	uint32_t max_msg_len[QWX_MAX_RADIOS];
	int service_ready;
	int unified_ready;
	/* Bitmap of WMI services advertised by firmware. */
	uint8_t svc_map[howmany(WMI_MAX_EXT2_SERVICE, 8)];
	int tx_credits;
	const struct wmi_peer_flags_map *peer_flags;
	uint32_t num_mem_chunks;
	uint32_t rx_decap_mode;
	struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
	enum wmi_host_hw_mode_config_type preferred_hw_mode;
	struct target_resource_config  wlan_resource_config;
	struct ath11k_targ_cap *targ_cap;
};
1068 
/* Minimum acceptable payload length for a TLV element. */
struct wmi_tlv_policy {
	size_t min_len;
};

/* Parser state for the WMI "service ready" event. */
struct wmi_tlv_svc_ready_parse {
	int wmi_svc_bitmap_done;
};

/* Accumulates DMA ring capability TLVs while parsing. */
struct wmi_tlv_dma_ring_caps_parse {
	struct wmi_dma_ring_capabilities *dma_ring_caps;
	uint32_t n_dma_ring_caps;
};

/* Parser state for the WMI "service ready ext" event. */
struct wmi_tlv_svc_rdy_ext_parse {
	struct ath11k_service_ext_param param;
	struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
	struct wmi_hw_mode_capabilities *hw_mode_caps;
	uint32_t n_hw_mode_caps;
	uint32_t tot_phy_id;
	struct wmi_hw_mode_capabilities pref_hw_mode_caps;
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	size_t mac_phy_caps_size;
	uint32_t n_mac_phy_caps;
	struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
	struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
	uint32_t n_ext_hal_reg_caps;
	struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
	/* Flags recording which TLV groups have been consumed so far. */
	int hw_mode_done;
	int mac_phy_done;
	int ext_hal_reg_done;
	int mac_phy_chainmask_combo_done;
	int mac_phy_chainmask_cap_done;
	int oem_dma_ring_cap_done;
	int dma_ring_cap_done;
};

/* Parser state for the WMI "service ready ext2" event. */
struct wmi_tlv_svc_rdy_ext2_parse {
	struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
};

/* Parser state for the WMI "ready" event. */
struct wmi_tlv_rdy_parse {
	uint32_t num_extra_mac_addr;
};

/* Parser state for WMI DMA buffer release events. */
struct wmi_tlv_dma_buf_release_parse {
	struct ath11k_wmi_dma_buf_release_fixed_param fixed;
	struct wmi_dma_buf_release_entry *buf_entry;
	struct wmi_dma_buf_release_meta_data *meta_data;
	uint32_t num_buf_entry;
	uint32_t num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

/* Parser state for WMI firmware statistics events. */
struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	const struct wmi_per_chain_rssi_stats *rssi;
	struct ath11k_fw_stats *stats;
	int rssi_num;
	bool chain_rssi_done;
};

/* Parser state for WMI management-frame RX events. */
struct wmi_tlv_mgmt_rx_parse {
	const struct wmi_mgmt_rx_hdr *fixed;
	const uint8_t *frame_buf;
	bool frame_buf_done;
};
1137 
struct qwx_htc;

/* Per-endpoint callbacks invoked by the HTC layer. */
struct qwx_htc_ep_ops {
	void (*ep_tx_complete)(struct qwx_softc *, struct mbuf *);
	void (*ep_rx_complete)(struct qwx_softc *, struct mbuf *);
	void (*ep_tx_credits)(struct qwx_softc *);
};

/* service connection information */
struct qwx_htc_svc_conn_req {
	uint16_t service_id;
	struct qwx_htc_ep_ops ep_ops;
	int max_send_queue_depth;
};

/* service connection response information */
struct qwx_htc_svc_conn_resp {
	uint8_t buffer_len;
	uint8_t actual_len;
	enum ath11k_htc_ep_id eid;	/* endpoint assigned by the target */
	unsigned int max_msg_len;
	uint8_t connect_resp_code;
};

/* HTC message size limits and timeouts. */
#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
#define ATH11K_HTC_MAX_LEN 4096
#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
					sizeof(struct ath11k_htc_hdr))
#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
1170 
/* One HTC endpoint; each endpoint carries one service over CE pipes. */
struct qwx_htc_ep {
	struct qwx_htc *htc;		/* back-pointer to HTC state */
	enum ath11k_htc_ep_id eid;
	enum ath11k_htc_svc_id service_id;
	struct qwx_htc_ep_ops ep_ops;

	int max_tx_queue_depth;
	int max_ep_message_len;
	uint8_t ul_pipe_id;		/* uplink CE pipe */
	uint8_t dl_pipe_id;		/* downlink CE pipe */

	uint8_t seq_no; /* for debugging */
	int tx_credits;
	bool tx_credit_flow_enabled;
};

/* Static transmit-credit allocation for one service. */
struct qwx_htc_svc_tx_credits {
	uint16_t service_id;
	uint8_t  credit_allocation;
};

/* Host-target communication (HTC) layer state. */
struct qwx_htc {
	struct qwx_softc *sc;
	struct qwx_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
#ifdef notyet
	/* protects endpoints */
	spinlock_t tx_lock;
#endif
	uint8_t control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
	int control_resp_len;

	int ctl_resp;

	int total_transmit_credits;
	struct qwx_htc_svc_tx_credits
		service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
	int target_credit_size;
	uint8_t wmi_ep_count;
};
1210 
/* One consumer of a contiguous range of MSI vectors. */
struct qwx_msi_user {
	char *name;
	int num_vectors;
	uint32_t base_vector;	/* first vector owned by this user */
};

/* MSI vector layout for a given hardware revision. */
struct qwx_msi_config {
	int total_vectors;
	int total_users;
	struct qwx_msi_user *users;
	uint16_t hw_rev;
};
1223 
/* Per-band (2/5/6 GHz) capabilities of one phy. */
struct ath11k_band_cap {
	uint32_t phy_id;
	uint32_t max_bw_supported;
	uint32_t ht_cap_info;
	uint32_t he_cap_info[2];
	uint32_t he_mcs;
	uint32_t he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
	struct ath11k_ppe_threshold he_ppet;
	uint16_t he_6ghz_capa;
};

/* Capabilities of one physical device (pdev). */
struct ath11k_pdev_cap {
	uint32_t supported_bands;
	uint32_t ampdu_density;
	uint32_t vht_cap;
	uint32_t vht_mcs;
	uint32_t he_mcs;
	uint32_t tx_chain_mask;
	uint32_t rx_chain_mask;
	uint32_t tx_chain_mask_shift;
	uint32_t rx_chain_mask_shift;
	struct ath11k_band_cap band[WMI_NUM_SUPPORTED_BAND_MAX];
	int nss_ratio_enabled;
	uint8_t nss_ratio_info;
};

/* One physical device (radio) of the chip. */
struct qwx_pdev {
	struct qwx_softc *sc;	/* back-pointer to driver softc */
	uint32_t pdev_id;
	struct ath11k_pdev_cap cap;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
};

/* Capability limits of a direct-buffer ring for a given module id. */
struct qwx_dbring_cap {
	uint32_t pdev_id;
	enum wmi_direct_buffer_module id;
	uint32_t min_elem;
	uint32_t min_buf_sz;
	uint32_t min_buf_align;
};
1264 
/* An RXDMA refill ring plus host-side bookkeeping of its buffers. */
struct dp_rxdma_ring {
	struct dp_srng refill_buf_ring;
#if 0
	struct idr bufs_idr;
	/* Protects bufs_idr */
	spinlock_t idr_lock;
#else
	/* Per-buffer rx state; replaces the Linux idr. */
	struct qwx_rx_data *rx_data;
#endif
	int cur;
	int bufs_max;
};

/* Result of parsing one monitor-mode status TLV buffer. */
enum hal_rx_mon_status {
	HAL_RX_MON_STATUS_PPDU_NOT_DONE,
	HAL_RX_MON_STATUS_PPDU_DONE,
	HAL_RX_MON_STATUS_BUF_DONE,
};
1283 
/*
 * Per-user receive status gathered from monitor-mode status TLVs;
 * presumably one instance per MU user -- confirm against the TLV
 * parsing code.
 */
struct hal_rx_user_status {
	uint32_t mcs:4,
	nss:3,
	ofdma_info_valid:1,
	dl_ofdma_ru_start_index:7,
	dl_ofdma_ru_width:7,
	dl_ofdma_ru_size:8;
	uint32_t ul_ofdma_user_v0_word0;
	uint32_t ul_ofdma_user_v0_word1;
	uint32_t ast_index;
	uint32_t tid;
	uint16_t tcp_msdu_count;
	uint16_t udp_msdu_count;
	uint16_t other_msdu_count;
	uint16_t frame_control;
	uint8_t frame_control_info_valid;
	uint8_t data_sequence_control_info_valid;
	uint16_t first_data_seq_ctrl;
	uint32_t preamble_type;
	uint16_t ht_flags;
	uint16_t vht_flags;
	uint16_t he_flags;
	uint8_t rs_flags;
	uint32_t mpdu_cnt_fcs_ok;
	uint32_t mpdu_cnt_fcs_err;
	uint32_t mpdu_fcs_ok_bitmap[8];
	uint32_t mpdu_ok_byte_count;
	uint32_t mpdu_err_byte_count;
};

#define HAL_INVALID_PEERID 0xffff
#define VHT_SIG_SU_NSS_MASK 0x7

#define HAL_RX_MAX_MCS 12
#define HAL_RX_MAX_NSS 8

/* Aliases for the enum hal_rx_mon_status values above. */
#define HAL_TLV_STATUS_PPDU_NOT_DONE    HAL_RX_MON_STATUS_PPDU_NOT_DONE
#define HAL_TLV_STATUS_PPDU_DONE        HAL_RX_MON_STATUS_PPDU_DONE
#define HAL_TLV_STATUS_BUF_DONE         HAL_RX_MON_STATUS_BUF_DONE
1323 
/*
 * Description of one received PPDU, accumulated while parsing
 * monitor-mode status TLVs.
 */
struct hal_rx_mon_ppdu_info {
	uint32_t ppdu_id;
	uint32_t ppdu_ts;
	uint32_t num_mpdu_fcs_ok;
	uint32_t num_mpdu_fcs_err;
	uint32_t preamble_type;
	uint16_t chan_num;
	uint16_t tcp_msdu_count;
	uint16_t tcp_ack_msdu_count;
	uint16_t udp_msdu_count;
	uint16_t other_msdu_count;
	uint16_t peer_id;
	uint8_t rate;
	uint8_t mcs;
	uint8_t nss;
	uint8_t bw;
	/* VHT signaling fields, as delivered by the VHT SIG TLVs. */
	uint8_t vht_flag_values1;
	uint8_t vht_flag_values2;
	uint8_t vht_flag_values3[4];
	uint8_t vht_flag_values4;
	uint8_t vht_flag_values5;
	uint16_t vht_flag_values6;
	uint8_t is_stbc;
	uint8_t gi;
	uint8_t ldpc;
	uint8_t beamformed;
	uint8_t rssi_comb;
	uint8_t rssi_chain_pri20[HAL_RX_MAX_NSS];
	uint8_t tid;
	uint16_t ht_flags;
	uint16_t vht_flags;
	uint16_t he_flags;
	uint16_t he_mu_flags;
	uint8_t dcm;
	uint8_t ru_alloc;
	uint8_t reception_type;
	uint64_t tsft;
	uint64_t rx_duration;
	uint16_t frame_control;
	uint32_t ast_index;
	uint8_t rs_fcs_err;
	uint8_t rs_flags;
	uint8_t cck_flag;
	uint8_t ofdm_flag;
	uint8_t ulofdma_flag;
	uint8_t frame_control_info_valid;
	/* HE (802.11ax) per-user and per-PPDU signaling fields. */
	uint16_t he_per_user_1;
	uint16_t he_per_user_2;
	uint8_t he_per_user_position;
	uint8_t he_per_user_known;
	uint16_t he_flags1;
	uint16_t he_flags2;
	uint8_t he_RU[4];
	uint16_t he_data1;
	uint16_t he_data2;
	uint16_t he_data3;
	uint16_t he_data4;
	uint16_t he_data5;
	uint16_t he_data6;
	uint32_t ppdu_len;
	uint32_t prev_ppdu_id;
	uint32_t device_id;
	uint16_t first_data_seq_ctrl;
	uint8_t monitor_direct_used;
	uint8_t data_sequence_control_info_valid;
	uint8_t ltf_size;
	uint8_t rxpcu_filter_pass;
	char rssi_chain[8][8];
	struct hal_rx_user_status userstats;
};
1394 
/* Relation of the monitor status ring to the destination ring. */
enum dp_mon_status_buf_state {
	/* PPDU id matches in dst ring and status ring */
	DP_MON_STATUS_MATCH,
	/* status ring dma is not done */
	DP_MON_STATUS_NO_DMA,
	/* status ring is lagging, reap status ring */
	DP_MON_STATUS_LAG,
	/* status ring is leading, reap dst ring and drop */
	DP_MON_STATUS_LEAD,
	/* replenish monitor status ring (identifier keeps original spelling) */
	DP_MON_STATUS_REPLINISH,
};
1407 
/* Counters kept while reaping the monitor status/destination rings. */
struct qwx_pdev_mon_stats {
	uint32_t status_ppdu_state;
	uint32_t status_ppdu_start;
	uint32_t status_ppdu_end;
	uint32_t status_ppdu_compl;
	uint32_t status_ppdu_start_mis;
	uint32_t status_ppdu_end_mis;
	uint32_t status_ppdu_done;
	uint32_t dest_ppdu_done;
	uint32_t dest_mpdu_done;
	uint32_t dest_mpdu_drop;
	uint32_t dup_mon_linkdesc_cnt;
	uint32_t dup_mon_buf_cnt;
	uint32_t dest_mon_stuck;
	uint32_t dest_mon_not_reaped;
};

/* Monitor-mode receive state of a pdev. */
struct qwx_mon_data {
	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
	struct hal_rx_mon_ppdu_info mon_ppdu_info;

	uint32_t mon_ppdu_status;
	uint32_t mon_last_buf_cookie;
	uint64_t mon_last_linkdesc_paddr;
	uint16_t chan_noise_floor;
	bool hold_mon_dst_ring;
	enum dp_mon_status_buf_state buf_state;
	bus_addr_t mon_status_paddr;
	struct dp_full_mon_mpdu *mon_mpdu;
#ifdef notyet
	struct hal_sw_mon_ring_entries sw_mon_entries;
#endif
	struct qwx_pdev_mon_stats rx_mon_stats;
#ifdef notyet
	/* lock for monitor data */
	spinlock_t mon_lock;
	struct sk_buff_head rx_status_q;
#endif
};
1447 
1448 
#define MAX_RXDMA_PER_PDEV     2

/* Data-path (rings and monitor) state of one pdev. */
struct qwx_pdev_dp {
	uint32_t mac_id;
	uint32_t mon_dest_ring_stuck_cnt;
#if 0
	atomic_t num_tx_pending;
	wait_queue_head_t tx_empty_waitq;
#endif
	struct dp_rxdma_ring rx_refill_buf_ring;
	struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
	struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
	struct dp_srng rxdma_mon_dst_ring;
	struct dp_srng rxdma_mon_desc_ring;
	struct dp_rxdma_ring rxdma_mon_buf_ring;
	struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
#if 0
	struct ieee80211_rx_status rx_status;
#endif
	struct qwx_mon_data mon_data;
};
1470 
/* Driver state for one virtual interface (firmware vdev). */
struct qwx_vif {
	uint32_t vdev_id;
	enum wmi_vdev_type vdev_type;
	enum wmi_vdev_subtype vdev_subtype;
	uint32_t beacon_interval;
	uint32_t dtim_period;
	uint16_t ast_hash;
	uint16_t ast_idx;
	uint16_t tcl_metadata;
	uint8_t hal_addr_search_flags;
	uint8_t search_type;

	struct qwx_softc *sc;	/* back-pointer to driver softc */

	uint16_t tx_seq_no;
	struct wmi_wmm_params_all_arg wmm_params;
	TAILQ_ENTRY(qwx_vif) entry;	/* on sc->vif_list */
	/* Mode-specific state, keyed by vdev_type. */
	union {
		struct {
			uint32_t uapsd;
		} sta;
		struct {
			/* 127 stations; wmi limit */
			uint8_t tim_bitmap[16];
			uint8_t tim_len;
			uint32_t ssid_len;
			uint8_t ssid[IEEE80211_NWID_LEN];
			bool hidden_ssid;
			/* P2P_IE with NoA attribute for P2P_GO case */
			uint32_t noa_len;
			uint8_t *noa_data;
		} ap;
	} u;

	bool is_started;
	bool is_up;
	bool ftm_responder;
	bool spectral_enabled;
	bool ps;
	uint32_t aid;
	uint8_t bssid[IEEE80211_ADDR_LEN];
#if 0
	struct cfg80211_bitrate_mask bitrate_mask;
	struct delayed_work connection_loss_work;
#endif
	int num_legacy_stations;
	int rtscts_prot_mode;
	int txpower;
	bool rsnie_present;
	bool wpaie_present;
	bool bcca_zero_sent;
	bool do_not_send_tmpl;
	struct ieee80211_channel *chan;
#if 0
	struct ath11k_arp_ns_offload arp_ns_offload;
	struct ath11k_rekey_data rekey_data;
#endif
#ifdef CONFIG_ATH11K_DEBUGFS
	struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */
};

TAILQ_HEAD(qwx_vif_list, qwx_vif);

/* Channel survey results; see sc->survey[IEEE80211_CHAN_MAX]. */
struct qwx_survey_info {
	int8_t noise;
	uint64_t time;
	uint64_t time_busy;
};
1540 
#define ATH11K_IRQ_NUM_MAX 52
#define ATH11K_EXT_IRQ_NUM_MAX	16

/* A group of "extended" (data-path) interrupts serviced together. */
struct qwx_ext_irq_grp {
	struct qwx_softc *sc;	/* back-pointer to driver softc */
	uint32_t irqs[ATH11K_EXT_IRQ_NUM_MAX];
	uint32_t num_irq;
	uint32_t grp_id;
	uint64_t timestamp;
#if 0
	bool napi_enabled;
	struct napi_struct napi;
	struct net_device napi_ndev;
#endif
};
1556 
/*
 * Per-device driver state ("softc").  One instance per attached qwx(4)
 * device; the members at the bottom are provided by the bus-specific
 * (PCI or AHB) attachment driver.
 */
struct qwx_softc {
	struct device			sc_dev;
	struct ieee80211com		sc_ic;
	uint32_t			sc_flags;

	/* Saved net80211 newstate handler; see qwx_newstate(). */
	int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);

	struct rwlock ioctl_rwl;

	struct task		init_task; /* NB: not reference-counted */
	struct refcnt		task_refs;
	struct taskq		*sc_nswq;
	struct task		newstate_task;
	enum ieee80211_state	ns_nstate;	/* arguments for newstate_task */
	int			ns_arg;

	/* 802.11d scan state. */
	enum ath11k_11d_state	state_11d;
	int			completed_11d_scan;
	uint32_t		vdev_id_11d_scan;
	/* Hardware scan / remain-on-channel (roc) state. */
	struct {
		int started;
		int completed;
		int on_channel;
		struct timeout timeout;
		enum ath11k_scan_state state;
		int vdev_id;
		int is_roc;
		int roc_freq;
		int roc_notify;
	} scan;
	u_int			scan_channel;
	struct qwx_survey_info	survey[IEEE80211_CHAN_MAX];

	int			attached;
	int			have_firmware;

	int			sc_tx_timer;

	bus_addr_t			mem;
	struct ath11k_hw_params		hw_params;
	struct ath11k_hal		hal;
	struct qwx_ce			ce;
	struct qwx_dp			dp;
	struct qwx_pdev_dp		pdev_dp;
	struct qwx_wmi_base		wmi;
	struct qwx_htc			htc;

	enum ath11k_firmware_mode	fw_mode;
	enum ath11k_crypt_mode		crypto_mode;
	enum ath11k_hw_txrx_mode	frame_mode;

	struct qwx_ext_irq_grp		ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];

	/* QMI handshake and target capability state. */
	uint16_t			qmi_txn_id;
	int				qmi_cal_done;
	struct qwx_qmi_ce_cfg		qmi_ce_cfg;
	struct qwx_qmi_target_info	qmi_target;
	struct ath11k_targ_cap		target_caps;
	int				num_radios;
	uint32_t			cc_freq_hz;
	uint32_t			cfg_tx_chainmask;
	uint32_t			cfg_rx_chainmask;
	int				num_tx_chains;
	int				num_rx_chains;
	int				num_created_vdevs;
	int				num_started_vdevs;
	uint32_t			allocated_vdev_map;
	uint32_t			free_vdev_map;
	int				num_peers;
	int				peer_mapped;
	int				peer_delete_done;
	int				vdev_setup_done;

	struct qwx_dbring_cap	*db_caps;
	uint32_t		 num_db_cap;

	uint8_t		mac_addr[IEEE80211_ADDR_LEN];
	int		wmi_ready;
	uint32_t	wlan_init_status;

	uint32_t pktlog_defs_checksum;

	struct qwx_vif_list vif_list;
	struct qwx_pdev pdevs[MAX_RADIOS];
	struct {
		enum WMI_HOST_WLAN_BAND supported_bands;
		uint32_t pdev_id;
	} target_pdev_ids[MAX_RADIOS];
	uint8_t target_pdev_count;
	uint32_t pdevs_active;
	int pdevs_macaddr_valid;
	struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];

	/* QRTR address of the firmware's QMI service. */
	struct {
		uint32_t service;
		uint32_t instance;
		uint32_t node;
		uint32_t port;
	} qrtr_server;

	struct qmi_response_type_v01	qmi_resp;

	/* Firmware memory segment; presumably allocated in response to
	 * sc_req_mem_ind below -- confirm against the QMI code. */
	struct qwx_dmamem		*fwmem;
	int				 fwmem_ready;
	int				 fw_init_done;

	int				 ctl_resp;

	struct qwx_dmamem		*m3_mem;

	/* Provided by attachment driver: */
	struct qwx_ops			ops;
	bus_dma_tag_t			sc_dmat;
	enum ath11k_hw_rev		sc_hw_rev;
	struct qwx_device_id		id;
	char				sc_bus_str[4]; /* "pci" or "ahb" */
	int				num_msivec;
	uint32_t			msi_addr_lo;
	uint32_t			msi_addr_hi;
	uint32_t			msi_data_start;
	const struct qwx_msi_config	*msi_cfg;
	uint32_t			msi_ce_irqmask;

	struct qmi_wlanfw_request_mem_ind_msg_v01 *sc_req_mem_ind;
};
1682 
/* Interrupt handlers. */
int	qwx_ce_intr(void *);
int	qwx_ext_intr(void *);
int	qwx_dp_service_srng(struct qwx_softc *, int);

/* Attachment and teardown. */
int	qwx_init_hw_params(struct qwx_softc *);
int	qwx_attach(struct qwx_softc *);
void	qwx_detach(struct qwx_softc *);

void	qwx_core_deinit(struct qwx_softc *);
void	qwx_ce_cleanup_pipes(struct qwx_softc *);

/* ifnet/net80211 entry points. */
int	qwx_ioctl(struct ifnet *, u_long, caddr_t);
void	qwx_start(struct ifnet *);
void	qwx_watchdog(struct ifnet *);
int	qwx_media_change(struct ifnet *);
void	qwx_init_task(void *);
int	qwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	qwx_newstate_task(void *);
1701 
/*
 * Firmware peer state.  Fields under #if 0 come from the Linux ath11k
 * driver and are not (yet) used here.
 */
struct ath11k_peer {
#if 0
	struct list_head list;
	struct ieee80211_sta *sta;
#endif
	int vdev_id;
#if 0
	u8 addr[ETH_ALEN];
#endif
	int peer_id;
	uint16_t ast_hash;
	uint8_t pdev_id;
	uint16_t hw_peer_id;
#if 0
	/* protected by ab->data_lock */
	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
#endif
	struct dp_rx_tid rx_tid[IEEE80211_NUM_TID + 1];
#if 0
	/* peer id based rhashtable list pointer */
	struct rhash_head rhash_id;
	/* peer addr based rhashtable list pointer */
	struct rhash_head rhash_addr;

	/* Info used in MMIC verification of
	 * RX fragments
	 */
	struct crypto_shash *tfm_mmic;
	u8 mcast_keyidx;
	u8 ucast_keyidx;
	u16 sec_type;
	u16 sec_type_grp;
	bool is_authorized;
	bool dp_setup_done;
#endif
};

/* net80211 node extended with the firmware peer state it maps to. */
struct qwx_node {
	struct ieee80211_node ni;
	struct ath11k_peer peer;
};
1743 
/* net80211 node allocation hook; returns an embedded struct qwx_node. */
struct ieee80211_node *qwx_node_alloc(struct ieee80211com *);

void	qwx_qrtr_recv_msg(struct qwx_softc *, struct mbuf *);

int	qwx_hal_srng_init(struct qwx_softc *);

/* Copy-engine pipe management. */
int	qwx_ce_alloc_pipes(struct qwx_softc *);
void	qwx_ce_free_pipes(struct qwx_softc *);
void	qwx_ce_rx_post_buf(struct qwx_softc *);
void	qwx_ce_get_shadow_config(struct qwx_softc *, uint32_t **, uint32_t *);
1754 
/*
 * Round i up to the next power of two; i itself is returned when it is
 * already a power of two, and 0 yields 0 (as before).
 *
 * The previous expression, (1 << (fls(i) - 1)), rounded non-powers of
 * two *down* (e.g. 5 -> 4), contradicting the function's name; e.g.
 * ring sizes derived from it would come out smaller than requested.
 * Use the portable bit-smearing idiom instead, which also avoids the
 * kernel-only fls()/powerof2() helpers.
 */
static inline unsigned int
qwx_roundup_pow_of_two(unsigned int i)
{
	i--;
	i |= i >> 1;
	i |= i >> 2;
	i |= i >> 4;
	i |= i >> 8;
	i |= i >> 16;
	return i + 1;
}
1760 
/*
 * Return the host CE attribute flags for copy engine ce_id from the
 * hw_params table; ce_id must be below hw_params.ce_count.
 */
static inline unsigned int
qwx_ce_get_attr_flags(struct qwx_softc *sc, int ce_id)
{
	KASSERT(ce_id < sc->hw_params.ce_count);
	return sc->hw_params.host_ce_config[ce_id].flags;
}
1767 
1768 static inline enum ieee80211_edca_ac qwx_tid_to_ac(uint32_t tid)
1769 {
1770 	return (((tid == 0) || (tid == 3)) ? EDCA_AC_BE :
1771 		((tid == 1) || (tid == 2)) ? EDCA_AC_BK :
1772 		((tid == 4) || (tid == 5)) ? EDCA_AC_VI :
1773 		EDCA_AC_VO);
1774 }
1775