xref: /linux/drivers/net/wireless/ath/ath11k/hal.c (revision f86fd32d)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  */
5 #include <linux/dma-mapping.h>
6 #include "ahb.h"
7 #include "hal_tx.h"
8 #include "debug.h"
9 #include "hal_desc.h"
10 
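/* Per-ring-type SRNG configuration table, indexed by enum hal_ring_type.
 * entry_size is given in 32-bit words (ath11k_hal_srng_get_entrysize()
 * below converts it back to bytes with << 2). reg_start holds the R0
 * (ring setup) and R2 (head/tail pointer) register offsets of the first
 * ring of each type, and reg_size holds the register stride between
 * consecutive rings of the same type. Rings marked lmac_ring are not
 * programmed directly by the host; their setup goes through firmware.
 */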
11 static const struct hal_srng_config hw_srng_config[] = {
12 	/* TODO: max_rings can be populated by querying HW capabilities */
13 	{ /* REO_DST */
14 		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
15 		.max_rings = 4,
16 		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
17 		.lmac_ring = false,
18 		.ring_dir = HAL_SRNG_DIR_DST,
19 		.reg_start = {
20 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
21 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
22 		},
23 		.reg_size = {
24 			HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
25 			HAL_REO2_RING_HP - HAL_REO1_RING_HP,
26 		},
27 		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
28 	},
29 	{ /* REO_EXCEPTION */
30 		/* Designating the REO2TCL ring as the exception ring. This ring
31 		 * is similar to other REO2SW rings though it is named REO2TCL.
32 		 * Any of the REO2SW rings can be used as the exception ring.
33 		 */
34 		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
35 		.max_rings = 1,
36 		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
37 		.lmac_ring = false,
38 		.ring_dir = HAL_SRNG_DIR_DST,
39 		.reg_start = {
40 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
41 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
42 		},
43 		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
44 	},
45 	{ /* REO_REINJECT */
46 		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
47 		.max_rings = 1,
48 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
49 		.lmac_ring = false,
50 		.ring_dir = HAL_SRNG_DIR_SRC,
51 		.reg_start = {
52 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
53 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
54 		},
55 		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
56 	},
57 	{ /* REO_CMD */
58 		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
59 		.max_rings = 1,
60 		.entry_size = (sizeof(struct hal_tlv_hdr) +
61 			sizeof(struct hal_reo_get_queue_stats)) >> 2,
62 		.lmac_ring = false,
63 		.ring_dir = HAL_SRNG_DIR_SRC,
64 		.reg_start = {
65 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
66 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
67 		},
68 		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
69 	},
70 	{ /* REO_STATUS */
71 		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
72 		.max_rings = 1,
73 		.entry_size = (sizeof(struct hal_tlv_hdr) +
74 			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
75 		.lmac_ring = false,
76 		.ring_dir = HAL_SRNG_DIR_DST,
77 		.reg_start = {
78 			HAL_SEQ_WCSS_UMAC_REO_REG +
79 				HAL_REO_STATUS_RING_BASE_LSB,
80 			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
81 		},
82 		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
83 	},
84 	{ /* TCL_DATA */
85 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
86 		.max_rings = 3,
87 		.entry_size = (sizeof(struct hal_tlv_hdr) +
88 			     sizeof(struct hal_tcl_data_cmd)) >> 2,
89 		.lmac_ring = false,
90 		.ring_dir = HAL_SRNG_DIR_SRC,
91 		.reg_start = {
92 			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
93 			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
94 		},
95 		.reg_size = {
96 			HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
97 			HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
98 		},
99 		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
100 	},
101 	{ /* TCL_CMD */
102 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
103 		.max_rings = 1,
104 		.entry_size = (sizeof(struct hal_tlv_hdr) +
105 			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
106 		.lmac_ring = false,
107 		.ring_dir = HAL_SRNG_DIR_SRC,
108 		.reg_start = {
109 			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
110 			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
111 		},
112 		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
113 	},
114 	{ /* TCL_STATUS */
115 		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
116 		.max_rings = 1,
117 		.entry_size = (sizeof(struct hal_tlv_hdr) +
118 			     sizeof(struct hal_tcl_status_ring)) >> 2,
119 		.lmac_ring = false,
120 		.ring_dir = HAL_SRNG_DIR_DST,
121 		.reg_start = {
122 			HAL_SEQ_WCSS_UMAC_TCL_REG +
123 				HAL_TCL_STATUS_RING_BASE_LSB,
124 			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
125 		},
126 		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
127 	},
128 	{ /* CE_SRC */
129 		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
130 		.max_rings = 12,
131 		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
132 		.lmac_ring = false,
133 		.ring_dir = HAL_SRNG_DIR_SRC,
134 		.reg_start = {
135 			(HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
136 			 HAL_CE_DST_RING_BASE_LSB),
137 			HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
138 		},
139 		.reg_size = {
140 			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
141 			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
142 			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
143 			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
144 		},
145 		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
146 	},
147 	{ /* CE_DST */
148 		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
149 		.max_rings = 12,
150 		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
151 		.lmac_ring = false,
152 		.ring_dir = HAL_SRNG_DIR_SRC,
153 		.reg_start = {
154 			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
155 			 HAL_CE_DST_RING_BASE_LSB),
156 			HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
157 		},
158 		.reg_size = {
159 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
160 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
161 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
162 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
163 		},
164 		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
165 	},
166 	{ /* CE_DST_STATUS */
167 		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
168 		.max_rings = 12,
169 		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
170 		.lmac_ring = false,
171 		.ring_dir = HAL_SRNG_DIR_DST,
172 		.reg_start = {
173 			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
174 			 HAL_CE_DST_STATUS_RING_BASE_LSB),
175 			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
176 			 HAL_CE_DST_STATUS_RING_HP),
177 		},
178 		.reg_size = {
179 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
180 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
181 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
182 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
183 		},
184 		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
185 	},
186 	{ /* WBM_IDLE_LINK */
187 		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
188 		.max_rings = 1,
189 		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
190 		.lmac_ring = false,
191 		.ring_dir = HAL_SRNG_DIR_SRC,
192 		.reg_start = {
193 			(HAL_SEQ_WCSS_UMAC_WBM_REG +
194 			 HAL_WBM_IDLE_LINK_RING_BASE_LSB),
195 			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
196 		},
197 		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
198 	},
199 	{ /* SW2WBM_RELEASE */
200 		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
201 		.max_rings = 1,
202 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
203 		.lmac_ring = false,
204 		.ring_dir = HAL_SRNG_DIR_SRC,
205 		.reg_start = {
206 			(HAL_SEQ_WCSS_UMAC_WBM_REG +
207 			 HAL_WBM_RELEASE_RING_BASE_LSB),
208 			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
209 		},
210 		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
211 	},
212 	{ /* WBM2SW_RELEASE */
213 		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
214 		.max_rings = 4,
215 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
216 		.lmac_ring = false,
217 		.ring_dir = HAL_SRNG_DIR_DST,
218 		.reg_start = {
219 			(HAL_SEQ_WCSS_UMAC_WBM_REG +
220 			 HAL_WBM0_RELEASE_RING_BASE_LSB),
221 			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
222 		},
223 		.reg_size = {
224 			(HAL_WBM1_RELEASE_RING_BASE_LSB -
225 			 HAL_WBM0_RELEASE_RING_BASE_LSB),
226 			(HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
227 		},
228 		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
229 	},
230 	{ /* RXDMA_BUF */
231 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
232 		.max_rings = 2,
233 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
234 		.lmac_ring = true,
235 		.ring_dir = HAL_SRNG_DIR_SRC,
236 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
237 	},
238 	{ /* RXDMA_DST */
239 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
240 		.max_rings = 1,
241 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
242 		.lmac_ring = true,
243 		.ring_dir = HAL_SRNG_DIR_DST,
244 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
245 	},
246 	{ /* RXDMA_MONITOR_BUF */
247 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
248 		.max_rings = 1,
249 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
250 		.lmac_ring = true,
251 		.ring_dir = HAL_SRNG_DIR_SRC,
252 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
253 	},
254 	{ /* RXDMA_MONITOR_STATUS */
255 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
256 		.max_rings = 1,
257 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
258 		.lmac_ring = true,
259 		.ring_dir = HAL_SRNG_DIR_SRC,
260 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
261 	},
262 	{ /* RXDMA_MONITOR_DST */
263 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
264 		.max_rings = 1,
265 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
266 		.lmac_ring = true,
267 		.ring_dir = HAL_SRNG_DIR_DST,
268 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
269 	},
270 	{ /* RXDMA_MONITOR_DESC */
271 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
272 		.max_rings = 1,
273 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
274 		.lmac_ring = true,
275 		.ring_dir = HAL_SRNG_DIR_SRC,
276 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
277 	},
278 	{ /* RXDMA DIR BUF */
279 		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
280 		.max_rings = 1,
281 		.entry_size = 8 >> 2, /* TODO: Define the struct */
282 		.lmac_ring = true,
283 		.ring_dir = HAL_SRNG_DIR_SRC,
284 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
285 	},
286 };
287 
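/* The rdp and wrp areas below are small coherent DMA buffers holding one
 * u32 shadow slot per ring. rdp has a slot for every ring ID and receives
 * the pointer that the hardware updates (HP of destination rings, TP of
 * source rings); wrp has a slot per LMAC ring for the pointer that the
 * host updates and shares with firmware.
 */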
288 static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
289 {
290 	struct ath11k_hal *hal = &ab->hal;
291 	size_t size;
292 
293 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
294 	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
295 					    GFP_KERNEL);
296 	if (!hal->rdp.vaddr)
297 		return -ENOMEM;
298 
299 	return 0;
300 }
301 
302 static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
303 {
304 	struct ath11k_hal *hal = &ab->hal;
305 	size_t size;
306 
307 	if (!hal->rdp.vaddr)
308 		return;
309 
310 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
311 	dma_free_coherent(ab->dev, size,
312 			  hal->rdp.vaddr, hal->rdp.paddr);
313 	hal->rdp.vaddr = NULL;
314 }
315 
316 static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
317 {
318 	struct ath11k_hal *hal = &ab->hal;
319 	size_t size;
320 
321 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
322 	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
323 					    GFP_KERNEL);
324 	if (!hal->wrp.vaddr)
325 		return -ENOMEM;
326 
327 	return 0;
328 }
329 
330 static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
331 {
332 	struct ath11k_hal *hal = &ab->hal;
333 	size_t size;
334 
335 	if (!hal->wrp.vaddr)
336 		return;
337 
338 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
339 	dma_free_coherent(ab->dev, size,
340 			  hal->wrp.vaddr, hal->wrp.paddr);
341 	hal->wrp.vaddr = NULL;
342 }
343 
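/* Program the maximum buffer length for a CE destination ring into the
 * ring's HAL_CE_DST_RING_CTRL register.
 */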
344 static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
345 				    struct hal_srng *srng, int ring_num)
346 {
347 	const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
348 	u32 addr;
349 	u32 val;
350 
351 	addr = HAL_CE_DST_RING_CTRL +
352 	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
353 	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
354 	val = ath11k_ahb_read32(ab, addr);
355 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
356 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
357 			  srng->u.dst_ring.max_buffer_length);
358 	ath11k_ahb_write32(ab, addr, val);
359 }
360 
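/* Program a destination (HW producer) SRNG into hardware: optional MSI
 * address/data, ring base address and size, ring ID and entry size,
 * producer interrupt thresholds, the rdp slot where HW mirrors its head
 * pointer, and finally zeroed HP/TP and ring enable via the MISC register.
 */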
361 static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
362 					struct hal_srng *srng)
363 {
364 	struct ath11k_hal *hal = &ab->hal;
365 	u32 val;
366 	u64 hp_addr;
367 	u32 reg_base;
368 
369 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
370 
371 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
372 		ath11k_ahb_write32(ab, reg_base +
373 				       HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
374 				   (u32)srng->msi_addr);
375 
376 		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
377 				 ((u64)srng->msi_addr >>
378 				  HAL_ADDR_MSB_REG_SHIFT)) |
379 		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
380 		ath11k_ahb_write32(ab, reg_base +
381 				       HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
382 
383 		ath11k_ahb_write32(ab,
384 				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
385 				   srng->msi_data);
386 	}
387 
388 	ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
389 
390 	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
391 			 ((u64)srng->ring_base_paddr >>
392 			  HAL_ADDR_MSB_REG_SHIFT)) |
393 	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
394 			 (srng->entry_size * srng->num_entries));
395 	ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
396 
397 	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
398 	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
399 	ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
400 
401 	/* interrupt setup */
402 	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
403 			 (srng->intr_timer_thres_us >> 3));
404 
405 	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
406 			  (srng->intr_batch_cntr_thres_entries *
407 			   srng->entry_size));
408 
409 	ath11k_ahb_write32(ab,
410 			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
411 			   val);
412 
413 	hp_addr = hal->rdp.paddr +
414 		  ((unsigned long)srng->u.dst_ring.hp_addr -
415 		   (unsigned long)hal->rdp.vaddr);
416 	ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
417 			   hp_addr & HAL_ADDR_LSB_REG_MASK);
418 	ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
419 			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
420 
421 	/* Initialize head and tail pointers to indicate ring is empty */
422 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
423 	ath11k_ahb_write32(ab, reg_base, 0);
424 	ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
425 	*srng->u.dst_ring.hp_addr = 0;
426 
427 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
428 	val = 0;
429 	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
430 		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
431 	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
432 		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
433 	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
434 		val |= HAL_REO1_RING_MISC_MSI_SWAP;
435 	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
436 
437 	ath11k_ahb_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
438 }
439 
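/* Program a source (HW consumer) SRNG into hardware: optional MSI
 * address/data, ring base address and size, entry size, consumer
 * interrupt thresholds and optional low-threshold interrupt, the rdp
 * slot where HW mirrors its tail pointer (skipped for the WBM idle link
 * ring), and finally zeroed HP/TP and ring enable via the MISC register.
 */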
440 static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
441 					struct hal_srng *srng)
442 {
443 	struct ath11k_hal *hal = &ab->hal;
444 	u32 val;
445 	u64 tp_addr;
446 	u32 reg_base;
447 
448 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
449 
450 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
451 		ath11k_ahb_write32(ab, reg_base +
452 				       HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
453 				   (u32)srng->msi_addr);
454 
455 		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
456 				 ((u64)srng->msi_addr >>
457 				  HAL_ADDR_MSB_REG_SHIFT)) |
458 		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
459 		ath11k_ahb_write32(ab, reg_base +
460 				       HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
461 				   val);
462 
463 		ath11k_ahb_write32(ab, reg_base +
464 				       HAL_TCL1_RING_MSI1_DATA_OFFSET,
465 				   srng->msi_data);
466 	}
467 
468 	ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr);
469 
470 	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
471 			 ((u64)srng->ring_base_paddr >>
472 			  HAL_ADDR_MSB_REG_SHIFT)) |
473 	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
474 			 (srng->entry_size * srng->num_entries));
475 	ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
476 
477 	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
478 	ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
479 
480 	/* interrupt setup */
481 	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in
482 	 * units of 8 usecs instead of 1 usec (as required by v1).
483 	 */
484 	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
485 			 srng->intr_timer_thres_us);
486 
487 	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
488 			  (srng->intr_batch_cntr_thres_entries *
489 			   srng->entry_size));
490 
491 	ath11k_ahb_write32(ab,
492 			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
493 			   val);
494 
495 	val = 0;
496 	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
497 		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
498 				  srng->u.src_ring.low_threshold);
499 	}
500 	ath11k_ahb_write32(ab,
501 			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
502 			   val);
503 
504 	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
505 		tp_addr = hal->rdp.paddr +
506 			  ((unsigned long)srng->u.src_ring.tp_addr -
507 			   (unsigned long)hal->rdp.vaddr);
508 		ath11k_ahb_write32(ab,
509 				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
510 				   tp_addr & HAL_ADDR_LSB_REG_MASK);
511 		ath11k_ahb_write32(ab,
512 				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
513 				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
514 	}
515 
516 	/* Initialize head and tail pointers to indicate ring is empty */
517 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
518 	ath11k_ahb_write32(ab, reg_base, 0);
519 	ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
520 	*srng->u.src_ring.tp_addr = 0;
521 
522 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
523 	val = 0;
524 	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
525 		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
526 	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
527 		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
528 	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
529 		val |= HAL_TCL1_RING_MISC_MSI_SWAP;
530 
531 	/* Loop count is not used for SRC rings */
532 	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
533 
534 	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
535 
536 	ath11k_ahb_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
537 }
538 
539 static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
540 				    struct hal_srng *srng)
541 {
542 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
543 		ath11k_hal_srng_src_hw_init(ab, srng);
544 	else
545 		ath11k_hal_srng_dst_hw_init(ab, srng);
546 }
547 
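/* Map (ring type, ring number, MAC id) to a global SRNG ring ID. Rings of
 * different MACs are spaced HAL_SRNG_RINGS_PER_LMAC apart for LMAC ring
 * types. Returns -EINVAL for an out-of-range ring number.
 */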
548 static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
549 				       enum hal_ring_type type,
550 				       int ring_num, int mac_id)
551 {
552 	const struct hal_srng_config *srng_config = &hw_srng_config[type];
553 	int ring_id;
554 
555 	if (ring_num >= srng_config->max_rings) {
556 		ath11k_warn(ab, "invalid ring number: %d\n", ring_num);
557 		return -EINVAL;
558 	}
559 
560 	ring_id = srng_config->start_ring_id + ring_num;
561 	if (srng_config->lmac_ring)
562 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
563 
564 	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
565 		return -EINVAL;
566 
567 	return ring_id;
568 }
569 
570 int ath11k_hal_srng_get_entrysize(u32 ring_type)
571 {
572 	const struct hal_srng_config *srng_config;
573 
574 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
575 		return -EINVAL;
576 
577 	srng_config = &hw_srng_config[ring_type];
578 
579 	return (srng_config->entry_size << 2);
580 }
581 
582 int ath11k_hal_srng_get_max_entries(u32 ring_type)
583 {
584 	const struct hal_srng_config *srng_config;
585 
586 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
587 		return -EINVAL;
588 
589 	srng_config = &hw_srng_config[ring_type];
590 
591 	return (srng_config->max_size / srng_config->entry_size);
592 }
593 
594 void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
595 				struct hal_srng_params *params)
596 {
597 	params->ring_base_paddr = srng->ring_base_paddr;
598 	params->ring_base_vaddr = srng->ring_base_vaddr;
599 	params->num_entries = srng->num_entries;
600 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
601 	params->intr_batch_cntr_thres_entries =
602 		srng->intr_batch_cntr_thres_entries;
603 	params->low_threshold = srng->u.src_ring.low_threshold;
604 	params->flags = srng->flags;
605 }
606 
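/* The two helpers below return, for LMAC rings, the DMA address of the
 * ring's head/tail pointer slot inside the shared wrp/rdp areas; for all
 * other rings they return 0.
 */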
607 dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
608 				       struct hal_srng *srng)
609 {
610 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
611 		return 0;
612 
613 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
614 		return ab->hal.wrp.paddr +
615 		       ((unsigned long)srng->u.src_ring.hp_addr -
616 			(unsigned long)ab->hal.wrp.vaddr);
617 	else
618 		return ab->hal.rdp.paddr +
619 		       ((unsigned long)srng->u.dst_ring.hp_addr -
620 			(unsigned long)ab->hal.rdp.vaddr);
621 }
622 
623 dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
624 				       struct hal_srng *srng)
625 {
626 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
627 		return 0;
628 
629 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
630 		return ab->hal.rdp.paddr +
631 		       ((unsigned long)srng->u.src_ring.tp_addr -
632 			(unsigned long)ab->hal.rdp.vaddr);
633 	else
634 		return ab->hal.wrp.paddr +
635 		       ((unsigned long)srng->u.dst_ring.tp_addr -
636 			(unsigned long)ab->hal.wrp.vaddr);
637 }
638 
639 u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
640 {
641 	switch (type) {
642 	case HAL_CE_DESC_SRC:
643 		return sizeof(struct hal_ce_srng_src_desc);
644 	case HAL_CE_DESC_DST:
645 		return sizeof(struct hal_ce_srng_dest_desc);
646 	case HAL_CE_DESC_DST_STATUS:
647 		return sizeof(struct hal_ce_srng_dst_status_desc);
648 	}
649 
650 	return 0;
651 }
652 
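/* Fill a CE source ring descriptor: lower 32 bits of the buffer DMA
 * address, then the upper address bits together with the byte-swap,
 * gather and length fields, and the caller's id in the meta info field.
 */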
653 void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
654 				u8 byte_swap_data)
655 {
656 	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
657 
658 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
659 	desc->buffer_addr_info =
660 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
661 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
662 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
663 			   byte_swap_data) |
664 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
665 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
666 	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
667 }
668 
669 void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
670 {
671 	struct hal_ce_srng_dest_desc *desc =
672 		(struct hal_ce_srng_dest_desc *)buf;
673 
674 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
675 	desc->buffer_addr_info =
676 		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
677 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
678 }
679 
680 u32 ath11k_hal_ce_dst_status_get_length(void *buf)
681 {
682 	struct hal_ce_srng_dst_status_desc *desc =
683 		(struct hal_ce_srng_dst_status_desc *)buf;
684 	u32 len;
685 
686 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
687 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
688 
689 	return len;
690 }
691 
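/* Fill the buffer address info of a WBM link descriptor: lower 32 address
 * bits in info0; upper address bits, a return buffer manager value of 1
 * and the SW cookie in info1.
 */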
692 void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
693 				   dma_addr_t paddr)
694 {
695 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
696 					       (paddr & HAL_ADDR_LSB_REG_MASK));
697 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
698 					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
699 				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
700 				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
701 }
702 
703 u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
704 {
705 	lockdep_assert_held(&srng->lock);
706 
707 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
708 		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
709 
710 	return NULL;
711 }
712 
713 u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
714 					struct hal_srng *srng)
715 {
716 	u32 *desc;
717 
718 	lockdep_assert_held(&srng->lock);
719 
720 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
721 		return NULL;
722 
723 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
724 
725 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
726 			      srng->ring_size;
727 
728 	return desc;
729 }
730 
731 int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
732 				 bool sync_hw_ptr)
733 {
734 	u32 tp, hp;
735 
736 	lockdep_assert_held(&srng->lock);
737 
738 	tp = srng->u.dst_ring.tp;
739 
740 	if (sync_hw_ptr) {
741 		hp = *srng->u.dst_ring.hp_addr;
742 		srng->u.dst_ring.cached_hp = hp;
743 	} else {
744 		hp = srng->u.dst_ring.cached_hp;
745 	}
746 
747 	if (hp >= tp)
748 		return (hp - tp) / srng->entry_size;
749 	else
750 		return (srng->ring_size - tp + hp) / srng->entry_size;
751 }
752 
753 /* Returns number of available entries in src ring */
754 int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
755 				 bool sync_hw_ptr)
756 {
757 	u32 tp, hp;
758 
759 	lockdep_assert_held(&srng->lock);
760 
761 	hp = srng->u.src_ring.hp;
762 
763 	if (sync_hw_ptr) {
764 		tp = *srng->u.src_ring.tp_addr;
765 		srng->u.src_ring.cached_tp = tp;
766 	} else {
767 		tp = srng->u.src_ring.cached_tp;
768 	}
769 
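	/* One entry is always left unused so that a completely full ring
	 * (hp just behind tp) can be distinguished from an empty ring
	 * (hp == tp), hence the "- 1" below. E.g. a ring with 8 entries
	 * holds at most 7 unconsumed descriptors.
	 */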
770 	if (tp > hp)
771 		return ((tp - hp) / srng->entry_size) - 1;
772 	else
773 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
774 }
775 
776 u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
777 					struct hal_srng *srng)
778 {
779 	u32 *desc;
780 	u32 next_hp;
781 
782 	lockdep_assert_held(&srng->lock);
783 
784 	/* TODO: Using % is expensive, but we have to do this since the size of
785 	 * some SRNG rings is not a power of 2 (due to descriptor sizes). Need
786 	 * to see if a separate function can be defined for rings with a
787 	 * power-of-2 ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that
788 	 * the overhead of % can be avoided by using a mask (with &).
789 	 */
790 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
791 
792 	if (next_hp == srng->u.src_ring.cached_tp)
793 		return NULL;
794 
795 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
796 	srng->u.src_ring.hp = next_hp;
797 
798 	/* TODO: Reap functionality is not used by all rings. If a particular
799 	 * ring does not use it, reap_hp need not be updated with the next_hp
800 	 * pointer. Make sure a separate function is used for such rings
801 	 * before doing any optimization by removing the code below that
802 	 * updates reap_hp.
803 	 */
804 	srng->u.src_ring.reap_hp = next_hp;
805 
806 	return desc;
807 }
808 
809 u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
810 				   struct hal_srng *srng)
811 {
812 	u32 *desc;
813 	u32 next_reap_hp;
814 
815 	lockdep_assert_held(&srng->lock);
816 
817 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
818 		       srng->ring_size;
819 
820 	if (next_reap_hp == srng->u.src_ring.cached_tp)
821 		return NULL;
822 
823 	desc = srng->ring_base_vaddr + next_reap_hp;
824 	srng->u.src_ring.reap_hp = next_reap_hp;
825 
826 	return desc;
827 }
828 
829 u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
830 					 struct hal_srng *srng)
831 {
832 	u32 *desc;
833 
834 	lockdep_assert_held(&srng->lock);
835 
836 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
837 		return NULL;
838 
839 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
840 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
841 			      srng->ring_size;
842 
843 	return desc;
844 }
845 
846 u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
847 {
848 	lockdep_assert_held(&srng->lock);
849 
850 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
851 	    srng->u.src_ring.cached_tp)
852 		return NULL;
853 
854 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
855 }
856 
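/* Snapshot the pointer owned by HW (TP for source rings, HP for
 * destination rings) into the cached copy used while processing ring
 * entries. Must be called with srng->lock held.
 */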
857 void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
858 {
859 	lockdep_assert_held(&srng->lock);
860 
861 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
862 		srng->u.src_ring.cached_tp =
863 			*(volatile u32 *)srng->u.src_ring.tp_addr;
864 	else
865 		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
866 }
867 
868 /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
869  * should have been called before this.
870  */
871 void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
872 {
873 	lockdep_assert_held(&srng->lock);
874 
875 	/* TODO: See if we need a write memory barrier here */
876 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
877 		/* For LMAC rings, ring pointer updates are done through FW, so
878 		 * the pointer is written to shared memory that FW reads.
879 		 */
880 		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
881 			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
882 		else
883 			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
884 	} else {
885 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
886 			ath11k_ahb_write32(ab,
887 					   (unsigned long)srng->u.src_ring.hp_addr -
888 					   (unsigned long)ab->mem,
889 					   srng->u.src_ring.hp);
890 		} else {
891 			ath11k_ahb_write32(ab,
892 					   (unsigned long)srng->u.dst_ring.tp_addr -
893 					   (unsigned long)ab->mem,
894 					   srng->u.dst_ring.tp);
895 		}
896 	}
897 }
898 
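/* Chain the WBM idle-list scatter buffers together by writing each
 * buffer's DMA address at the end of the previous buffer, then program
 * the WBM idle list control, size, base and head/tail pointer registers
 * and enable the idle link ring.
 */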
899 void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
900 				     struct hal_wbm_idle_scatter_list *sbuf,
901 				     u32 nsbufs, u32 tot_link_desc,
902 				     u32 end_offset)
903 {
904 	struct ath11k_buffer_addr *link_addr;
905 	int i;
906 	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
907 
908 	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
909 
910 	for (i = 1; i < nsbufs; i++) {
911 		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
912 		link_addr->info1 = FIELD_PREP(
913 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
914 				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
915 				FIELD_PREP(
916 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
917 				BASE_ADDR_MATCH_TAG_VAL);
918 
919 		link_addr = (void *)sbuf[i].vaddr +
920 			     HAL_WBM_IDLE_SCATTER_BUF_SIZE;
921 	}
922 
923 	ath11k_ahb_write32(ab,
924 			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
925 			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
926 			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
927 	ath11k_ahb_write32(ab,
928 			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
929 			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
930 				      reg_scatter_buf_sz * nsbufs));
931 	ath11k_ahb_write32(ab,
932 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
933 			   HAL_WBM_SCATTERED_RING_BASE_LSB,
934 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
935 				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
936 	ath11k_ahb_write32(ab,
937 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
938 			   HAL_WBM_SCATTERED_RING_BASE_MSB,
939 			   FIELD_PREP(
940 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
941 				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
942 				FIELD_PREP(
943 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
944 				BASE_ADDR_MATCH_TAG_VAL));
945 
946 	/* Setup head and tail pointers for the idle list */
947 	ath11k_ahb_write32(ab,
948 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
949 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
950 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
951 				      sbuf[nsbufs - 1].paddr));
952 	ath11k_ahb_write32(ab,
953 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
954 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
955 			   FIELD_PREP(
956 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
957 				((u64)sbuf[nsbufs - 1].paddr >>
958 				 HAL_ADDR_MSB_REG_SHIFT)) |
959 			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
960 				      (end_offset >> 2)));
961 	ath11k_ahb_write32(ab,
962 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
963 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
964 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
965 				      sbuf[0].paddr));
966 
967 	ath11k_ahb_write32(ab,
968 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
969 			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
970 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
971 				      sbuf[0].paddr));
972 	ath11k_ahb_write32(ab,
973 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
974 			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
975 			   FIELD_PREP(
976 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
977 				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
978 			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
979 				      0));
980 	ath11k_ahb_write32(ab,
981 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
982 			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
983 			   2 * tot_link_desc);
984 
985 	/* Enable the SRNG */
986 	ath11k_ahb_write32(ab,
987 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
988 			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
989 }
990 
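/* Set up an SRNG: resolve the global ring ID, initialize the software
 * ring state and hook up the shadow pointers: the pointer updated by the
 * host goes either into the wrp area (LMAC rings) or straight to the
 * ring's HP/TP register, while the pointer updated by the hardware is
 * mirrored in the rdp area. Non-LMAC rings are then programmed into
 * hardware (including the CE destination buffer length for HAL_CE_DST);
 * LMAC ring registers are not touched here, since their setup goes
 * through firmware. Returns the ring ID on success or a negative error.
 */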
991 int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
992 			  int ring_num, int mac_id,
993 			  struct hal_srng_params *params)
994 {
995 	struct ath11k_hal *hal = &ab->hal;
996 	const struct hal_srng_config *srng_config = &hw_srng_config[type];
997 	struct hal_srng *srng;
998 	int ring_id;
999 	u32 lmac_idx;
1000 	int i;
1001 	u32 reg_base;
1002 
1003 	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
1004 	if (ring_id < 0)
1005 		return ring_id;
1006 
1007 	srng = &hal->srng_list[ring_id];
1008 
1009 	srng->ring_id = ring_id;
1010 	srng->ring_dir = srng_config->ring_dir;
1011 	srng->ring_base_paddr = params->ring_base_paddr;
1012 	srng->ring_base_vaddr = params->ring_base_vaddr;
1013 	srng->entry_size = srng_config->entry_size;
1014 	srng->num_entries = params->num_entries;
1015 	srng->ring_size = srng->entry_size * srng->num_entries;
1016 	srng->intr_batch_cntr_thres_entries =
1017 				params->intr_batch_cntr_thres_entries;
1018 	srng->intr_timer_thres_us = params->intr_timer_thres_us;
1019 	srng->flags = params->flags;
1020 	spin_lock_init(&srng->lock);
1021 
1022 	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
1023 		srng->hwreg_base[i] = srng_config->reg_start[i] +
1024 				      (ring_num * srng_config->reg_size[i]);
1025 	}
1026 
1027 	memset(srng->ring_base_vaddr, 0,
1028 	       (srng->entry_size * srng->num_entries) << 2);
1029 
1030 	/* TODO: Add comments on these swap configurations */
1031 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1032 		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
1033 			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
1034 
1035 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
1036 
1037 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
1038 		srng->u.src_ring.hp = 0;
1039 		srng->u.src_ring.cached_tp = 0;
1040 		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
1041 		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
1042 		srng->u.src_ring.low_threshold = params->low_threshold *
1043 						 srng->entry_size;
1044 		if (srng_config->lmac_ring) {
1045 			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1046 			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
1047 						   lmac_idx);
1048 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1049 		} else {
1050 			srng->u.src_ring.hp_addr =
1051 				(u32 *)((unsigned long)ab->mem + reg_base);
1052 		}
1053 	} else {
1054 		/* During initialization loop count in all the descriptors
1055 		 * will be set to zero, and HW will set it to 1 on completing
1056 		 * descriptor update in first loop, and increments it by 1 on
1057 		 * subsequent loops (loop count wraps around after reaching
1058 		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
1059 		 * loop count in descriptors updated by HW (to be processed
1060 		 * by SW).
1061 		 */
1062 		srng->u.dst_ring.loop_cnt = 1;
1063 		srng->u.dst_ring.tp = 0;
1064 		srng->u.dst_ring.cached_hp = 0;
1065 		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
1066 		if (srng_config->lmac_ring) {
1067 			/* For LMAC rings, tail pointer updates will be done
1068 			 * through FW by writing to a shared memory location
1069 			 */
1070 			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1071 			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
1072 						   lmac_idx);
1073 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1074 		} else {
1075 			srng->u.dst_ring.tp_addr =
1076 				(u32 *)((unsigned long)ab->mem + reg_base +
1077 					(HAL_REO1_RING_TP - HAL_REO1_RING_HP));
1078 		}
1079 	}
1080 
1081 	if (srng_config->lmac_ring)
1082 		return ring_id;
1083 
1084 	ath11k_hal_srng_hw_init(ab, srng);
1085 
1086 	if (type == HAL_CE_DST) {
1087 		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
1088 		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
1089 	}
1090 
1091 	return ring_id;
1092 }
1093 
1094 int ath11k_hal_srng_init(struct ath11k_base *ab)
1095 {
1096 	struct ath11k_hal *hal = &ab->hal;
1097 	int ret;
1098 
1099 	memset(hal, 0, sizeof(*hal));
1100 
1101 	hal->srng_config = hw_srng_config;
1102 
1103 	ret = ath11k_hal_alloc_cont_rdp(ab);
1104 	if (ret)
1105 		goto err_hal;
1106 
1107 	ret = ath11k_hal_alloc_cont_wrp(ab);
1108 	if (ret)
1109 		goto err_free_cont_rdp;
1110 
1111 	return 0;
1112 
1113 err_free_cont_rdp:
1114 	ath11k_hal_free_cont_rdp(ab);
1115 
1116 err_hal:
1117 	return ret;
1118 }
1119 
1120 void ath11k_hal_srng_deinit(struct ath11k_base *ab)
1121 {
1122 	ath11k_hal_free_cont_rdp(ab);
1123 	ath11k_hal_free_cont_wrp(ab);
1124 }
1125