// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#include "t7xx_dpmaif.h"
#include "t7xx_reg.h"

#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us)

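/* Program the AP-side L2 interrupt masks: clear any pending UL/DL interrupt
 * status, enable the UL and DL interrupt sources handled by the driver and
 * verify that the mask registers took effect, then set up the IP busy and
 * L1 interrupt masks.
 */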
static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info)
{
	struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask;
	u32 value, ul_intr_enable, dl_intr_enable;
	int ret;

	ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK;
	isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
	iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);

	/* Set interrupt enable mask */
	iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
	iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);

	/* Check mask status */
	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
					   value, (value & ul_intr_enable) != ul_intr_enable, 0,
					   DPMAIF_CHECK_INIT_TIMEOUT_US);
	if (ret)
		return ret;

	dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR;
	isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable;
	ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN |
			 DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN;
	isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
	iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);

	/* Set DL ISR PD enable mask */
	iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0,
					   value, (value & ul_intr_enable) != ul_intr_enable, 0,
					   DPMAIF_CHECK_INIT_TIMEOUT_US);
	if (ret)
		return ret;

	isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY;
	iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
	iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk,
		  hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK);
	value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
	value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
	iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK);

	return 0;
}

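/* Mask the TX done interrupt of queue @q_num and poll the mask register
 * until the bit reads back as set.
 */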
static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
	struct dpmaif_isr_en_mask *isr_en_msk;
	u32 value, ul_int_que_done;
	int ret;

	isr_en_msk = &hw_info->isr_en_mask;
	ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
	isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done;
	iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
					   value, (value & ul_int_que_done) == ul_int_que_done, 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (ret)
		dev_err(hw_info->dev,
			"Could not mask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
			value);
}

void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
	struct dpmaif_isr_en_mask *isr_en_msk;
	u32 value, ul_int_que_done;
	int ret;

	isr_en_msk = &hw_info->isr_en_mask;
	ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
	isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done;
	iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
					   value, (value & ul_int_que_done) != ul_int_que_done, 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (ret)
		dev_err(hw_info->dev,
			"Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
			value);
}

void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
{
	hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR;
	iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
}

void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
{
	hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR;
	iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
}

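/* Helper for read_poll_timeout_atomic(): re-write the DL queue mask bit and
 * return the current L2 interrupt mask register so the caller can poll until
 * the mask takes effect.
 */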
static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
	iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
	return value;
}

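/* Mask the RX done interrupt of DLQ0/DLQ1 and poll until the mask is
 * reflected by the hardware. Returns -ETIMEDOUT if it is not.
 */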
static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno)
{
	u32 value, q_done;
	int ret;

	q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
	iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);

	ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done,
				       0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done);
	if (ret) {
		dev_err(hw_info->dev,
			"Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
			value);
		return -ETIMEDOUT;
	}

	hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done;
	return 0;
}

void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
{
	u32 mask;

	mask = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
	iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
	hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask;
}

void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info)
{
	u32 ip_busy_sts;

	ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
	iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
}

static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
							 unsigned int qno)
{
	if (qno == DPF_RX_QNO0)
		iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
			  hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
	else
		iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
			  hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
}

void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
						unsigned int qno)
{
	if (qno == DPF_RX_QNO0)
		iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
			  hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
	else
		iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
			  hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
}

void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
}

void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
}

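/* Record one interrupt event (type and queue bitmap) in @para for the
 * bottom half to consume.
 */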
static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para,
				      enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue)
{
	para->intr_types[para->intr_cnt] = intr_type;
	para->intr_queues[para->intr_cnt] = intr_queue;
	para->intr_cnt++;
}

/* The para->intr_cnt counter is set to zero before this function is called.
 * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
 */
static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info,
					 unsigned int intr_status,
					 struct dpmaif_hw_intr_st_para *para)
{
	unsigned long value;

	value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status);
	if (value) {
		unsigned int index;

		t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value);

		for_each_set_bit(index, &value, DPMAIF_TXQ_NUM)
			t7xx_dpmaif_mask_ulq_intr(hw_info, index);
	}

	value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status);
	if (value)
		t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value);

	value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status);
	if (value)
		t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value);

	value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status);
	if (value)
		t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value);

	value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status);
	if (value)
		t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value);

	/* Clear interrupt status */
	iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
}

/* The para->intr_cnt counter is set to zero before this function is called.
 * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
 */
static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info,
					 unsigned int intr_status,
					 struct dpmaif_hw_intr_st_para *para, int qno)
{
	if (qno == DPF_RX_QNO_DFT) {
		if (intr_status & DP_DL_INT_SKB_LEN_ERR)
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT);

		if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) {
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT);
			hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR;
			iowrite32(DP_DL_INT_BATCNT_LEN_ERR,
				  hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
		}

		if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) {
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT);
			hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR;
			iowrite32(DP_DL_INT_PITCNT_LEN_ERR,
				  hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
		}

		if (intr_status & DP_DL_INT_PKT_EMPTY_MSK)
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT);

		if (intr_status & DP_DL_INT_FRG_EMPTY_MSK)
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT);

		if (intr_status & DP_DL_INT_MTU_ERR_MSK)
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT);

		if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK)
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT);

		if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) {
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno));
			t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
		}

		if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR)
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR,
						  DPF_RX_QNO_DFT);

		if (intr_status & DP_DL_INT_Q0_DONE) {
			/* Mask RX done interrupt immediately after it occurs, do not clear
			 * the interrupt if the mask operation fails.
			 */
			if (!t7xx_mask_dlq_intr(hw_info, qno))
				t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno));
			else
				intr_status &= ~DP_DL_INT_Q0_DONE;
		}
	} else {
		if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) {
			t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno));
			t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
		}

		if (intr_status & DP_DL_INT_Q1_DONE) {
			if (!t7xx_mask_dlq_intr(hw_info, qno))
				t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno));
			else
				intr_status &= ~DP_DL_INT_Q1_DONE;
		}
	}

	intr_status |= DP_DL_INT_BATCNT_LEN_ERR;
	/* Clear interrupt status */
	iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
}

/**
 * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW.
 * @hw_info: Pointer to struct dpmaif_hw_info.
 * @para: Pointer to struct dpmaif_hw_intr_st_para.
 * @qno: Queue number.
 *
 * Reads RX/TX interrupt status from HW and clears UL/DL status as needed.
 *
 * Return: Interrupt count.
 */
int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
				struct dpmaif_hw_intr_st_para *para, int qno)
{
	u32 rx_intr_status, tx_intr_status = 0;
	u32 rx_intr_qdone, tx_intr_qdone = 0;

	rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
	rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0);

	/* TX interrupt status */
	if (qno == DPF_RX_QNO_DFT) {
		/* All ULQ and DLQ0 interrupts use the same source, so there is no need
		 * to check ULQ interrupts when a DLQ1 interrupt has occurred.
		 */
		tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
		tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
	}

	t7xx_dpmaif_clr_ip_busy_sts(hw_info);

	if (qno == DPF_RX_QNO_DFT) {
		/* Do not schedule bottom half again or clear UL interrupt status when we
		 * have already masked it.
		 */
		tx_intr_status &= ~tx_intr_qdone;
		if (tx_intr_status)
			t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para);
	}

	if (rx_intr_status) {
		if (qno == DPF_RX_QNO0) {
			rx_intr_status &= DP_DL_Q0_STATUS_MASK;
			if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE)
				/* Do not schedule bottom half again or clear DL
				 * queue done interrupt status when we have already masked it.
				 */
				rx_intr_status &= ~DP_DL_INT_Q0_DONE;
		} else {
			rx_intr_status &= DP_DL_Q1_STATUS_MASK;
			if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE)
				rx_intr_status &= ~DP_DL_INT_Q1_DONE;
		}

		if (rx_intr_status)
			t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno);
	}

	return para->intr_cnt;
}

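/* Trigger the DPMAIF SRAM clear and wait for the hardware to clear the
 * DPMAIF_MEM_CLR bit.
 */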
static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
	value |= DPMAIF_MEM_CLR;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR);

	return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR,
					    value, !(value & DPMAIF_MEM_CLR), 0,
					    DPMAIF_CHECK_INIT_TIMEOUT_US);
}

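/* Assert and then de-assert the AO and AP reset bits, with a short delay
 * after each step.
 */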
static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT);
	udelay(2);
	iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT);
	udelay(2);
	iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT);
	udelay(2);
	iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT);
	udelay(2);
}

static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info)
{
	u32 ap_port_mode;
	int ret;

	t7xx_dpmaif_hw_reset(hw_info);

	ret = t7xx_dpmaif_sram_init(hw_info);
	if (ret)
		return ret;

	ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
	ap_port_mode |= DPMAIF_PORT_MODE_PCIE;
	iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
	iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN);
	return 0;
}

static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW);
}

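/* Enable BAT cache priority and PIT burst mode for DL performance. */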
static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info)
{
	u32 enable_bat_cache, enable_pit_burst;

	enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
	enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI;
	iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);

	enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
	enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN;
	iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}

/* DPMAIF DL DLQ part HW setting */

static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2;
	value |= DPMAIF_HASH_PRIME_DF << 4;
	value |= DPMAIF_HPC_TOTAL_NUM << 8;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL);
}

static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = DPMAIF_AGG_MAX_LEN_DF | DPMAIF_AGG_TBL_ENT_NUM_DF << 16;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG);
}

static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF,
		  hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5);
}

static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0);
}

static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info)
{
	unsigned int value, i;

	/* Each register holds two DLQ threshold timeout values */
	for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) {
		value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF);
		value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK,
				    DPMAIF_DLQ_TIMEOUT_THRES_DF);
		iowrite32(value,
			  hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i);
	}
}

static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES);
}

static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info)
{
	t7xx_dpmaif_hw_hpc_cntl_set(hw_info);
	t7xx_dpmaif_hw_agg_cfg_set(hw_info);
	t7xx_dpmaif_hw_hash_bit_choose_set(hw_info);
	t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info);
	t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info);
	t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info);
}

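/* Wait for the DL BAT init interface to become ready, start the packet or
 * fragment BAT initialization, and wait for it to complete.
 */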
static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en)
{
	u32 value, dl_bat_init = 0;
	int ret;

	if (frg_en)
		dl_bat_init = DPMAIF_DL_BAT_FRG_INIT;

	dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET;
	dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
					   value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
					   DPMAIF_CHECK_INIT_TIMEOUT_US);
	if (ret) {
		dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
		return ret;
	}

	iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
					   value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
					   DPMAIF_CHECK_INIT_TIMEOUT_US);
	if (ret)
		dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n");

	return ret;
}

static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info,
					     dma_addr_t addr)
{
	iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0);
	iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3);
}

static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
	value &= ~DPMAIF_BAT_SIZE_MSK;
	value |= size & DPMAIF_BAT_SIZE_MSK;
	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
}

static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);

	if (enable)
		value |= DPMAIF_BAT_EN_MSK;
	else
		value &= ~DPMAIF_BAT_EN_MSK;

	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
}

static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
	value &= ~DPMAIF_BAT_BID_MAXCNT_MSK;
	value |= FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT);
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
}

static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info)
{
	iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1);
}

static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
	value &= ~DPMAIF_PIT_CHK_NUM_MSK;
	value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM);
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
}

static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
	value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK;
	value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK,
			    DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE);
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
}

static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
	value &= ~DPMAIF_BAT_BUF_SZ_MSK;
	value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK,
			    DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE);
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
}

static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
	value &= ~DPMAIF_BAT_RSV_LEN_MSK;
	value |= DPMAIF_HW_BAT_RSVLEN;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
}

static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
	value &= ~DPMAIF_PKT_ALIGN_MSK;
	value |= DPMAIF_PKT_ALIGN_EN;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}

static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
	value |= DPMAIF_DL_PKT_CHECKSUM_EN;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}

static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
	value &= ~DPMAIF_FRG_CHECK_THRES_MSK;
	value |= DPMAIF_HW_CHK_FRG_NUM;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
}

static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
	value &= ~DPMAIF_FRG_BUF_SZ_MSK;
	value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK,
			    DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE);
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
}

static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);

	if (enable)
		value |= DPMAIF_FRG_EN_MSK;
	else
		value &= ~DPMAIF_FRG_EN_MSK;

	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
}

static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
	value &= ~DPMAIF_BAT_CHECK_THRES_MSK;
	value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM);
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}

static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
	value &= ~DPMAIF_DL_PIT_SEQ_MSK;
	value |= DPMAIF_DL_PIT_SEQ_VALUE;
	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
}

static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info,
						 dma_addr_t addr)
{
	iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0);
	iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4);
}

static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
	value &= ~DPMAIF_PIT_SIZE_MSK;
	value |= size & DPMAIF_PIT_SIZE_MSK;
	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
	iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2);
	iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
	iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5);
	iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6);
}

static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
	value |= DPMAIF_DLQPIT_EN_MSK;
	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
}

static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info,
					     unsigned int pit_idx)
{
	unsigned int dl_pit_init;
	int timeout;
	u32 value;

	dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET;
	dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS);
	dl_pit_init |= DPMAIF_DL_PIT_INIT_EN;

	timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
					       value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
					       DPMAIF_CHECK_DELAY_US,
					       DPMAIF_CHECK_INIT_TIMEOUT_US);
	if (timeout) {
		dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n");
		return;
	}

	iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT);
	timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
					       value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
					       DPMAIF_CHECK_DELAY_US,
					       DPMAIF_CHECK_INIT_TIMEOUT_US);
	if (timeout)
		dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n");
}

static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num,
					  struct dpmaif_dl *dl_que)
{
	t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base);
	t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt);
	t7xx_dpmaif_dl_dlq_pit_en(hw_info);
	t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num);
}

static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info)
{
	int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
		t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]);
}

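/* Enable or disable the DL BAT, then commit only the enable bit to the
 * hardware through the DL_BAT_INIT interface, polling for readiness before
 * and after the write.
 */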
static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
{
	u32 dl_bat_init, value;
	int timeout;

	value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);

	if (enable)
		value |= DPMAIF_BAT_EN_MSK;
	else
		value &= ~DPMAIF_BAT_EN_MSK;

	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
	dl_bat_init = DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT;
	dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;

	timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
					       value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
					       DPMAIF_CHECK_TIMEOUT_US);
	if (timeout)
		dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n");

	iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
	timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
					       value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
					       DPMAIF_CHECK_TIMEOUT_US);
	if (timeout)
		dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
}

static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info)
{
	struct dpmaif_dl *dl_que;
	int ret;

	t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info);

	dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */
	if (!dl_que->que_started)
		return -EBUSY;

	t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info);
	t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info);
	t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info);
	t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info);
	t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info);
	t7xx_dpmaif_dl_set_pkt_alignment(hw_info);
	t7xx_dpmaif_dl_set_pit_seqnum(hw_info);
	t7xx_dpmaif_dl_set_ao_mtu(hw_info);
	t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info);
	t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info);
	t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info);
	t7xx_dpmaif_dl_frg_ao_en(hw_info, true);

	t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base);
	t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt);
	t7xx_dpmaif_dl_bat_en(hw_info, true);

	ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true);
	if (ret)
		return ret;

	t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base);
	t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt);
	t7xx_dpmaif_dl_bat_en(hw_info, false);

	ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false);
	if (ret)
		return ret;

	/* Init PIT (two PIT tables) */
	t7xx_dpmaif_config_all_dlq_hw(hw_info);
	t7xx_dpmaif_dl_all_q_en(hw_info, true);
	t7xx_dpmaif_dl_set_pkt_checksum(hw_info);
	return 0;
}

static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info,
					   unsigned int q_num, unsigned int size)
{
	unsigned int value;

	value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
	value &= ~DPMAIF_DRB_SIZE_MSK;
	value |= size & DPMAIF_DRB_SIZE_MSK;
	iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
}

static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info,
						unsigned int q_num, dma_addr_t addr)
{
	iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num));
	iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num));
}

static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info,
				  unsigned int q_num, bool ready)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);

	if (ready)
		value |= BIT(q_num);
	else
		value &= ~BIT(q_num);

	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
}

static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info,
				  unsigned int q_num, bool enable)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);

	if (enable)
		value |= BIT(q_num + 8);
	else
		value &= ~BIT(q_num + 8);

	iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
}

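/* Program the DRB base address and size, and enable ready and arbitration,
 * for every started TX queue; disable arbitration for the rest.
 */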
static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info)
{
	struct dpmaif_ul *ul_que;
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		ul_que = &hw_info->ul_que[i];
		if (ul_que->que_started) {
			t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt *
						       DPMAIF_UL_DRB_SIZE_WORD);
			t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base);
			t7xx_dpmaif_ul_rdy_en(hw_info, i, true);
			t7xx_dpmaif_ul_arb_en(hw_info, i, true);
		} else {
			t7xx_dpmaif_ul_arb_en(hw_info, i, false);
		}
	}
}

static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info)
{
	u32 ap_cfg;
	int ret;

	ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
	ap_cfg |= DPMAIF_SRAM_SYNC;
	iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG,
					   ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (ret)
		return ret;

	iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET);
	iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET);
	return 0;
}

static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info)
{
	u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY);

	return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS);
}

static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
{
	u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);

	if (enable)
		ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN;
	else
		ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN;

	iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
}

static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info)
{
	u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY);

	return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS);
}

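/* Tell the hardware how many new DRB entries have been queued for @q_num,
 * polling the add-descriptor interface for readiness before and after the
 * update.
 */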
void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num,
				      unsigned int drb_entry_cnt)
{
	u32 ul_update, value;
	int err;

	ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK;
	ul_update |= DPMAIF_UL_ADD_UPDATE;

	err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
					   value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (err) {
		dev_err(hw_info->dev, "UL add is not ready\n");
		return;
	}

	iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num));

	err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
					   value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (err)
		dev_err(hw_info->dev, "Timeout updating UL add\n");
}

unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
	unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num));

	return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD;
}

int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx,
				       unsigned int pit_remain_cnt)
{
	u32 dl_update, value;
	int ret;

	dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK;
	dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS);

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
					   value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (ret) {
		dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n");
		return ret;
	}

	iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD);

	ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
					   value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
					   DPMAIF_CHECK_TIMEOUT_US);
	if (ret) {
		dev_err(hw_info->dev, "Data plane modem add dlq failed\n");
		return ret;
	}

	return 0;
}

unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
					       unsigned int dlq_pit_idx)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX +
			 dlq_pit_idx * DLQ_PIT_IDX_SIZE);
	return value & DPMAIF_DL_RD_WR_IDX_MSK;
}

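/* Return non-zero if the DL BAT add interface does not become ready within
 * the timeout.
 */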
static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
{
	u32 value;

	return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
					    value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
					    DPMAIF_CHECK_TIMEOUT_US);
}

int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt)
{
	unsigned int value;

	if (t7xx_dl_add_timedout(hw_info)) {
		dev_err(hw_info->dev, "DL add BAT not ready\n");
		return -EBUSY;
	}

	value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
	value |= DPMAIF_DL_ADD_UPDATE;
	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);

	if (t7xx_dl_add_timedout(hw_info)) {
		dev_err(hw_info->dev, "DL add BAT timeout\n");
		return -EBUSY;
	}

	return 0;
}

unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX);
	return value & DPMAIF_DL_RD_WR_IDX_MSK;
}

unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX);
	return value & DPMAIF_DL_RD_WR_IDX_MSK;
}

int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt)
{
	unsigned int value;

	if (t7xx_dl_add_timedout(hw_info)) {
		dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n");
		return -EBUSY;
	}

	value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
	value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE;
	iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);

	if (t7xx_dl_add_timedout(hw_info)) {
		dev_err(hw_info->dev, "Data plane modem add frag DLQ failed\n");
		return -EBUSY;
	}

	return 0;
}

unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
	u32 value;

	value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX);
	return value & DPMAIF_DL_RD_WR_IDX_MSK;
}

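/* Copy the ring base addresses and sizes provided by the driver into
 * @hw_info and mark all RX/TX queues as started.
 */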
static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info,
					   struct dpmaif_hw_params *init_para)
{
	struct dpmaif_dl *dl_que;
	struct dpmaif_ul *ul_que;
	int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		dl_que = &hw_info->dl_que[i];
		dl_que->bat_base = init_para->pkt_bat_base_addr[i];
		dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i];
		dl_que->pit_base = init_para->pit_base_addr[i];
		dl_que->pit_size_cnt = init_para->pit_size_cnt[i];
		dl_que->frg_base = init_para->frg_bat_base_addr[i];
		dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i];
		dl_que->que_started = true;
	}

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		ul_que = &hw_info->ul_que[i];
		ul_que->drb_base = init_para->drb_base_addr[i];
		ul_que->drb_size_cnt = init_para->drb_size_cnt[i];
		ul_que->que_started = true;
	}
}

/**
 * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues.
 * @hw_info: Pointer to struct dpmaif_hw_info.
 *
 * Disable HW UL queues. Poll the busy UL queues until they go idle,
 * with an attempt count of 1000000.
 *
 * Return:
 * * 0 - Success
 * * -ETIMEDOUT - Timed out checking busy queues
 */
int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info)
{
	int count = 0;

	t7xx_dpmaif_ul_all_q_en(hw_info, false);
	while (t7xx_dpmaif_ul_idle_check(hw_info)) {
		if (++count >= DPMAIF_MAX_CHECK_COUNT) {
			dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n",
				ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY));
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/**
 * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues.
 * @hw_info: Pointer to struct dpmaif_hw_info.
 *
 * Disable HW DL queues. Poll the busy DL queues until they go idle,
 * with an attempt count of 1000000.
 * Then check that the HW PIT write index equals the read index, with the
 * same attempt count.
 *
 * Return:
 * * 0 - Success.
 * * -ETIMEDOUT - Timed out checking busy queues.
 */
int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info)
{
	unsigned int wr_idx, rd_idx;
	int count = 0;

	t7xx_dpmaif_dl_all_q_en(hw_info, false);
	while (t7xx_dpmaif_dl_idle_check(hw_info)) {
		if (++count >= DPMAIF_MAX_CHECK_COUNT) {
			dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n",
				ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY));
			return -ETIMEDOUT;
		}
	}

	/* Check middle PIT sync done */
	count = 0;
	do {
		wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX);
		wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
		rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX);
		rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK;

		if (wr_idx == rd_idx)
			return 0;
	} while (++count < DPMAIF_MAX_CHECK_COUNT);

	dev_err(hw_info->dev, "Check middle PIT sync fail\n");
	return -ETIMEDOUT;
}

void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info)
{
	t7xx_dpmaif_ul_all_q_en(hw_info, true);
	t7xx_dpmaif_dl_all_q_en(hw_info, true);
}

/**
 * t7xx_dpmaif_hw_init() - Initialize HW data path API.
 * @hw_info: Pointer to struct dpmaif_hw_info.
 * @init_param: Pointer to struct dpmaif_hw_params.
 *
 * Configures the port mode and clock, initializes the HW interrupts, and sets up the HW queues.
 *
 * Return:
 * * 0 - Success.
 * * -ERROR - Error code from a failed sub-initialization.
 */
int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param)
{
	int ret;

	ret = t7xx_dpmaif_hw_config(hw_info);
	if (ret) {
		dev_err(hw_info->dev, "DPMAIF HW config failed\n");
		return ret;
	}

	ret = t7xx_dpmaif_init_intr(hw_info);
	if (ret) {
		dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n");
		return ret;
	}

	t7xx_dpmaif_set_queue_property(hw_info, init_param);
	t7xx_dpmaif_pcie_dpmaif_sign(hw_info);
	t7xx_dpmaif_dl_performance(hw_info);

	ret = t7xx_dpmaif_config_dlq_hw(hw_info);
	if (ret) {
		dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n");
		return ret;
	}

	t7xx_dpmaif_config_ulq_hw(hw_info);

	ret = t7xx_dpmaif_hw_init_done(hw_info);
	if (ret)
		dev_err(hw_info->dev, "DPMAIF HW queue init failed\n");

	return ret;
}

bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
{
	u32 intr_status;

	intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
	intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno);
	if (intr_status) {
		iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
		return true;
	}

	return false;
}