1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include <linux/atomic.h>
5 #include <linux/compiler.h>
6 #include <adf_accel_devices.h>
7 #include <adf_common_drv.h>
8 #include <adf_pfvf_msg.h>
9 #include <adf_dev_err.h>
10 #include <adf_cfg.h>
11 #include <adf_fw_counters.h>
12 #include <adf_gen2_hw_data.h>
13 #include <adf_gen2_pfvf.h>
14 #include "adf_c4xxx_hw_data.h"
15 #include "adf_c4xxx_reset.h"
16 #include "adf_c4xxx_inline.h"
17 #include "adf_c4xxx_ras.h"
18 #include "adf_c4xxx_misc_error_stats.h"
19 #include "adf_c4xxx_pke_replay_stats.h"
20 #include "adf_heartbeat.h"
21 #include "icp_qat_fw_init_admin.h"
22 #include "icp_qat_hw.h"
23 
24 /* accel unit information */
25 static struct adf_accel_unit adf_c4xxx_au_32_ae[] =
26     { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
27       { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL },
28       { 0x4, 0x30, 0xF000, 0xF000, 4, ADF_ACCEL_SERVICE_NULL },
29       { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
30       { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL },
31       { 0x20, 0xC00, 0xF0000000, 0xF0000000, 4, ADF_ACCEL_SERVICE_NULL } };
32 
33 static struct adf_accel_unit adf_c4xxx_au_24_ae[] = {
34 	{ 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
35 	{ 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL },
36 	{ 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
37 	{ 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL },
38 };
39 
40 static struct adf_accel_unit adf_c4xxx_au_12_ae[] = {
41 	{ 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
42 	{ 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
43 };
44 
45 static struct adf_accel_unit adf_c4xxx_au_emulation[] =
46     { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
47       { 0x2, 0xC, 0xC0, 0xC0, 2, ADF_ACCEL_SERVICE_NULL } };
48 
49 /* Accel engine thread counts for each of the following services:
50  * <num_asym_thd>, <num_sym_thd>, <num_dc_thd>
51  */
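/* For example, using the field order above, a table entry of { 2, 6, 3 }
 * advertises 2 asym threads, 6 sym threads and 3 dc threads for that AE,
 * while { 0, 0, 0 } marks an AE that is not used in the given SKU
 * (illustrative reading only).
 */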
52 
53 /* Thread mapping for SKU capable of symmetric cryptography */
54 static const struct adf_ae_info adf_c4xxx_32_ae_sym[] =
55     { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
56       { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
57       { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 },
58       { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
59       { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 },
60       { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
61       { 2, 6, 3 }, { 2, 6, 3 } };
62 
63 static const struct adf_ae_info adf_c4xxx_24_ae_sym[] =
64     { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
65       { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
66       { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
67       { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
68       { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 },
69       { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
70       { 0, 0, 0 }, { 0, 0, 0 } };
71 
72 static const struct adf_ae_info adf_c4xxx_12_ae_sym[] =
73     { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
74       { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
75       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
76       { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
77       { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
78       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
79       { 0, 0, 0 }, { 0, 0, 0 } };
80 
81 /* Thread mapping for SKU capable of asymmetric and symmetric cryptography */
82 static const struct adf_ae_info adf_c4xxx_32_ae[] =
83     { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
84       { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
85       { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 },
86       { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
87       { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 },
88       { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
89       { 2, 5, 3 }, { 2, 5, 3 } };
90 
91 static const struct adf_ae_info adf_c4xxx_24_ae[] =
92     { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
93       { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
94       { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
95       { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
96       { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 },
97       { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
98       { 0, 0, 0 }, { 0, 0, 0 } };
99 
100 static const struct adf_ae_info adf_c4xxx_12_ae[] =
101     { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
102       { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
103       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
104       { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
105       { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
106       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
107       { 0, 0, 0 }, { 0, 0, 0 } };
108 
109 static struct adf_hw_device_class c4xxx_class = {.name = ADF_C4XXX_DEVICE_NAME,
110 						 .type = DEV_C4XXX,
111 						 .instances = 0 };
112 
113 struct icp_qat_fw_init_c4xxx_admin_hb_stats {
114 	struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_THREADS_PER_AE];
115 };
116 
117 struct adf_hb_count {
118 	u16 ae_thread[ADF_NUM_THREADS_PER_AE];
119 };
120 
121 static const int sku_cy_au[] = ADF_C4XXX_NUM_CY_AU;
122 static const int sku_dc_au[] = ADF_C4XXX_NUM_DC_AU;
123 static const int sku_inline_au[] = ADF_C4XXX_NUM_INLINE_AU;
124 
125 /*
126  * C4xxx devices introduce new fuses and soft straps, and so
127  * differ from previous generation device implementations.
128  */
129 
130 static u32
131 get_accel_mask(struct adf_accel_dev *accel_dev)
132 {
133 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
134 	u32 fusectl0;
135 	u32 softstrappull0;
136 
137 	fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
138 	softstrappull0 =
139 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4);
140 
141 	return (~(fusectl0 | softstrappull0)) & ADF_C4XXX_ACCELERATORS_MASK;
142 }
143 
144 static u32
145 get_ae_mask(struct adf_accel_dev *accel_dev)
146 {
147 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
148 	u32 fusectl1;
149 	u32 softstrappull1;
150 
151 	fusectl1 = pci_read_config(pdev, ADF_C4XXX_FUSECTL1_OFFSET, 4);
152 	softstrappull1 =
153 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL1_OFFSET, 4);
154 
155 	/* Assume that AE and AU disable masks are consistent, so no
156 	 * checks against the AU mask are performed
157 	 */
158 	return (~(fusectl1 | softstrappull1)) & ADF_C4XXX_ACCELENGINES_MASK;
159 }
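/* Illustrative example (hypothetical fuse values): if fusectl1 = 0x0000000F
 * and softstrappull1 = 0x000000F0, AEs 0-7 are treated as disabled and the
 * function returns ~(0x0F | 0xF0) & ADF_C4XXX_ACCELENGINES_MASK, i.e. only
 * the remaining engine bits. get_accel_mask() follows the same pattern for
 * accelerators.
 */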
160 
161 static u32
162 get_num_accels(struct adf_hw_device_data *self)
163 {
164 	return self ? hweight32(self->accel_mask) : 0;
165 }
166 
167 static u32
168 get_num_aes(struct adf_hw_device_data *self)
169 {
170 	return self ? hweight32(self->ae_mask) : 0;
171 }
172 
173 static u32
174 get_misc_bar_id(struct adf_hw_device_data *self)
175 {
176 	return ADF_C4XXX_PMISC_BAR;
177 }
178 
179 static u32
180 get_etr_bar_id(struct adf_hw_device_data *self)
181 {
182 	return ADF_C4XXX_ETR_BAR;
183 }
184 
185 static u32
186 get_sram_bar_id(struct adf_hw_device_data *self)
187 {
188 	return ADF_C4XXX_SRAM_BAR;
189 }
190 
191 static inline void
192 c4xxx_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower)
193 {
194 	*lower = lower_32_bits(value);
195 	*upper = upper_32_bits(value);
196 }
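/* For instance, a 64-bit watchdog value of 0x0000000200000001 unpacks into
 * *upper = 0x00000002 and *lower = 0x00000001 via the upper_32_bits() and
 * lower_32_bits() helpers (illustrative value only).
 */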
197 
198 /**
199  * c4xxx_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
200  *
201  * @param accel_dev    Structure holding accelerator data.
202  * @return 0 on success, error code otherwise.
203  */
204 static int
205 c4xxx_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
206 {
207 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
208 	struct adf_bar *misc_bar =
209 	    &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)];
210 	struct resource *csr = misc_bar->virt_addr;
211 	unsigned long accel_mask = hw_device->accel_mask;
212 	u32 accel = 0;
213 	u64 timer_val = ADF_C4XXX_SSM_WDT_64BIT_DEFAULT_VALUE;
214 	u64 timer_val_pke = ADF_C4XXX_SSM_WDT_PKE_64BIT_DEFAULT_VALUE;
215 	u32 ssm_wdt_low = 0, ssm_wdt_high = 0;
216 	u32 ssm_wdt_pke_low = 0, ssm_wdt_pke_high = 0;
217 
218 	/* Convert the 64-bit slice hang watchdog values into 32-bit values
219 	 * for MMIO writes to the 32-bit CSRs.
220 	 */
221 	c4xxx_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
222 	c4xxx_unpack_ssm_wdtimer(timer_val_pke,
223 				 &ssm_wdt_pke_high,
224 				 &ssm_wdt_pke_low);
225 
226 	/* Configures Slice Hang watchdogs */
227 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
228 	{
229 		ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTL_OFFSET(accel), ssm_wdt_low);
230 		ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTH_OFFSET(accel), ssm_wdt_high);
231 		ADF_CSR_WR(csr,
232 			   ADF_C4XXX_SSMWDTPKEL_OFFSET(accel),
233 			   ssm_wdt_pke_low);
234 		ADF_CSR_WR(csr,
235 			   ADF_C4XXX_SSMWDTPKEH_OFFSET(accel),
236 			   ssm_wdt_pke_high);
237 	}
238 
239 	return 0;
240 }
241 
242 /**
243  * c4xxx_check_slice_hang() - Check slice hang status
244  *
245  * Return: true if a slice hang interrupt was serviced.
246  */
247 static bool
248 c4xxx_check_slice_hang(struct adf_accel_dev *accel_dev)
249 {
250 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
251 	struct adf_bar *misc_bar =
252 	    &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)];
253 	struct resource *csr = misc_bar->virt_addr;
254 	u32 slice_hang_offset;
255 	u32 ia_slice_hang_offset;
256 	u32 fw_irq_source;
257 	u32 ia_irq_source;
258 	u32 accel_num = 0;
259 	bool handled = false;
260 	u32 errsou10 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU10);
261 	unsigned long accel_mask;
262 
263 	accel_mask = hw_device->accel_mask;
264 
265 	for_each_set_bit(accel_num, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
266 	{
267 		if (!(errsou10 & ADF_C4XXX_IRQ_SRC_MASK(accel_num)))
268 			continue;
269 
270 		fw_irq_source = ADF_CSR_RD(csr, ADF_INTSTATSSM(accel_num));
271 		ia_irq_source =
272 		    ADF_CSR_RD(csr, ADF_C4XXX_IAINTSTATSSM(accel_num));
273 		ia_slice_hang_offset =
274 		    ADF_C4XXX_IASLICEHANGSTATUS_OFFSET(accel_num);
275 
276 		/* FW did not clear SliceHang error, IA logs and clears
277 		 * the error
278 		 */
279 		if ((fw_irq_source & ADF_INTSTATSSM_SHANGERR) &&
280 		    (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) {
281 			slice_hang_offset =
282 			    ADF_C4XXX_SLICEHANGSTATUS_OFFSET(accel_num);
283 
284 			/* Bring hung slice out of reset */
285 			adf_csr_fetch_and_and(csr, slice_hang_offset, ~0);
286 
287 			/* Log SliceHang error and clear an interrupt */
288 			handled = adf_handle_slice_hang(accel_dev,
289 							accel_num,
290 							csr,
291 							ia_slice_hang_offset);
292 			atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
293 		}
294 		/* FW cleared SliceHang, IA only logs an error */
295 		else if (!(fw_irq_source & ADF_INTSTATSSM_SHANGERR) &&
296 			 (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) {
297 			/* Log SliceHang error and clear an interrupt */
298 			handled = adf_handle_slice_hang(accel_dev,
299 							accel_num,
300 							csr,
301 							ia_slice_hang_offset);
302 
303 			atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
304 		}
305 
306 		/* Clear the associated IA interrupt */
307 		adf_csr_fetch_and_and(csr,
308 				      ADF_C4XXX_IAINTSTATSSM(accel_num),
309 				      ~BIT(13));
310 	}
311 
312 	return handled;
313 }
314 
315 static bool
316 get_eth_doorbell_msg(struct adf_accel_dev *accel_dev)
317 {
318 	struct resource *csr =
319 	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
320 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
321 	u32 errsou11 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU11);
322 	u32 doorbell_int = ADF_CSR_RD(csr, ADF_C4XXX_ETH_DOORBELL_INT);
323 	u32 eth_doorbell_reg[ADF_C4XXX_NUM_ETH_DOORBELL_REGS];
324 	bool handled = false;
325 	u32 data_reg;
326 	u8 i;
327 
328 	/* Reset cannot be acknowledged until the reset ACK message arrives. */
329 	hw_device->reset_ack = false;
330 
331 	/* Check if doorbell interrupt occurred. */
332 	if (errsou11 & ADF_C4XXX_DOORBELL_INT_SRC) {
333 		/* Decode doorbell messages from ethernet device */
334 		for (i = 0; i < ADF_C4XXX_NUM_ETH_DOORBELL_REGS; i++) {
335 			eth_doorbell_reg[i] = 0;
336 			if (doorbell_int & BIT(i)) {
337 				data_reg = ADF_C4XXX_ETH_DOORBELL(i);
338 				eth_doorbell_reg[i] = ADF_CSR_RD(csr, data_reg);
339 				device_printf(
340 				    GET_DEV(accel_dev),
341 				    "Received doorbell message (0x%08x)\n",
342 				    eth_doorbell_reg[i]);
343 			}
344 		}
345 		/* Only need to check PF0 */
346 		if (eth_doorbell_reg[0] == ADF_C4XXX_IOSFSB_RESET_ACK) {
347 			device_printf(GET_DEV(accel_dev),
348 				      "Received pending reset ACK\n");
349 			hw_device->reset_ack = true;
350 		}
351 		/* Clear the interrupt source */
352 		ADF_CSR_WR(csr,
353 			   ADF_C4XXX_ETH_DOORBELL_INT,
354 			   ADF_C4XXX_ETH_DOORBELL_MASK);
355 		handled = true;
356 	}
357 
358 	return handled;
359 }
360 
361 static enum dev_sku_info
362 get_sku(struct adf_hw_device_data *self)
363 {
364 	int aes = get_num_aes(self);
365 	u32 capabilities = self->accel_capabilities_mask;
366 	bool sym_only_sku = false;
367 
368 	/* Check if SKU is capable only of symmetric cryptography
369 	 * via device capabilities.
370 	 */
371 	if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
372 	    !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
373 	    !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
374 		sym_only_sku = true;
375 
376 	switch (aes) {
377 	case ADF_C4XXX_HIGH_SKU_AES:
378 		if (sym_only_sku)
379 			return DEV_SKU_1_CY;
380 		return DEV_SKU_1;
381 	case ADF_C4XXX_MED_SKU_AES:
382 		if (sym_only_sku)
383 			return DEV_SKU_2_CY;
384 		return DEV_SKU_2;
385 	case ADF_C4XXX_LOW_SKU_AES:
386 		if (sym_only_sku)
387 			return DEV_SKU_3_CY;
388 		return DEV_SKU_3;
389 	}
390 
391 	return DEV_SKU_UNKNOWN;
392 }
393 
394 static bool
395 c4xxx_check_prod_sku(struct adf_accel_dev *accel_dev)
396 {
397 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
398 	u32 fusectl0 = 0;
399 
400 	fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
401 
402 	if (fusectl0 & ADF_C4XXX_FUSE_PROD_SKU_MASK)
403 		return true;
404 	else
405 		return false;
406 }
407 
408 static bool
409 adf_check_sym_only_sku_c4xxx(struct adf_accel_dev *accel_dev)
410 {
411 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
412 	u32 legfuse = 0;
413 
414 	legfuse = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
415 
416 	if (legfuse & ADF_C4XXX_LEGFUSE_BASE_SKU_MASK)
417 		return true;
418 	else
419 		return false;
420 }
421 
422 static void
423 adf_enable_slice_hang_detection(struct adf_accel_dev *accel_dev)
424 {
425 	struct resource *csr;
426 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
427 	u32 accel = 0;
428 	unsigned long accel_mask;
429 
430 	csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
431 	accel_mask = hw_device->accel_mask;
432 
433 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
434 	{
435 		/* Unmasks Slice Hang interrupts so they can be seen by IA. */
436 		ADF_CSR_WR(csr,
437 			   ADF_C4XXX_SHINTMASKSSM_OFFSET(accel),
438 			   ADF_C4XXX_SHINTMASKSSM_VAL);
439 	}
440 }
441 
442 static void
443 adf_enable_ras(struct adf_accel_dev *accel_dev)
444 {
445 	struct resource *csr;
446 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
447 	u32 accel = 0;
448 	unsigned long accel_mask;
449 
450 	csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
451 	accel_mask = hw_device->accel_mask;
452 
453 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
454 	{
455 		ADF_CSR_WR(csr,
456 			   ADF_C4XXX_GET_SSMFEATREN_OFFSET(accel),
457 			   ADF_C4XXX_SSMFEATREN_VAL);
458 	}
459 }
460 
461 static u32
462 get_clock_speed(struct adf_hw_device_data *self)
463 {
464 	/* c4xxx CPP clock is equal to high-speed clock */
465 	return self->clock_frequency;
466 }
467 
468 static void
469 adf_enable_error_interrupts(struct adf_accel_dev *accel_dev)
470 {
471 	struct resource *csr, *aram_csr;
472 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
473 	u32 accel = 0;
474 	unsigned long accel_mask;
475 
476 	csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
477 	aram_csr = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
478 	accel_mask = hw_device->accel_mask;
479 
480 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
481 	{
482 		/* Enable shared memory, MMP, CPP, PPERR interrupts
483 		 * for a given accel
484 		 */
485 		ADF_CSR_WR(csr, ADF_C4XXX_GET_INTMASKSSM_OFFSET(accel), 0);
486 
487 		/* Enable SPP parity error interrupts for a given accel */
488 		ADF_CSR_WR(csr, ADF_C4XXX_GET_SPPPARERRMSK_OFFSET(accel), 0);
489 
490 		/* Enable SSM soft parity errors on a given accel */
491 		ADF_CSR_WR(csr,
492 			   ADF_C4XXX_GET_SSMSOFTERRORPARITY_MASK_OFFSET(accel),
493 			   ADF_C4XXX_SSMSOFTERRORPARITY_MASK_VAL);
494 	}
495 
496 	/* Enable interrupts for VFtoPF0_127. */
497 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK4, ADF_C4XXX_VF2PF0_31);
498 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK5, ADF_C4XXX_VF2PF32_63);
499 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK6, ADF_C4XXX_VF2PF64_95);
500 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK7, ADF_C4XXX_VF2PF96_127);
501 
502 	/* Enable interrupts signaling ECC correctable errors for all AEs */
503 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK8, ADF_C4XXX_ERRMSK8_COERR);
504 	ADF_CSR_WR(csr,
505 		   ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE,
506 		   ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE_MASK);
507 
508 	/* Enable error interrupts reported by ERRSOU9 */
509 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK9, ADF_C4XXX_ERRMSK9_IRQ_MASK);
510 
511 	/* Enable uncorrectable errors on all the AEs */
512 	ADF_CSR_WR(csr,
513 		   ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE,
514 		   ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE_MASK);
515 
516 	/* Enable CPP Agent to report command parity errors */
517 	ADF_CSR_WR(csr,
518 		   ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE,
519 		   ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE_MASK);
520 
521 	/* Enable reporting of RI memory parity errors */
522 	ADF_CSR_WR(csr,
523 		   ADF_C4XXX_RI_MEM_PAR_ERR_EN0,
524 		   ADF_C4XXX_RI_MEM_PAR_ERR_EN0_MASK);
525 
526 	/* Enable reporting of TI memory parity errors */
527 	ADF_CSR_WR(csr,
528 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN0,
529 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN0_MASK);
530 	ADF_CSR_WR(csr,
531 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN1,
532 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN1_MASK);
533 
534 	/* Enable SSM errors */
535 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK10, ADF_C4XXX_ERRMSK10_SSM_ERR);
536 
537 	/* Enable miscellaneous errors (ethernet doorbell aram, ici, ice) */
538 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK11, ADF_C4XXX_ERRMSK11_ERR);
539 
540 	/* RI CPP bus interface error detection and reporting. */
541 	ADF_CSR_WR(csr, ADF_C4XXX_RICPPINTCTL, ADF_C4XXX_RICPP_EN);
542 
543 	/* TI CPP bus interface error detection and reporting. */
544 	ADF_CSR_WR(csr, ADF_C4XXX_TICPPINTCTL, ADF_C4XXX_TICPP_EN);
545 
546 	/* Enable CFC Error interrupts and logging. */
547 	ADF_CSR_WR(csr, ADF_C4XXX_CPP_CFC_ERR_CTRL, ADF_C4XXX_CPP_CFC_UE);
548 
549 	/* Enable ARAM correctable error detection. */
550 	ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMCERR, ADF_C4XXX_ARAM_CERR);
551 
552 	/* Enable ARAM uncorrectable error detection. */
553 	ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMUERR, ADF_C4XXX_ARAM_UERR);
554 
555 	/* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
556 	ADF_CSR_WR(aram_csr, ADF_C4XXX_CPPMEMTGTERR, ADF_C4XXX_TGT_UERR);
557 }
558 
559 static void
560 adf_enable_mmp_error_correction(struct resource *csr,
561 				struct adf_hw_device_data *hw_data)
562 {
563 	unsigned int accel = 0, mmp;
564 	unsigned long uerrssmmmp_mask, cerrssmmmp_mask;
565 	enum operation op;
566 	unsigned long accel_mask;
567 
568 	/* Prepare values and operation that will be performed on
569 	 * UERRSSMMMP and CERRSSMMMP registers on each MMP
570 	 */
571 	if (hw_data->accel_capabilities_mask &
572 	    ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) {
573 		uerrssmmmp_mask = ADF_C4XXX_UERRSSMMMP_EN;
574 		cerrssmmmp_mask = ADF_C4XXX_CERRSSMMMP_EN;
575 		op = OR;
576 	} else {
577 		uerrssmmmp_mask = ~ADF_C4XXX_UERRSSMMMP_EN;
578 		cerrssmmmp_mask = ~ADF_C4XXX_CERRSSMMMP_EN;
579 		op = AND;
580 	}
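	/* Summary of the branch above: with asym capability present the
	 * enable bits are OR-ed into UERRSSMMMP/CERRSSMMMP; without it the
	 * same bits are cleared by AND-ing with the complemented masks.
	 */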
581 
582 	accel_mask = hw_data->accel_mask;
583 
584 	/* Enable MMP Logging */
585 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
586 	{
587 		/* Set power-up */
588 		adf_csr_fetch_and_and(csr,
589 				      ADF_C4XXX_SLICEPWRDOWN(accel),
590 				      ~ADF_C4XXX_MMP_PWR_UP_MSK);
591 
592 		for (mmp = 0; mmp < ADF_C4XXX_MAX_MMP; ++mmp) {
593 			adf_csr_fetch_and_update(op,
594 						 csr,
595 						 ADF_C4XXX_UERRSSMMMP(accel,
596 								      mmp),
597 						 uerrssmmmp_mask);
598 			adf_csr_fetch_and_update(op,
599 						 csr,
600 						 ADF_C4XXX_CERRSSMMMP(accel,
601 								      mmp),
602 						 cerrssmmmp_mask);
603 		}
604 
605 		/* Restore power-down value */
606 		adf_csr_fetch_and_or(csr,
607 				     ADF_C4XXX_SLICEPWRDOWN(accel),
608 				     ADF_C4XXX_MMP_PWR_UP_MSK);
609 	}
610 }
611 
612 static void
613 get_arb_info(struct arb_info *arb_csrs_info)
614 {
615 	arb_csrs_info->arbiter_offset = ADF_C4XXX_ARB_OFFSET;
616 	arb_csrs_info->wrk_cfg_offset = ADF_C4XXX_ARB_WQCFG_OFFSET;
617 }
618 
619 static void
620 get_admin_info(struct admin_info *admin_csrs_info)
621 {
622 	admin_csrs_info->mailbox_offset = ADF_C4XXX_MAILBOX_BASE_OFFSET;
623 	admin_csrs_info->admin_msg_ur = ADF_C4XXX_ADMINMSGUR_OFFSET;
624 	admin_csrs_info->admin_msg_lr = ADF_C4XXX_ADMINMSGLR_OFFSET;
625 }
626 
627 static void
628 get_errsou_offset(u32 *errsou3, u32 *errsou5)
629 {
630 	*errsou3 = ADF_C4XXX_ERRSOU3;
631 	*errsou5 = ADF_C4XXX_ERRSOU5;
632 }
633 
634 static void
635 adf_enable_error_correction(struct adf_accel_dev *accel_dev)
636 {
637 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
638 	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR];
639 	struct resource *csr = misc_bar->virt_addr;
640 	unsigned int val, i = 0;
641 	unsigned long ae_mask;
642 	unsigned long accel_mask;
643 
644 	ae_mask = hw_device->ae_mask;
645 
646 	/* Enable Accel Engine error detection & correction */
647 	for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES)
648 	{
649 		val = ADF_CSR_RD(csr, ADF_C4XXX_AE_CTX_ENABLES(i));
650 		val |= ADF_C4XXX_ENABLE_AE_ECC_ERR;
651 		ADF_CSR_WR(csr, ADF_C4XXX_AE_CTX_ENABLES(i), val);
652 		val = ADF_CSR_RD(csr, ADF_C4XXX_AE_MISC_CONTROL(i));
653 		val |= ADF_C4XXX_ENABLE_AE_ECC_PARITY_CORR;
654 		ADF_CSR_WR(csr, ADF_C4XXX_AE_MISC_CONTROL(i), val);
655 	}
656 
657 	accel_mask = hw_device->accel_mask;
658 
659 	/* Enable shared memory error detection & correction */
660 	for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
661 	{
662 		val = ADF_CSR_RD(csr, ADF_C4XXX_UERRSSMSH(i));
663 		val |= ADF_C4XXX_ERRSSMSH_EN;
664 		ADF_CSR_WR(csr, ADF_C4XXX_UERRSSMSH(i), val);
665 		val = ADF_CSR_RD(csr, ADF_C4XXX_CERRSSMSH(i));
666 		val |= ADF_C4XXX_ERRSSMSH_EN;
667 		ADF_CSR_WR(csr, ADF_C4XXX_CERRSSMSH(i), val);
668 	}
669 
670 	adf_enable_ras(accel_dev);
671 	adf_enable_mmp_error_correction(csr, hw_device);
672 	adf_enable_slice_hang_detection(accel_dev);
673 	adf_enable_error_interrupts(accel_dev);
674 }
675 
676 static void
677 adf_enable_ints(struct adf_accel_dev *accel_dev)
678 {
679 	struct resource *addr;
680 
681 	addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
682 
683 	/* Enable bundle interrupts */
684 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF0_MASK_OFFSET, ADF_C4XXX_SMIA0_MASK);
685 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF1_MASK_OFFSET, ADF_C4XXX_SMIA1_MASK);
686 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF2_MASK_OFFSET, ADF_C4XXX_SMIA2_MASK);
687 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF3_MASK_OFFSET, ADF_C4XXX_SMIA3_MASK);
688 	/* Enable misc interrupts */
689 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF4_MASK_OFFSET, ADF_C4XXX_SMIA4_MASK);
690 }
691 
692 static u32
693 get_ae_clock(struct adf_hw_device_data *self)
694 {
695 	/* Clock update interval is 16 ticks for c4xxx. */
696 	return self->clock_frequency / 16;
697 }
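/* Illustrative arithmetic only: with a measured clock_frequency of 800 MHz,
 * get_ae_clock() reports 800 MHz / 16 = 50 MHz.
 */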
698 
699 static int
700 measure_clock(struct adf_accel_dev *accel_dev)
701 {
702 	u32 frequency;
703 	int ret = 0;
704 
705 	ret = adf_dev_measure_clock(accel_dev,
706 				    &frequency,
707 				    ADF_C4XXX_MIN_AE_FREQ,
708 				    ADF_C4XXX_MAX_AE_FREQ);
709 	if (ret)
710 		return ret;
711 
712 	accel_dev->hw_device->clock_frequency = frequency;
713 	return 0;
714 }
715 
716 static int
717 get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled)
718 {
719 	if (accel_dev->au_info->num_dc_au > 0) {
720 		*storage_enabled = 1;
721 		GET_HW_DATA(accel_dev)->extended_dc_capabilities =
722 		    ICP_ACCEL_CAPABILITIES_ADVANCED_COMPRESSION;
723 	}
724 	return 0;
725 }
726 
727 static u32
728 c4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
729 {
730 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
731 	u32 legfuses;
732 	u32 softstrappull0, softstrappull2;
733 	u32 fusectl0, fusectl2;
734 	u32 capabilities;
735 
736 	/* Read accelerator capabilities mask */
737 	legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
738 	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
739 	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
740 	    ICP_ACCEL_CAPABILITIES_CIPHER |
741 	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
742 	    ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_ZUC |
743 	    ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT |
744 	    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
745 	    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
746 	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
747 	    ICP_ACCEL_CAPABILITIES_ECEDMONT;
748 
749 	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
750 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
751 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
752 	}
753 	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
754 		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
755 	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
756 		capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
757 				  ICP_ACCEL_CAPABILITIES_ECEDMONT);
758 	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) {
759 		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
760 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY;
761 	}
762 	if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE)
763 		capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC;
764 	if (legfuses & ICP_ACCEL_MASK_SM3_SLICE)
765 		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
766 	if (legfuses & ICP_ACCEL_MASK_SM4_SLICE)
767 		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
768 
769 	/* Read the fusectl0 & softstrappull0 registers to check whether
770 	 * inline acceleration is disabled
771 	 */
772 	softstrappull0 =
773 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4);
774 	fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
775 	if ((fusectl0 | softstrappull0) & ADF_C4XXX_FUSE_DISABLE_INLINE_MASK)
776 		capabilities &= ~ICP_ACCEL_CAPABILITIES_INLINE;
777 
778 	/* Read the fusectl2 & softstrappull2 registers to check whether
779 	 * PKE/DC are enabled or disabled
780 	 */
781 	softstrappull2 =
782 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL2_OFFSET, 4);
783 	fusectl2 = pci_read_config(pdev, ADF_C4XXX_FUSECTL2_OFFSET, 4);
784 	/* Disable PKE/DC cap if there are no PKE/DC-enabled AUs. */
785 	if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_PKE_MASK))
786 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
787 	if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_COMP_MASK))
788 		capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION |
789 				  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY);
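	/* Reading of the two checks above: an accel unit can run PKE/DC only
	 * when its bit is clear in both fusectl2 and softstrappull2, so
	 * ~fusectl2 & ~softstrappull2 is the set of enabled AUs; if that set
	 * does not intersect the PKE or COMP mask, the corresponding
	 * capability is dropped.
	 */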
790 
791 	return capabilities;
792 }
793 
794 static int
795 c4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
796 {
797 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
798 	unsigned long val;
799 	char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
800 	int sku;
801 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
802 
803 	sku = get_sku(hw_data);
804 
805 	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
806 		goto err;
807 
808 	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
809 
810 	/* Base station SKU supports symmetric cryptography only. */
811 	if (adf_check_sym_only_sku_c4xxx(accel_dev))
812 		snprintf(val_str, sizeof(val_str), ADF_SERVICE_SYM);
813 	else
814 		snprintf(val_str, sizeof(val_str), ADF_SERVICE_CY);
815 
816 	val = sku_dc_au[sku];
817 	if (val) {
818 		strncat(val_str,
819 			ADF_SERVICES_SEPARATOR ADF_SERVICE_DC,
820 			ADF_CFG_MAX_VAL_LEN_IN_BYTES -
821 			    strnlen(val_str, sizeof(val_str)) -
822 			    ADF_CFG_NULL_TERM_SIZE);
823 	}
824 
825 	if (adf_cfg_add_key_value_param(
826 		accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
827 		goto err;
828 
829 	snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
830 	val = sku_cy_au[sku];
831 	if (adf_cfg_add_key_value_param(
832 		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
833 		goto err;
834 
835 	snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
836 	val = sku_dc_au[sku];
837 	if (adf_cfg_add_key_value_param(
838 		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
839 		goto err;
840 
841 	snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
842 	val = sku_inline_au[sku];
843 	if (adf_cfg_add_key_value_param(
844 		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
845 		goto err;
846 
847 	return 0;
848 err:
849 	device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
850 	return EINVAL;
851 }
852 
853 static void
854 update_hw_capability(struct adf_accel_dev *accel_dev)
855 {
856 	struct adf_accel_unit_info *au_info = accel_dev->au_info;
857 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
858 	u32 disabled_caps = 0;
859 
860 	if (!au_info->asym_ae_msk)
861 		disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
862 		    ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
863 
864 	if (!au_info->sym_ae_msk)
865 		disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
866 		    ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_ZUC |
867 		    ICP_ACCEL_CAPABILITIES_SHA3_EXT |
868 		    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
869 		    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
870 		    ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
871 
872 	if (!au_info->dc_ae_msk) {
873 		disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
874 		    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY;
875 		hw_device->extended_dc_capabilities = 0;
876 	}
877 
878 	if (!au_info->inline_ingress_msk && !au_info->inline_egress_msk)
879 		disabled_caps |= ICP_ACCEL_CAPABILITIES_INLINE;
880 
881 	hw_device->accel_capabilities_mask =
882 	    c4xxx_get_hw_cap(accel_dev) & ~disabled_caps;
883 }
884 
885 static void
886 c4xxx_set_sadb_size(struct adf_accel_dev *accel_dev)
887 {
888 	u32 sadb_reg_value = 0;
889 	struct resource *aram_csr_base;
890 
891 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
892 	if (accel_dev->au_info->num_inline_au) {
893 		/* REG_SA_DB_CTRL register initialisation */
894 		sadb_reg_value = ADF_C4XXX_SADB_REG_VALUE(accel_dev);
895 		ADF_CSR_WR(aram_csr_base,
896 			   ADF_C4XXX_REG_SA_DB_CTRL,
897 			   sadb_reg_value);
898 	} else {
899 		/* Zero the SADB size when inline is disabled. */
900 		adf_csr_fetch_and_and(aram_csr_base,
901 				      ADF_C4XXX_REG_SA_DB_CTRL,
902 				      ADF_C4XXX_SADB_SIZE_BIT);
903 	}
904 	/* REG_SA_CTRL_LOCK register initialisation. We set the lock
905 	 * bit in order to prevent REG_SA_DB_CTRL from being
906 	 * overwritten
907 	 */
908 	ADF_CSR_WR(aram_csr_base,
909 		   ADF_C4XXX_REG_SA_CTRL_LOCK,
910 		   ADF_C4XXX_DEFAULT_SA_CTRL_LOCKOUT);
911 }
912 
913 static void
914 c4xxx_init_error_notification_configuration(struct adf_accel_dev *accel_dev,
915 					    u32 offset)
916 {
917 	struct resource *aram_csr_base;
918 
919 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
920 
921 	/* configure error notification configuration registers */
922 	/* Set CD Parity error */
923 	ADF_CSR_WR(aram_csr_base,
924 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_0 + offset,
925 		   ADF_C4XXX_CD_RF_PARITY_ERR_0_VAL);
926 	ADF_CSR_WR(aram_csr_base,
927 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_1 + offset,
928 		   ADF_C4XXX_CD_RF_PARITY_ERR_1_VAL);
929 	ADF_CSR_WR(aram_csr_base,
930 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_2 + offset,
931 		   ADF_C4XXX_CD_RF_PARITY_ERR_2_VAL);
932 	ADF_CSR_WR(aram_csr_base,
933 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_3 + offset,
934 		   ADF_C4XXX_CD_RF_PARITY_ERR_3_VAL);
935 	/* Set CD RAM ECC Correctable Error */
936 	ADF_CSR_WR(aram_csr_base,
937 		   ADF_C4XXX_IC_CD_CERR + offset,
938 		   ADF_C4XXX_CD_CERR_VAL);
939 	/* Set CD RAM ECC UnCorrectable Error */
940 	ADF_CSR_WR(aram_csr_base,
941 		   ADF_C4XXX_IC_CD_UERR + offset,
942 		   ADF_C4XXX_CD_UERR_VAL);
943 	/* Set Inline (excl cmd_dis) Parity Error */
944 	ADF_CSR_WR(aram_csr_base,
945 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0 + offset,
946 		   ADF_C4XXX_INLN_RF_PARITY_ERR_0_VAL);
947 	ADF_CSR_WR(aram_csr_base,
948 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1 + offset,
949 		   ADF_C4XXX_INLN_RF_PARITY_ERR_1_VAL);
950 	ADF_CSR_WR(aram_csr_base,
951 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2 + offset,
952 		   ADF_C4XXX_INLN_RF_PARITY_ERR_2_VAL);
953 	ADF_CSR_WR(aram_csr_base,
954 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_3 + offset,
955 		   ADF_C4XXX_INLN_RF_PARITY_ERR_3_VAL);
956 	ADF_CSR_WR(aram_csr_base,
957 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_4 + offset,
958 		   ADF_C4XXX_INLN_RF_PARITY_ERR_4_VAL);
959 	ADF_CSR_WR(aram_csr_base,
960 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5 + offset,
961 		   ADF_C4XXX_INLN_RF_PARITY_ERR_5_VAL);
962 	/* Set Parser RAM ECC Correctable Error */
963 	ADF_CSR_WR(aram_csr_base,
964 		   ADF_C4XXX_IC_PARSER_CERR + offset,
965 		   ADF_C4XXX_PARSER_CERR_VAL);
966 	/* Set Parser RAM ECC UnCorrectable Error */
967 	ADF_CSR_WR(aram_csr_base,
968 		   ADF_C4XXX_IC_PARSER_UERR + offset,
969 		   ADF_C4XXX_PARSER_UERR_VAL);
970 	/* Set CTPB RAM ECC Correctable Error */
971 	ADF_CSR_WR(aram_csr_base,
972 		   ADF_C4XXX_IC_CTPB_CERR + offset,
973 		   ADF_C4XXX_CTPB_CERR_VAL);
974 	/* Set CTPB RAM ECC UnCorrectable Error */
975 	ADF_CSR_WR(aram_csr_base,
976 		   ADF_C4XXX_IC_CTPB_UERR + offset,
977 		   ADF_C4XXX_CTPB_UERR_VAL);
978 	/* Set CPP Interface Status */
979 	ADF_CSR_WR(aram_csr_base,
980 		   ADF_C4XXX_IC_CPPM_ERR_STAT + offset,
981 		   ADF_C4XXX_CPPM_ERR_STAT_VAL);
982 	/* Set CGST_MGMT_INT */
983 	ADF_CSR_WR(aram_csr_base,
984 		   ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset,
985 		   ADF_C4XXX_CONGESTION_MGMT_INI_VAL);
986 	/* CPP Interface Status */
987 	ADF_CSR_WR(aram_csr_base,
988 		   ADF_C4XXX_IC_CPPT_ERR_STAT + offset,
989 		   ADF_C4XXX_CPPT_ERR_STAT_VAL);
990 	/* MAC Interrupt Mask */
991 	ADF_CSR_WR64(aram_csr_base,
992 		     ADF_C4XXX_IC_MAC_IM + offset,
993 		     ADF_C4XXX_MAC_IM_VAL);
994 }
995 
996 static void
997 c4xxx_enable_parse_extraction(struct adf_accel_dev *accel_dev)
998 {
999 	struct resource *aram_csr_base;
1000 
1001 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1002 
1003 	/* Enable Inline Parse Extraction CSRs */
1004 
1005 	/* Set IC_PARSE_CTRL register */
1006 	ADF_CSR_WR(aram_csr_base,
1007 		   ADF_C4XXX_IC_PARSE_CTRL_OFFSET,
1008 		   ADF_C4XXX_IC_PARSE_CTRL_OFFSET_DEFAULT_VALUE);
1009 
1010 	/* Set IC_PARSE_FIXED_DATA(0) */
1011 	ADF_CSR_WR(aram_csr_base,
1012 		   ADF_C4XXX_IC_PARSE_FIXED_DATA(0),
1013 		   ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_DATA_0);
1014 
1015 	/* Set IC_PARSE_FIXED_LENGTH */
1016 	ADF_CSR_WR(aram_csr_base,
1017 		   ADF_C4XXX_IC_PARSE_FIXED_LENGTH,
1018 		   ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_LEN);
1019 
1020 	/* Configure ESP protocol from an IPv4 header */
1021 	ADF_CSR_WR(aram_csr_base,
1022 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_0,
1023 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_0_VALUE);
1024 	ADF_CSR_WR(aram_csr_base,
1025 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_0,
1026 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_0_VALUE);
1027 	/* Configure protocol extraction field from an IPv4 header */
1028 	ADF_CSR_WR(aram_csr_base,
1029 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_1,
1030 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_1_VALUE);
1031 	ADF_CSR_WR(aram_csr_base,
1032 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_1,
1033 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_1_VALUE);
1034 	/* Configure SPI extraction field from an IPv4 header */
1035 	ADF_CSR_WR(aram_csr_base,
1036 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_2,
1037 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_2_VALUE);
1038 	ADF_CSR_WR(aram_csr_base,
1039 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_2,
1040 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_2_VALUE);
1041 	/* Configure destination field IP address from an IPv4 header */
1042 	ADF_CSR_WR(aram_csr_base,
1043 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_3,
1044 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_3_VALUE);
1045 	ADF_CSR_WR(aram_csr_base,
1046 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_3,
1047 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_3_VALUE);
1048 
1049 	/* Configure function number extraction field from an IPv6 header */
1050 	ADF_CSR_WR(aram_csr_base,
1051 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_0,
1052 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_0_VALUE);
1053 	ADF_CSR_WR(aram_csr_base,
1054 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_0,
1055 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_0_VALUE);
1056 	/* Configure protocol extraction field from an IPv6 header */
1057 	ADF_CSR_WR(aram_csr_base,
1058 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_1,
1059 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_1_VALUE);
1060 	ADF_CSR_WR(aram_csr_base,
1061 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_1,
1062 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_1_VALUE);
1063 	/* Configure SPI extraction field from an IPv6 header */
1064 	ADF_CSR_WR(aram_csr_base,
1065 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_2,
1066 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_2_VALUE);
1067 	ADF_CSR_WR(aram_csr_base,
1068 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_2,
1069 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_2_VALUE);
1070 	/* Configure destination field IP address from an IPv6 header */
1071 	ADF_CSR_WR(aram_csr_base,
1072 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_3,
1073 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_3_VALUE);
1074 	ADF_CSR_WR(aram_csr_base,
1075 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_3,
1076 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_3_VALUE);
1077 }
1078 
1079 static int
1080 adf_get_inline_ipsec_algo_group(struct adf_accel_dev *accel_dev,
1081 				unsigned long *ipsec_algo_group)
1082 {
1083 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1084 
1085 	if (adf_cfg_get_param_value(
1086 		accel_dev, ADF_INLINE_SEC, ADF_INLINE_IPSEC_ALGO_GROUP, val))
1087 		return EFAULT;
1088 	if (kstrtoul(val, 0, ipsec_algo_group))
1089 		return EFAULT;
1090 
1091 	/* Verify the ipsec_algo_group */
1092 	if (*ipsec_algo_group >= IPSEC_ALGO_GROUP_DELIMITER) {
1093 		device_printf(
1094 		    GET_DEV(accel_dev),
1095 		    "Unsupported IPSEC algo group %lu in config file!\n",
1096 		    *ipsec_algo_group);
1097 		return EFAULT;
1098 	}
1099 
1100 	return 0;
1101 }
1102 
1103 static int
1104 c4xxx_init_inline_hw(struct adf_accel_dev *accel_dev)
1105 {
1106 	u32 sa_entry_reg_value = 0;
1107 	u32 sa_fn_lim = 0;
1108 	u32 supported_algo = 0;
1109 	struct resource *aram_csr_base;
1110 	u32 offset;
1111 	unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP;
1112 
1113 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1114 
1115 	if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group))
1116 		return EFAULT;
1117 
1118 	sa_entry_reg_value |=
1119 	    (ADF_C4XXX_DEFAULT_LU_KEY_LEN << ADF_C4XXX_LU_KEY_LEN_BIT_OFFSET);
1120 	if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) {
1121 		sa_entry_reg_value |= ADF_C4XXX_DEFAULT_SA_SIZE;
1122 		sa_fn_lim =
1123 		    ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_DEFAULT_SA_SIZE);
1124 		supported_algo = ADF_C4XXX_DEFAULT_SUPPORTED_ALGORITHMS;
1125 	} else if (ipsec_algo_group == IPSEC_ALGO_GROUP1) {
1126 		sa_entry_reg_value |= ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
1127 		sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev,
1128 						 ADF_C4XXX_ALGO_GROUP1_SA_SIZE);
1129 		supported_algo = ADF_C4XXX_SUPPORTED_ALGORITHMS_GROUP1;
1130 	} else {
1131 		return EFAULT;
1132 	}
1133 
1134 	/* REG_SA_ENTRY_CTRL register initialisation */
1135 	ADF_CSR_WR(aram_csr_base,
1136 		   ADF_C4XXX_REG_SA_ENTRY_CTRL,
1137 		   sa_entry_reg_value);
1138 
1139 	/* REG_SAL_FUNC_LIMITS register initialisation. Only the first register
1140 	 * needs to be initialised here, as it is assigned to a physical
1141 	 * function. Other registers will be initialised by the LAN PF driver.
1142 	 * The function limit is initialised to its maximum value.
1143 	 */
1144 	ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_FUNC_LIMITS, sa_fn_lim);
1145 
1146 	/* Initialize REG_SA_SCRATCH[0] register to
1147 	 * advertise supported crypto algorithms
1148 	 */
1149 	ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_0, supported_algo);
1150 
1151 	/* REG_SA_SCRATCH[2] register initialisation
1152 	 * to advertise supported crypto offload features.
1153 	 */
1154 	ADF_CSR_WR(aram_csr_base,
1155 		   ADF_C4XXX_REG_SA_SCRATCH_2,
1156 		   ADF_C4XXX_DEFAULT_CY_OFFLOAD_FEATURES);
1157 
1158 	/* Overwrite default MAC_CFG register in ingress offset */
1159 	ADF_CSR_WR64(aram_csr_base,
1160 		     ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET,
1161 		     ADF_C4XXX_MAC_CFG_VALUE);
1162 
1163 	/* Overwrite default MAC_CFG register in egress offset */
1164 	ADF_CSR_WR64(aram_csr_base,
1165 		     ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET,
1166 		     ADF_C4XXX_MAC_CFG_VALUE);
1167 
1168 	/* Overwrite default MAC_PIA_CFG
1169 	 * (Packet Interface Adapter Configuration) registers
1170 	 * in ingress offset
1171 	 */
1172 	ADF_CSR_WR64(aram_csr_base,
1173 		     ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET,
1174 		     ADF_C4XXX_MAC_PIA_CFG_VALUE);
1175 
1176 	/* Overwrite default MAC_PIA_CFG in egress offset */
1177 	ADF_CSR_WR64(aram_csr_base,
1178 		     ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET,
1179 		     ADF_C4XXX_MAC_PIA_CFG_VALUE);
1180 
1181 	c4xxx_enable_parse_extraction(accel_dev);
1182 
1183 	ADF_CSR_WR(aram_csr_base,
1184 		   ADF_C4XXX_INGRESS_CMD_DIS_MISC,
1185 		   ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE);
1186 
1187 	ADF_CSR_WR(aram_csr_base,
1188 		   ADF_C4XXX_EGRESS_CMD_DIS_MISC,
1189 		   ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE);
1190 
1191 	/* Set bits<1:0> in ADF_C4XXX_INLINE_CAPABILITY register to
1192 	 * advertise that both ingress and egress directions are available
1193 	 */
1194 	ADF_CSR_WR(aram_csr_base,
1195 		   ADF_C4XXX_REG_SA_INLINE_CAPABILITY,
1196 		   ADF_C4XXX_INLINE_CAPABILITIES);
1197 
1198 	/* Set error notification configuration of ingress */
1199 	offset = ADF_C4XXX_INLINE_INGRESS_OFFSET;
1200 	c4xxx_init_error_notification_configuration(accel_dev, offset);
1201 	/* Set error notification configuration of egress */
1202 	offset = ADF_C4XXX_INLINE_EGRESS_OFFSET;
1203 	c4xxx_init_error_notification_configuration(accel_dev, offset);
1204 
1205 	return 0;
1206 }
1207 
1208 static void
1209 adf_enable_inline_notification(struct adf_accel_dev *accel_dev)
1210 {
1211 	struct resource *aram_csr_base;
1212 
1213 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1214 
1215 	/* Set bit<0> in ADF_C4XXX_REG_SA_INLINE_ENABLE to advertise
1216 	 * that inline is enabled.
1217 	 */
1218 	ADF_CSR_WR(aram_csr_base,
1219 		   ADF_C4XXX_REG_SA_INLINE_ENABLE,
1220 		   ADF_C4XXX_INLINE_ENABLED);
1221 }
1222 
1223 static int
1224 c4xxx_init_aram_config(struct adf_accel_dev *accel_dev)
1225 {
1226 	u32 aram_size = ADF_C4XXX_2MB_ARAM_SIZE;
1227 	u32 ibuff_mem_needed = 0;
1228 	u32 usable_aram_size = 0;
1229 	struct adf_hw_aram_info *aram_info;
1230 	u32 sa_db_ctl_value;
1231 	struct resource *aram_csr_base;
1232 	u8 profile = 0;
1233 	u32 sadb_size = 0;
1234 	u32 sa_size = 0;
1235 	unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP;
1236 	u32 i;
1237 
1238 	if (accel_dev->au_info->num_inline_au > 0)
1239 		if (adf_get_inline_ipsec_algo_group(accel_dev,
1240 						    &ipsec_algo_group))
1241 			return EFAULT;
1242 
1243 	/* Allocate memory for adf_hw_aram_info */
1244 	aram_info = kzalloc(sizeof(*accel_dev->aram_info), GFP_KERNEL);
1245 	if (!aram_info)
1246 		return ENOMEM;
1247 
1248 	/* Initialise Inline direction */
1249 	aram_info->inline_direction_egress_mask = 0;
1250 	if (accel_dev->au_info->num_inline_au) {
1251 		/* Set inline direction bitmap in the ARAM to
1252 		 * inform firmware which ME is egress
1253 		 */
1254 		aram_info->inline_direction_egress_mask =
1255 		    accel_dev->au_info->inline_egress_msk;
1256 
1257 		/* User profile is valid, we can now add it
1258 		 * in the ARAM partition table
1259 		 */
1260 		aram_info->inline_congest_mngt_profile = profile;
1261 	}
1262 	/* Initialise DC ME mask, "1" = ME is used for DC operations */
1263 	aram_info->dc_ae_mask = accel_dev->au_info->dc_ae_msk;
1264 
1265 	/* Initialise CY ME mask, "1" = ME is used for CY operations
1266 	 * Since asym service can also be enabled on inline AEs, here
1267 	 * we use the sym ae mask for configuring the cy_ae_msk
1268 	 */
1269 	aram_info->cy_ae_mask = accel_dev->au_info->sym_ae_msk;
1270 
1271 	/* Configure number of long words in the ARAM */
1272 	aram_info->num_aram_lw_entries = ADF_C4XXX_NUM_ARAM_ENTRIES;
1273 
1274 	/* Reset region offset values to 0xffffffff */
1275 	aram_info->mmp_region_offset = ~aram_info->mmp_region_offset;
1276 	aram_info->skm_region_offset = ~aram_info->skm_region_offset;
1277 	aram_info->inter_buff_aram_region_offset =
1278 	    ~aram_info->inter_buff_aram_region_offset;
1279 
1280 	/* Determine ARAM size */
1281 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1282 	sa_db_ctl_value = ADF_CSR_RD(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL);
1283 
1284 	aram_size = (sa_db_ctl_value & ADF_C4XXX_SADB_SIZE_BIT) ?
1285 	    ADF_C4XXX_2MB_ARAM_SIZE :
1286 	    ADF_C4XXX_4MB_ARAM_SIZE;
1287 	device_printf(GET_DEV(accel_dev),
1288 		      "Total available accelerator memory: %uMB\n",
1289 		      aram_size / ADF_C4XXX_1MB_SIZE);
1290 
1291 	/* Compute MMP region offset */
1292 	aram_info->mmp_region_size = ADF_C4XXX_DEFAULT_MMP_REGION_SIZE;
1293 	aram_info->mmp_region_offset = aram_size - aram_info->mmp_region_size;
1294 
1295 	if (accel_dev->au_info->num_cy_au ||
1296 	    accel_dev->au_info->num_inline_au) {
1297 		/* Crypto is available therefore we must
1298 		 * include space in the ARAM for SKM.
1299 		 */
1300 		aram_info->skm_region_size = ADF_C4XXX_DEFAULT_SKM_REGION_SIZE;
1301 		/* Compute SKM region offset */
1302 		aram_info->skm_region_offset = aram_size -
1303 		    (aram_info->mmp_region_size + aram_info->skm_region_size);
1304 	}
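	/* Resulting ARAM layout, as derived from the offsets computed here:
	 * the MMP region occupies the top of ARAM, the SKM region (when
	 * crypto or inline is available) sits directly below it, and the
	 * SADB or intermediate buffers start at offset 0.
	 */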
1305 
1306 	/* SADB always start at offset 0. */
1307 	if (accel_dev->au_info->num_inline_au) {
1308 		/* Inline is available therefore we must
1309 		 * use remaining ARAM for the SADB.
1310 		 */
1311 		sadb_size = aram_size -
1312 		    (aram_info->mmp_region_size + aram_info->skm_region_size);
1313 
1314 		/*
1315 		 * When the inline service is enabled, the policy is that
1316 		 * compression gives up its space in ARAM to allow for a
1317 		 * larger SADB. Compression must use DRAM instead of ARAM.
1318 		 */
1319 		aram_info->inter_buff_aram_region_size = 0;
1320 
1321 		/* the SADB size must be an integral multiple of the SA size */
1322 		if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) {
1323 			sa_size = ADF_C4XXX_DEFAULT_SA_SIZE;
1324 		} else {
1325 			/* IPSEC_ALGO_GROUP1
1326 			 * Total 2 algo groups.
1327 			 */
1328 			sa_size = ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
1329 		}
1330 
1331 		sadb_size = sadb_size -
1332 		    (sadb_size % ADF_C4XXX_SA_SIZE_IN_BYTES(sa_size));
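		/* Hypothetical example of the rounding above: with 1000000
		 * bytes of remaining ARAM and an SA entry size of 2048 bytes,
		 * sadb_size becomes 999424 bytes, i.e. 488 whole SA entries.
		 */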
1333 		aram_info->sadb_region_size = sadb_size;
1334 	}
1335 
1336 	if (accel_dev->au_info->num_dc_au &&
1337 	    !accel_dev->au_info->num_inline_au) {
1338 		/* Compression is available therefore we must see if there is
1339 		 * space in the ARAM for intermediate buffers.
1340 		 */
1341 		aram_info->inter_buff_aram_region_size = 0;
1342 		usable_aram_size = aram_size -
1343 		    (aram_info->mmp_region_size + aram_info->skm_region_size);
1344 
1345 		for (i = 1; i <= accel_dev->au_info->num_dc_au; i++) {
1346 			if ((i * ADF_C4XXX_AU_COMPR_INTERM_SIZE) >
1347 			    usable_aram_size)
1348 				break;
1349 
1350 			ibuff_mem_needed = i * ADF_C4XXX_AU_COMPR_INTERM_SIZE;
1351 		}
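		/* The loop above reserves intermediate-buffer ARAM for as
		 * many DC accel units as fit, e.g. with 3 DC AUs but room for
		 * only 2, ibuff_mem_needed ends up at
		 * 2 * ADF_C4XXX_AU_COMPR_INTERM_SIZE (illustrative case).
		 */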
1352 
1353 		/* Set remaining ARAM to intermediate buffers. Firmware handles
1354 		 * fallback to DRAM for cases where the number of AUs assigned
1355 		 * to compression exceeds available ARAM memory.
1356 		 */
1357 		aram_info->inter_buff_aram_region_size = ibuff_mem_needed;
1358 
1359 		/* If ARAM is used for compression set its initial offset. */
1360 		if (aram_info->inter_buff_aram_region_size)
1361 			aram_info->inter_buff_aram_region_offset = 0;
1362 	}
1363 
1364 	accel_dev->aram_info = aram_info;
1365 
1366 	return 0;
1367 }
1368 
1369 static void
1370 c4xxx_exit_aram_config(struct adf_accel_dev *accel_dev)
1371 {
1372 	kfree(accel_dev->aram_info);
1373 	accel_dev->aram_info = NULL;
1374 }
1375 
1376 static u32
1377 get_num_accel_units(struct adf_hw_device_data *self)
1378 {
1379 	u32 i = 0, num_accel = 0;
1380 	unsigned long accel_mask = 0;
1381 
1382 	if (!self || !self->accel_mask)
1383 		return 0;
1384 
1385 	accel_mask = self->accel_mask;
1386 
1387 	for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
1388 	{
1389 		num_accel++;
1390 	}
1391 
1392 	return num_accel / ADF_C4XXX_NUM_ACCEL_PER_AU;
1393 }
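/* Worked example: the function returns hweight(accel_mask) divided by
 * ADF_C4XXX_NUM_ACCEL_PER_AU, so 12 enabled accelerators grouped 2 per AU
 * (as the accel unit tables above suggest) yield 6 accel units.
 */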
1394 
1395 static int
1396 get_accel_unit(struct adf_hw_device_data *self,
1397 	       struct adf_accel_unit **accel_unit)
1398 {
1399 	enum dev_sku_info sku;
1400 
1401 	sku = get_sku(self);
1402 
1403 	switch (sku) {
1404 	case DEV_SKU_1:
1405 	case DEV_SKU_1_CY:
1406 		*accel_unit = adf_c4xxx_au_32_ae;
1407 		break;
1408 	case DEV_SKU_2:
1409 	case DEV_SKU_2_CY:
1410 		*accel_unit = adf_c4xxx_au_24_ae;
1411 		break;
1412 	case DEV_SKU_3:
1413 	case DEV_SKU_3_CY:
1414 		*accel_unit = adf_c4xxx_au_12_ae;
1415 		break;
1416 	default:
1417 		*accel_unit = adf_c4xxx_au_emulation;
1418 		break;
1419 	}
1420 	return 0;
1421 }
1422 
1423 static int
1424 get_ae_info(struct adf_hw_device_data *self, const struct adf_ae_info **ae_info)
1425 {
1426 	enum dev_sku_info sku;
1427 
1428 	sku = get_sku(self);
1429 
1430 	switch (sku) {
1431 	case DEV_SKU_1:
1432 		*ae_info = adf_c4xxx_32_ae;
1433 		break;
1434 	case DEV_SKU_1_CY:
1435 		*ae_info = adf_c4xxx_32_ae_sym;
1436 		break;
1437 	case DEV_SKU_2:
1438 		*ae_info = adf_c4xxx_24_ae;
1439 		break;
1440 	case DEV_SKU_2_CY:
1441 		*ae_info = adf_c4xxx_24_ae_sym;
1442 		break;
1443 	case DEV_SKU_3:
1444 		*ae_info = adf_c4xxx_12_ae;
1445 		break;
1446 	case DEV_SKU_3_CY:
1447 		*ae_info = adf_c4xxx_12_ae_sym;
1448 		break;
1449 	default:
1450 		*ae_info = adf_c4xxx_12_ae;
1451 		break;
1452 	}
1453 	return 0;
1454 }
1455 
1456 static int
1457 adf_add_debugfs_info(struct adf_accel_dev *accel_dev)
1458 {
1459 	/* Add Accel Unit configuration table to debug FS interface */
1460 	if (c4xxx_init_ae_config(accel_dev)) {
1461 		device_printf(GET_DEV(accel_dev),
1462 			      "Failed to create entry for AE configuration\n");
1463 		return EFAULT;
1464 	}
1465 
1466 	return 0;
1467 }
1468 
1469 static void
1470 adf_remove_debugfs_info(struct adf_accel_dev *accel_dev)
1471 {
1472 	/* Remove Accel Unit configuration table from debug FS interface */
1473 	c4xxx_exit_ae_config(accel_dev);
1474 }
1475 
1476 static int
1477 check_svc_to_hw_capabilities(struct adf_accel_dev *accel_dev,
1478 			     const char *svc_name,
1479 			     enum icp_qat_capabilities_mask cap)
1480 {
1481 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1482 	u32 hw_cap = hw_data->accel_capabilities_mask;
1483 
1484 	hw_cap &= cap;
1485 	if (hw_cap != cap) {
1486 		device_printf(GET_DEV(accel_dev),
1487 			      "Service not supported by accelerator: %s\n",
1488 			      svc_name);
1489 		return EPERM;
1490 	}
1491 
1492 	return 0;
1493 }
1494 
1495 static int
1496 check_accel_unit_config(struct adf_accel_dev *accel_dev,
1497 			u8 num_cy_au,
1498 			u8 num_dc_au,
1499 			u8 num_inline_au)
1500 {
1501 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1502 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1503 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1504 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1505 	u32 service_mask = ADF_ACCEL_SERVICE_NULL;
1506 	char *token, *cur_str;
1507 	int ret = 0;
1508 
1509 	/* Get the services enabled by user */
1510 	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
1511 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1512 		return EFAULT;
1513 	cur_str = val;
1514 	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1515 	while (token) {
1516 		if (!strncmp(token, ADF_SERVICE_CY, strlen(ADF_SERVICE_CY))) {
1517 			service_mask |= ADF_ACCEL_CRYPTO;
1518 			ret |= check_svc_to_hw_capabilities(
1519 			    accel_dev,
1520 			    token,
1521 			    ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
1522 				ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
1523 		}
1524 
1525 		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) {
1526 			service_mask |= ADF_ACCEL_CRYPTO;
1527 			ret |= check_svc_to_hw_capabilities(
1528 			    accel_dev,
1529 			    token,
1530 			    ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC);
1531 		}
1532 
1533 		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) {
1534 			/* Handle a special case of services 'asym;inline'
1535 			 * enabled where ASYM is handled by Inline firmware
1536 			 * at AE level. This configuration allows to enable
1537 			 * ASYM service without accel units assigned to
1538 			 * CRYPTO service, e.g.
1539 			 * num_inline_au = 6
1540 			 * num_cy_au = 0
1541 			 */
1542 			if (num_inline_au < num_au)
1543 				service_mask |= ADF_ACCEL_CRYPTO;
1544 
1545 			ret |= check_svc_to_hw_capabilities(
1546 			    accel_dev,
1547 			    token,
1548 			    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
1549 		}
1550 
1551 		if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) {
1552 			service_mask |= ADF_ACCEL_COMPRESSION;
1553 			ret |= check_svc_to_hw_capabilities(
1554 			    accel_dev,
1555 			    token,
1556 			    ICP_ACCEL_CAPABILITIES_COMPRESSION);
1557 		}
1558 
1559 		if (!strncmp(token,
1560 			     ADF_SERVICE_INLINE,
1561 			     strlen(ADF_SERVICE_INLINE))) {
1562 			service_mask |= ADF_ACCEL_INLINE_CRYPTO;
1563 			ret |= check_svc_to_hw_capabilities(
1564 			    accel_dev, token, ICP_ACCEL_CAPABILITIES_INLINE);
1565 		}
1566 
1567 		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1568 	}
1569 
1570 	/* Ensure the user doesn't enable services that are not supported by
1571 	 * the accelerator.
1572 	 */
1573 	if (ret) {
1574 		device_printf(GET_DEV(accel_dev),
1575 			      "Invalid accelerator configuration.\n");
1576 		return EFAULT;
1577 	}
1578 
1579 	if (!(service_mask & ADF_ACCEL_COMPRESSION) && num_dc_au > 0) {
1580 		device_printf(GET_DEV(accel_dev),
1581 			      "Invalid accel unit config.\n");
1582 		device_printf(
1583 		    GET_DEV(accel_dev),
1584 		    "DC accel units set when dc service not enabled\n");
1585 		return EFAULT;
1586 	}
1587 
1588 	if (!(service_mask & ADF_ACCEL_CRYPTO) && num_cy_au > 0) {
1589 		device_printf(GET_DEV(accel_dev),
1590 			      "Invalid accel unit config.\n");
1591 		device_printf(
1592 		    GET_DEV(accel_dev),
1593 		    "CY accel units set when cy service not enabled\n");
1594 		return EFAULT;
1595 	}
1596 
1597 	if (!(service_mask & ADF_ACCEL_INLINE_CRYPTO) && num_inline_au > 0) {
1598 		device_printf(GET_DEV(accel_dev),
1599 			      "Invalid accel unit config.\n"
1600 			      "Inline feature not supported.\n");
1601 		return EFAULT;
1602 	}
1603 
1604 	hw_data->service_mask = service_mask;
1605 	/* Ensure the user doesn't allocate more than max accel units */
1606 	if (num_au != (num_cy_au + num_dc_au + num_inline_au)) {
1607 		device_printf(GET_DEV(accel_dev),
1608 			      "Invalid accel unit config.\n");
1609 		device_printf(GET_DEV(accel_dev),
1610 			      "Max accel units is %d\n",
1611 			      num_au);
1612 		return EFAULT;
1613 	}
1614 
1615 	/* Ensure user allocates hardware resources for enabled services */
1616 	if (!num_cy_au && (service_mask & ADF_ACCEL_CRYPTO)) {
1617 		device_printf(GET_DEV(accel_dev),
1618 			      "Failed to enable cy service!\n");
1619 		device_printf(GET_DEV(accel_dev),
1620 			      "%s should not be 0\n",
1621 			      ADF_NUM_CY_ACCEL_UNITS);
1622 		return EFAULT;
1623 	}
1624 	if (!num_dc_au && (service_mask & ADF_ACCEL_COMPRESSION)) {
1625 		device_printf(GET_DEV(accel_dev),
1626 			      "Failed to enable dc service!\n");
1627 		device_printf(GET_DEV(accel_dev),
1628 			      "%s should not be 0\n",
1629 			      ADF_NUM_DC_ACCEL_UNITS);
1630 		return EFAULT;
1631 	}
1632 	if (!num_inline_au && (service_mask & ADF_ACCEL_INLINE_CRYPTO)) {
1633 		device_printf(GET_DEV(accel_dev),
1634 			      "Failed to enable inline service!\n");
1635 		device_printf(GET_DEV(accel_dev),
1636 			      "%s should not be 0\n",
1637 			      ADF_NUM_INLINE_ACCEL_UNITS);
1638 		return EFAULT;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
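/* Read the number of accel units allocated to each service from the
 * general configuration section, using the ADF_NUM_*_ACCEL_UNITS keys.
 */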
1644 static int
1645 get_accel_unit_config(struct adf_accel_dev *accel_dev,
1646 		      u8 *num_cy_au,
1647 		      u8 *num_dc_au,
1648 		      u8 *num_inline_au)
1649 {
1650 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1651 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1652 
1653 	/* Get the number of accel units allocated for each service */
1654 	snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
1655 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1656 		return EFAULT;
1657 	if (compat_strtou8(val, 10, num_cy_au))
1658 		return EFAULT;
1659 	snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
1660 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1661 		return EFAULT;
1662 	if (compat_strtou8(val, 10, num_dc_au))
1663 		return EFAULT;
1664 
1665 	snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
1666 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1667 		return EFAULT;
1668 	if (compat_strtou8(val, 10, num_inline_au))
1669 		return EFAULT;
1670 
1671 	return 0;
1672 }
1673 
1674 /* Read the inline ingress/egress configuration and return the
1675  * number of AEs reserved for ingress on the accel units allocated
1676  * to the inline service. The remaining inline AEs are left to
1677  * handle egress.
1678  */
1679 static int
1680 adf_get_inline_config(struct adf_accel_dev *accel_dev, u32 *num_ingress_aes)
1681 {
1682 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1683 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1684 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1685 	char *value;
1686 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1687 	unsigned long ingress, egress = 0;
1688 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
1689 	u32 num_inline_aes = 0, num_ingress_ae = 0;
1690 	u32 i = 0;
1691 
1692 	snprintf(key, sizeof(key), ADF_INLINE_INGRESS);
1693 	if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
1694 		device_printf(GET_DEV(accel_dev), "Failed to find ingress\n");
1695 		return EFAULT;
1696 	}
1697 	value = val;
1698 	value = strsep(&value, ADF_C4XXX_PERCENTAGE);
1699 	if (compat_strtoul(value, 10, &ingress))
1700 		return EFAULT;
1701 
1702 	snprintf(key, sizeof(key), ADF_INLINE_EGRESS);
1703 	if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
1704 		device_printf(GET_DEV(accel_dev), "Failed to find egress\n");
1705 		return EFAULT;
1706 	}
1707 	value = val;
1708 	value = strsep(&value, ADF_C4XXX_PERCENTAGE);
1709 	if (compat_strtoul(value, 10, &egress))
1710 		return EFAULT;
1711 
1712 	if (ingress + egress != ADF_C4XXX_100) {
1713 		device_printf(GET_DEV(accel_dev),
1714 			      "The sum of ingress and egress should be 100\n");
1715 		return EFAULT;
1716 	}
1717 
1718 	for (i = 0; i < num_au; i++) {
1719 		if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO)
1720 			num_inline_aes += accel_unit[i].num_ae;
1721 	}
1722 
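	/* Convert the ingress percentage into a number of ingress AEs,
	 * rounding up once the remainder exceeds the round limit.
	 * For example, 12 inline AEs with a 50% ingress split yield
	 * 6 ingress AEs; the remaining inline AEs serve egress.
	 */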
1723 	num_ingress_ae = num_inline_aes * ingress / ADF_C4XXX_100;
1724 	if (((num_inline_aes * ingress) % ADF_C4XXX_100) >
1725 	    ADF_C4XXX_ROUND_LIMIT)
1726 		num_ingress_ae++;
1727 
1728 	*num_ingress_aes = num_ingress_ae;
1729 	return 0;
1730 }
1731 
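/* Build the ingress/egress AE masks for the inline service. Within each
 * inline accel unit the lowest-numbered AEs are assigned to ingress until
 * the requested ingress AE count is exhausted; the remaining inline AEs
 * are assigned to egress.
 */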
1732 static int
1733 adf_set_inline_ae_mask(struct adf_accel_dev *accel_dev)
1734 {
1735 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1736 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1737 	struct adf_accel_unit_info *au_info = accel_dev->au_info;
1738 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
1739 	u32 num_ingress_ae = 0;
1740 	u32 ingress_msk = 0;
1741 	u32 i, j, ae_mask;
1742 
1743 	if (adf_get_inline_config(accel_dev, &num_ingress_ae))
1744 		return EFAULT;
1745 
1746 	for (i = 0; i < num_au; i++) {
1747 		j = 0;
1748 		if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO) {
1749 			/* AEs with inline service enabled are also used
1750 			 * for asymmetric crypto
1751 			 */
1752 			au_info->asym_ae_msk |= accel_unit[i].ae_mask;
1753 			ae_mask = accel_unit[i].ae_mask;
1754 			while (num_ingress_ae && ae_mask) {
1755 				if (ae_mask & 1) {
1756 					ingress_msk |= BIT(j);
1757 					num_ingress_ae--;
1758 				}
1759 				ae_mask = ae_mask >> 1;
1760 				j++;
1761 			}
1762 			au_info->inline_ingress_msk |= ingress_msk;
1763 
1764 			au_info->inline_egress_msk |=
1765 			    ~(au_info->inline_ingress_msk) &
1766 			    accel_unit[i].ae_mask;
1767 		}
1768 	}
1769 
1770 	return 0;
1771 }
1772 
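/* Derive the symmetric, asymmetric and compression AE masks from the
 * services enabled in the configuration and the accel unit assignments.
 */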
1773 static int
1774 adf_set_ae_mask(struct adf_accel_dev *accel_dev)
1775 {
1776 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1777 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1778 	struct adf_accel_unit_info *au_info = accel_dev->au_info;
1779 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
1780 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1781 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1782 	char *token, *cur_str;
1783 	bool asym_en = false, sym_en = false;
1784 	u32 i;
1785 
1786 	/* Get the services enabled by the user */
1787 	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
1788 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1789 		return EFAULT;
1790 	cur_str = val;
1791 	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1792 	while (token) {
1793 		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
1794 			asym_en = true;
1795 		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
1796 			sym_en = true;
1797 		if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) {
1798 			sym_en = true;
1799 			asym_en = true;
1800 		}
1801 		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1802 	}
1803 
1804 	for (i = 0; i < num_au; i++) {
1805 		if (accel_unit[i].services == ADF_ACCEL_CRYPTO) {
1806 			/* AEs that support crypto can perform both
1807 			 * symmetric and asymmetric crypto; however,
1808 			 * the threads are only enabled if the relevant
1809 			 * service is also enabled.
1810 			 */
1811 			if (asym_en)
1812 				au_info->asym_ae_msk |= accel_unit[i].ae_mask;
1813 			if (sym_en)
1814 				au_info->sym_ae_msk |= accel_unit[i].ae_mask;
1815 		} else if (accel_unit[i].services == ADF_ACCEL_COMPRESSION) {
1816 			au_info->dc_ae_msk |= accel_unit[i].comp_ae_mask;
1817 		}
1818 	}
1819 	return 0;
1820 }
1821 
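/* Validate the accel unit configuration and assign services to accel
 * units: compression is assigned first, preferring 4-AE units starting
 * from the highest-numbered unit, followed by inline and then crypto
 * from the lowest-numbered free units.
 */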
1822 static int
1823 adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
1824 {
1825 	u8 num_cy_au, num_dc_au, num_inline_au;
1826 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1827 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1828 	struct adf_accel_unit *accel_unit;
1829 	const struct adf_ae_info *ae_info;
1830 	int i;
1831 
1832 	if (get_accel_unit_config(
1833 		accel_dev, &num_cy_au, &num_dc_au, &num_inline_au)) {
1834 		device_printf(GET_DEV(accel_dev), "Invalid accel unit cfg\n");
1835 		return EFAULT;
1836 	}
1837 
1838 	if (check_accel_unit_config(
1839 		accel_dev, num_cy_au, num_dc_au, num_inline_au))
1840 		return EFAULT;
1841 
1842 	accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
1843 	if (!accel_dev->au_info)
1844 		return ENOMEM;
1845 
1846 	accel_dev->au_info->num_cy_au = num_cy_au;
1847 	accel_dev->au_info->num_dc_au = num_dc_au;
1848 	accel_dev->au_info->num_inline_au = num_inline_au;
1849 
1850 	if (get_ae_info(hw_data, &ae_info)) {
1851 		device_printf(GET_DEV(accel_dev), "Failed to get ae info\n");
1852 		goto err_au_info;
1853 	}
1854 	accel_dev->au_info->ae_info = ae_info;
1855 
1856 	if (get_accel_unit(hw_data, &accel_unit)) {
1857 		device_printf(GET_DEV(accel_dev), "Failed to get accel unit\n");
1858 		goto err_ae_info;
1859 	}
1860 
1861 	/* Enable compression accel units; units with 4 AEs are
1862 	 * reserved for compression first. */
1863 	for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
1864 		if (accel_unit[i].num_ae == ADF_C4XXX_4_AE) {
1865 			accel_unit[i].services = ADF_ACCEL_COMPRESSION;
1866 			num_dc_au--;
1867 		}
1868 	}
1869 	for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
1870 		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
1871 			accel_unit[i].services = ADF_ACCEL_COMPRESSION;
1872 			num_dc_au--;
1873 		}
1874 	}
1875 
1876 	/* Enable inline accel units */
1877 	for (i = 0; i < num_au && num_inline_au > 0; i++) {
1878 		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
1879 			accel_unit[i].services = ADF_ACCEL_INLINE_CRYPTO;
1880 			num_inline_au--;
1881 		}
1882 	}
1883 
1884 	/* Enable crypto accel units */
1885 	for (i = 0; i < num_au && num_cy_au > 0; i++) {
1886 		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
1887 			accel_unit[i].services = ADF_ACCEL_CRYPTO;
1888 			num_cy_au--;
1889 		}
1890 	}
1891 	accel_dev->au_info->au = accel_unit;
1892 	return 0;
1893 
1894 err_ae_info:
1895 	accel_dev->au_info->ae_info = NULL;
1896 err_au_info:
1897 	kfree(accel_dev->au_info);
1898 	accel_dev->au_info = NULL;
1899 	return EFAULT;
1900 }
1901 
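/* Reset the per-unit service assignments and release the accel unit
 * bookkeeping allocated by adf_init_accel_unit_services().
 */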
1902 static void
1903 adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
1904 {
1905 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1906 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1907 	int i;
1908 
1909 	if (accel_dev->au_info) {
1910 		if (accel_dev->au_info->au) {
1911 			for (i = 0; i < num_au; i++) {
1912 				accel_dev->au_info->au[i].services =
1913 				    ADF_ACCEL_SERVICE_NULL;
1914 			}
1915 		}
1916 		accel_dev->au_info->au = NULL;
1917 		accel_dev->au_info->ae_info = NULL;
1918 		kfree(accel_dev->au_info);
1919 		accel_dev->au_info = NULL;
1920 	}
1921 }
1922 
1923 static inline void
1924 adf_c4xxx_reset_hw_units(struct adf_accel_dev *accel_dev)
1925 {
1926 	struct resource *pmisc =
1927 	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
1928 
1929 	u32 global_clk_enable = ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ARAM |
1930 	    ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICI_ENABLE |
1931 	    ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICE_ENABLE;
1932 
1933 	u32 ixp_reset_generic = ADF_C4XXX_IXP_RESET_GENERIC_ARAM |
1934 	    ADF_C4XXX_IXP_RESET_GENERIC_INLINE_EGRESS |
1935 	    ADF_C4XXX_IXP_RESET_GENERIC_INLINE_INGRESS;
1936 
1937 	/* To properly reset each of the units the driver must:
1938 	 * 1) Assert the reset-active state using the IXP reset
1939 	 *    generic register;
1940 	 * 2) Disable the generic clock;
1941 	 * 3) Take the device out of reset by clearing the IXP
1942 	 *    reset generic register;
1943 	 * 4) Re-enable the generic clock.
1944 	 */
1945 	ADF_CSR_WR(pmisc, ADF_C4XXX_IXP_RESET_GENERIC, ixp_reset_generic);
1946 	ADF_CSR_WR(pmisc,
1947 		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
1948 		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_DISABLE_ALL);
1949 	ADF_CSR_WR(pmisc,
1950 		   ADF_C4XXX_IXP_RESET_GENERIC,
1951 		   ADF_C4XXX_IXP_RESET_GENERIC_OUT_OF_RESET_TRIGGER);
1952 	ADF_CSR_WR(pmisc,
1953 		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
1954 		   global_clk_enable);
1955 }
1956 
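/* Top-level accel unit bring-up: assign services to accel units, program
 * the per-service AE masks, define the ARAM regions and, when the inline
 * service is enabled, initialise the inline hardware and its interrupts.
 */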
1957 static int
1958 adf_init_accel_units(struct adf_accel_dev *accel_dev)
1959 {
1960 	struct resource *csr =
1961 	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
1962 
1963 	if (adf_init_accel_unit_services(accel_dev))
1964 		return EFAULT;
1965 
1966 	/* Set cy and dc enabled AE masks */
1967 	if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_dc_au) {
1968 		if (adf_set_ae_mask(accel_dev)) {
1969 			device_printf(GET_DEV(accel_dev),
1970 				      "Failed to set ae masks\n");
1971 			goto err_au;
1972 		}
1973 	}
1974 	/* Set ingress/egress ae mask if inline is enabled */
1975 	if (accel_dev->au_info->num_inline_au) {
1976 		if (adf_set_inline_ae_mask(accel_dev)) {
1977 			device_printf(GET_DEV(accel_dev),
1978 				      "Failed to set inline ae masks\n");
1979 			goto err_au;
1980 		}
1981 	}
1982 	/* Define ARAM regions */
1983 	if (c4xxx_init_aram_config(accel_dev)) {
1984 		device_printf(GET_DEV(accel_dev),
1985 			      "Failed to init aram config\n");
1986 		goto err_au;
1987 	}
1988 	/* Configure h/w registers for inline operations */
1989 	if (accel_dev->au_info->num_inline_au > 0)
1990 		/* Initialise configuration parsing registers */
1991 		if (c4xxx_init_inline_hw(accel_dev))
1992 			goto err_au;
1993 
1994 	c4xxx_set_sadb_size(accel_dev);
1995 
1996 	if (accel_dev->au_info->num_inline_au > 0) {
1997 		/* ICI/ICE interrupts must only be enabled after MSI-X is enabled */
1998 		ADF_CSR_WR(csr,
1999 			   ADF_C4XXX_ERRMSK11,
2000 			   ADF_C4XXX_ERRMSK11_ERR_DISABLE_ICI_ICE_INTR);
2001 		adf_enable_inline_notification(accel_dev);
2002 	}
2003 
2004 	update_hw_capability(accel_dev);
2005 	if (adf_add_debugfs_info(accel_dev)) {
2006 		device_printf(GET_DEV(accel_dev),
2007 			      "Failed to add debug FS information\n");
2008 		goto err_au;
2009 	}
2010 	return 0;
2011 
2012 err_au:
2013 	/* Free and clear accel unit data structures */
2014 	adf_exit_accel_unit_services(accel_dev);
2015 	return EFAULT;
2016 }
2017 
2018 static void
2019 adf_exit_accel_units(struct adf_accel_dev *accel_dev)
2020 {
2021 	adf_exit_accel_unit_services(accel_dev);
2022 	/* Free aram mapping structure */
2023 	c4xxx_exit_aram_config(accel_dev);
2024 	/* Remove entries in debug FS */
2025 	adf_remove_debugfs_info(accel_dev);
2026 }
2027 
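/* Map an accel unit service to the firmware object name to load,
 * selecting the symmetric-crypto-only object on SKUs without
 * asymmetric or compression capabilities.
 */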
2028 static const char *
2029 get_obj_name(struct adf_accel_dev *accel_dev,
2030 	     enum adf_accel_unit_services service)
2031 {
2032 	u32 capabilities = GET_HW_DATA(accel_dev)->accel_capabilities_mask;
2033 	bool sym_only_sku = false;
2034 
2035 	/* Check, via the device capabilities, whether the SKU is
2036 	 * capable of symmetric cryptography only.
2037 	 */
2038 	if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
2039 	    !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
2040 	    !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
2041 		sym_only_sku = true;
2042 
2043 	switch (service) {
2044 	case ADF_ACCEL_INLINE_CRYPTO:
2045 		return ADF_C4XXX_INLINE_OBJ;
2046 	case ADF_ACCEL_CRYPTO:
2047 		if (sym_only_sku)
2048 			return ADF_C4XXX_SYM_OBJ;
2049 		else
2050 			return ADF_C4XXX_CY_OBJ;
2051 		break;
2052 	case ADF_ACCEL_COMPRESSION:
2053 		return ADF_C4XXX_DC_OBJ;
2054 	default:
2055 		return NULL;
2056 	}
2057 }
2058 
2059 static uint32_t
2060 get_objs_num(struct adf_accel_dev *accel_dev)
2061 {
2062 	u32 srv = 0;
2063 	u32 max_srv_id = 0;
2064 	unsigned long service_mask = accel_dev->hw_device->service_mask;
2065 
2066 	/* The number of objects follows from the highest enabled service ID */
2067 	for_each_set_bit(srv, &service_mask, ADF_C4XXX_MAX_OBJ)
2068 	{
2069 		max_srv_id = srv;
2070 	}
2071 
2072 	return (max_srv_id + 1);
2073 }
2074 
2075 static uint32_t
2076 get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
2077 		    enum adf_accel_unit_services service)
2078 {
2079 	u32 ae_mask = 0;
2080 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
2081 	u32 num_au = hw_data->get_num_accel_units(hw_data);
2082 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
2083 	u32 i = 0;
2084 
2085 	if (service == ADF_ACCEL_SERVICE_NULL)
2086 		return 0;
2087 
2088 	for (i = 0; i < num_au; i++) {
2089 		if (accel_unit[i].services == service)
2090 			ae_mask |= accel_unit[i].ae_mask;
2091 	}
2092 	return ae_mask;
2093 }
2094 
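/* Set or clear the valid bit of every AE-thread to PCIe-function mapping
 * entry when SR-IOV is being enabled or disabled.
 */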
2095 static void
2096 configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
2097 {
2098 	struct resource *addr;
2099 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
2100 	u32 num_aes = hw_data->get_num_aes(hw_data);
2101 	u32 reg = 0x0;
2102 	u32 i;
2103 
2104 	addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
2105 
2106 	/* Set/Unset Valid bits in AE Thread to PCIe Function Mapping */
2107 	for (i = 0; i < ADF_C4XXX_AE2FUNC_REG_PER_AE * num_aes; i++) {
2108 		reg = ADF_CSR_RD(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
2109 				 i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE);
2110 		if (enable)
2111 			reg |= ADF_C4XXX_AE2FUNC_MAP_VALID;
2112 		else
2113 			reg &= ~ADF_C4XXX_AE2FUNC_MAP_VALID;
2114 		ADF_CSR_WR(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
2115 			   i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE,
2116 			   reg);
2117 	}
2118 }
2119 
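/* Populate the c4xxx-specific constants and operation callbacks consumed
 * by the common ADF framework.
 */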
2120 void
2121 adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
2122 {
2123 	hw_data->dev_class = &c4xxx_class;
2124 	hw_data->instance_id = c4xxx_class.instances++;
2125 	hw_data->num_banks = ADF_C4XXX_ETR_MAX_BANKS;
2126 	hw_data->num_rings_per_bank = ADF_C4XXX_NUM_RINGS_PER_BANK;
2127 	hw_data->num_accel = ADF_C4XXX_MAX_ACCELERATORS;
2128 	hw_data->num_engines = ADF_C4XXX_MAX_ACCELENGINES;
2129 	hw_data->num_logical_accel = 1;
2130 	hw_data->tx_rx_gap = ADF_C4XXX_RX_RINGS_OFFSET;
2131 	hw_data->tx_rings_mask = ADF_C4XXX_TX_RINGS_MASK;
2132 	hw_data->alloc_irq = adf_isr_resource_alloc;
2133 	hw_data->free_irq = adf_isr_resource_free;
2134 	hw_data->enable_error_correction = adf_enable_error_correction;
2135 	hw_data->init_ras = adf_init_ras;
2136 	hw_data->exit_ras = adf_exit_ras;
2137 	hw_data->ras_interrupts = adf_ras_interrupts;
2138 	hw_data->get_accel_mask = get_accel_mask;
2139 	hw_data->get_ae_mask = get_ae_mask;
2140 	hw_data->get_num_accels = get_num_accels;
2141 	hw_data->get_num_aes = get_num_aes;
2142 	hw_data->get_num_accel_units = get_num_accel_units;
2143 	hw_data->get_sram_bar_id = get_sram_bar_id;
2144 	hw_data->get_etr_bar_id = get_etr_bar_id;
2145 	hw_data->get_misc_bar_id = get_misc_bar_id;
2146 	hw_data->get_arb_info = get_arb_info;
2147 	hw_data->get_admin_info = get_admin_info;
2148 	hw_data->get_errsou_offset = get_errsou_offset;
2149 	hw_data->get_clock_speed = get_clock_speed;
2150 	hw_data->get_eth_doorbell_msg = get_eth_doorbell_msg;
2151 	hw_data->get_sku = get_sku;
2152 	hw_data->heartbeat_ctr_num = ADF_NUM_THREADS_PER_AE;
2153 	hw_data->check_prod_sku = c4xxx_check_prod_sku;
2154 	hw_data->fw_name = ADF_C4XXX_FW;
2155 	hw_data->fw_mmp_name = ADF_C4XXX_MMP;
2156 	hw_data->get_obj_name = get_obj_name;
2157 	hw_data->get_objs_num = get_objs_num;
2158 	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
2159 	hw_data->init_admin_comms = adf_init_admin_comms;
2160 	hw_data->exit_admin_comms = adf_exit_admin_comms;
2161 	hw_data->configure_iov_threads = configure_iov_threads;
2162 	hw_data->disable_iov = adf_disable_sriov;
2163 	hw_data->send_admin_init = adf_send_admin_init;
2164 	hw_data->init_arb = adf_init_arb_c4xxx;
2165 	hw_data->exit_arb = adf_exit_arb_c4xxx;
2166 	hw_data->disable_arb = adf_disable_arb;
2167 	hw_data->enable_ints = adf_enable_ints;
2168 	hw_data->set_ssm_wdtimer = c4xxx_set_ssm_wdtimer;
2169 	hw_data->check_slice_hang = c4xxx_check_slice_hang;
2170 	hw_data->reset_device = adf_reset_flr;
2171 	hw_data->restore_device = adf_c4xxx_dev_restore;
2172 	hw_data->init_accel_units = adf_init_accel_units;
2173 	hw_data->reset_hw_units = adf_c4xxx_reset_hw_units;
2174 	hw_data->exit_accel_units = adf_exit_accel_units;
2175 	hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
2176 	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
2177 	hw_data->get_ae_clock = get_ae_clock;
2178 	hw_data->clock_frequency = ADF_C4XXX_AE_FREQ;
2179 	hw_data->measure_clock = measure_clock;
2180 	hw_data->add_pke_stats = adf_pke_replay_counters_add_c4xxx;
2181 	hw_data->remove_pke_stats = adf_pke_replay_counters_remove_c4xxx;
2182 	hw_data->add_misc_error = adf_misc_error_add_c4xxx;
2183 	hw_data->remove_misc_error = adf_misc_error_remove_c4xxx;
2184 	hw_data->extended_dc_capabilities = 0;
2185 	hw_data->get_storage_enabled = get_storage_enabled;
2186 	hw_data->query_storage_cap = 0;
2187 	hw_data->get_accel_cap = c4xxx_get_hw_cap;
2188 	hw_data->configure_accel_units = c4xxx_configure_accel_units;
2189 	hw_data->pre_reset = adf_dev_pre_reset;
2190 	hw_data->post_reset = adf_dev_post_reset;
2191 	hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
2192 	hw_data->count_ras_event = adf_fw_count_ras_event;
2193 	hw_data->config_device = adf_config_device;
2194 	hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
2195 
2196 	adf_gen2_init_hw_csr_info(&hw_data->csr_info);
2197 	adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
2198 	hw_data->csr_info.arb_enable_mask = 0xF;
2199 }
2200 
2201 void
2202 adf_clean_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
2203 {
2204 	hw_data->dev_class->instances--;
2205 }
2206 
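/* Detach a sysctl OID from the device sysctl context and remove it. */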
2207 void
2208 remove_oid(struct adf_accel_dev *accel_dev, struct sysctl_oid *oid)
2209 {
2210 	struct sysctl_ctx_list *qat_sysctl_ctx;
2211 	int ret;
2212 
2213 	qat_sysctl_ctx =
2214 	    device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
2215 
2216 	ret = sysctl_ctx_entry_del(qat_sysctl_ctx, oid);
2217 	if (ret)
2218 		device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");
2219 
2220 	ret = sysctl_remove_oid(oid, 1, 1);
2221 	if (ret)
2222 		device_printf(GET_DEV(accel_dev), "Failed to delete oid\n");
2223 }
2224