xref: /freebsd/sys/dev/qat/qat_common/qat_hal.c (revision 271171e0)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include "qat_freebsd.h"
5 #include "adf_cfg.h"
6 #include "adf_common_drv.h"
7 #include "adf_accel_devices.h"
8 #include "icp_qat_uclo.h"
9 #include "icp_qat_fw.h"
10 #include "icp_qat_fw_init_admin.h"
11 #include "adf_cfg_strings.h"
12 #include "adf_transport_access_macros.h"
13 #include "adf_transport_internal.h"
14 #include <linux/delay.h>
15 #include "adf_accel_devices.h"
16 #include "adf_common_drv.h"
17 #include "icp_qat_hal.h"
18 #include "icp_qat_uclo.h"
19 
20 #define BAD_REGADDR 0xffff
21 #define MAX_RETRY_TIMES 1000000
22 #define INIT_CTX_ARB_VALUE 0x0
23 #define INIT_CTX_ENABLE_VALUE 0x0
24 #define INIT_PC_VALUE 0x0
25 #define INIT_WAKEUP_EVENTS_VALUE 0x1
26 #define INIT_SIG_EVENTS_VALUE 0x1
27 #define INIT_CCENABLE_VALUE 0x2000
28 #define RST_CSR_QAT_LSB 20
29 #define RST_CSR_AE_LSB 0
30 #define MC_TIMESTAMP_ENABLE (0x1 << 7)
31 
32 #define IGNORE_W1C_MASK                                                        \
33 	((~(1 << CE_BREAKPOINT_BITPOS)) &                                      \
34 	 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) &                         \
35 	 (~(1 << CE_REG_PAR_ERR_BITPOS)))
36 #define INSERT_IMMED_GPRA_CONST(inst, const_val)                               \
37 	(inst = ((inst & 0xFFFF00C03FFull) |                                   \
38 		 ((((const_val) << 12) & 0x0FF00000ull) |                      \
39 		  (((const_val) << 10) & 0x0003FC00ull))))
40 #define INSERT_IMMED_GPRB_CONST(inst, const_val)                               \
41 	(inst = ((inst & 0xFFFF00FFF00ull) |                                   \
42 		 ((((const_val) << 12) & 0x0FF00000ull) |                      \
43 		  (((const_val) << 0) & 0x000000FFull))))
44 
45 #define AE(handle, ae) ((handle)->hal_handle->aes[ae])
46 
/*
 * Microword templates written into AE control stores.  'inst' is the
 * microcode routine loaded by qat_hal_clear_gpr() to zero the GPR and
 * transfer register banks of each accel engine.  'inst_4b' is a shorter
 * sequence; it is not referenced in this part of the file — presumably
 * used by a 4-byte local-memory init path elsewhere (verify against the
 * rest of the file).
 */
static const uint64_t inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull,
				    0x0F040000300ull, 0x0F440000300ull,
				    0x0FC066C0000ull, 0x0F0000C0300ull,
				    0x0F0000C0300ull, 0x0F0000C0300ull,
				    0x0A021000000ull };

static const uint64_t inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};
77 
78 void
79 qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
80 		     unsigned char ae,
81 		     unsigned int ctx_mask)
82 {
83 	AE(handle, ae).live_ctx_mask = ctx_mask;
84 }
85 
86 #define CSR_RETRY_TIMES 500
87 static int
88 qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
89 		  unsigned char ae,
90 		  unsigned int csr,
91 		  unsigned int *value)
92 {
93 	unsigned int iterations = CSR_RETRY_TIMES;
94 
95 	do {
96 		*value = GET_AE_CSR(handle, ae, csr);
97 		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
98 			return 0;
99 	} while (iterations--);
100 
101 	pr_err("QAT: Read CSR timeout\n");
102 	return EFAULT;
103 }
104 
105 static int
106 qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
107 		  unsigned char ae,
108 		  unsigned int csr,
109 		  unsigned int value)
110 {
111 	unsigned int iterations = CSR_RETRY_TIMES;
112 
113 	do {
114 		SET_AE_CSR(handle, ae, csr, value);
115 		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
116 			return 0;
117 	} while (iterations--);
118 
119 	pr_err("QAT: Write CSR Timeout\n");
120 	return EFAULT;
121 }
122 
123 static void
124 qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
125 			 unsigned char ae,
126 			 unsigned char ctx,
127 			 unsigned int *events)
128 {
129 	unsigned int cur_ctx;
130 
131 	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
132 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
133 	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
134 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
135 }
136 
static int
qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae,
		    unsigned int cycles,
		    int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	/* Seed with ABO set: when chk_inactive is 0 this never gets
	 * overwritten, so the early-exit branch below cannot fire and
	 * the full 'cycles' delay is always honoured. */
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	/*
	 * Delay by polling the AE's 16-bit PROFILE_COUNT cycle counter.
	 * Returns 0 when 'cycles' cycles have elapsed, or — when
	 * 'chk_inactive' is set — earlier, once at least 8 cycles have
	 * passed and the AE reports not busy (ACS_ABO clear).  Returns
	 * EFAULT if MAX_RETRY_TIMES polls expire first.
	 */
	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);

		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
		cur_cnt &= 0xffff;
		elapsed_cycles = cur_cnt - base_cnt;

		/* Counter is 16 bits wide; undo wrap-around. */
		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 time cycles elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (times < 0) {
		pr_err("QAT: wait_num_cycles time out\n");
		return EFAULT;
	}
	return 0;
}
171 
void
qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh)
{
	/*
	 * AEs are paired (0/1, 2/3, ...): the neighbour of an odd AE is
	 * the preceding even one and vice versa.
	 */
	if (ae & 0x1)
		*ae_neigh = ae - 1;
	else
		*ae_neigh = ae + 1;
}
177 
178 #define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
179 #define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
180 
181 int
182 qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
183 			unsigned char ae,
184 			unsigned char mode)
185 {
186 	unsigned int csr, new_csr;
187 
188 	if (mode != 4 && mode != 8) {
189 		pr_err("QAT: bad ctx mode=%d\n", mode);
190 		return EINVAL;
191 	}
192 
193 	/* Sets the accelaration engine context mode to either four or eight */
194 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
195 	csr = IGNORE_W1C_MASK & csr;
196 	new_csr = (mode == 4) ? SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
197 				CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
198 	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
199 	return 0;
200 }
201 
202 int
203 qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
204 		       unsigned char ae,
205 		       unsigned char mode)
206 {
207 	unsigned int csr, new_csr;
208 
209 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
210 	csr &= IGNORE_W1C_MASK;
211 
212 	new_csr = (mode) ? SET_BIT(csr, CE_NN_MODE_BITPOS) :
213 			   CLR_BIT(csr, CE_NN_MODE_BITPOS);
214 
215 	if (new_csr != csr)
216 		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
217 
218 	return 0;
219 }
220 
221 int
222 qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
223 		       unsigned char ae,
224 		       enum icp_qat_uof_regtype lm_type,
225 		       unsigned char mode)
226 {
227 	unsigned int csr, new_csr;
228 
229 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
230 	csr &= IGNORE_W1C_MASK;
231 	switch (lm_type) {
232 	case ICP_LMEM0:
233 		new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
234 				   CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
235 		break;
236 	case ICP_LMEM1:
237 		new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
238 				   CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
239 		break;
240 	case ICP_LMEM2:
241 		new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
242 				   CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
243 		break;
244 	case ICP_LMEM3:
245 		new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
246 				   CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
247 		break;
248 	default:
249 		pr_err("QAT: lmType = 0x%x\n", lm_type);
250 		return EINVAL;
251 	}
252 
253 	if (new_csr != csr)
254 		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
255 	return 0;
256 }
257 
258 void
259 qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
260 			   unsigned char ae,
261 			   unsigned char mode)
262 {
263 	unsigned int csr, new_csr;
264 
265 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
266 	csr &= IGNORE_W1C_MASK;
267 	new_csr = (mode) ? SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
268 			   CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
269 	if (new_csr != csr)
270 		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
271 }
272 
273 void
274 qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle,
275 			unsigned char ae,
276 			unsigned char mode)
277 {
278 	unsigned int csr, new_csr;
279 
280 	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr);
281 	new_csr = (mode) ? SET_BIT(csr, MMC_SHARE_CS_BITPOS) :
282 			   CLR_BIT(csr, MMC_SHARE_CS_BITPOS);
283 	if (new_csr != csr)
284 		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, new_csr);
285 }
286 
287 static unsigned short
288 qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num)
289 {
290 	unsigned short reg_addr;
291 
292 	switch (type) {
293 	case ICP_GPA_ABS:
294 	case ICP_GPB_ABS:
295 		reg_addr = 0x80 | (reg_num & 0x7f);
296 		break;
297 	case ICP_GPA_REL:
298 	case ICP_GPB_REL:
299 		reg_addr = reg_num & 0x1f;
300 		break;
301 	case ICP_SR_RD_REL:
302 	case ICP_SR_WR_REL:
303 	case ICP_SR_REL:
304 		reg_addr = 0x180 | (reg_num & 0x1f);
305 		break;
306 	case ICP_SR_ABS:
307 		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
308 		break;
309 	case ICP_DR_RD_REL:
310 	case ICP_DR_WR_REL:
311 	case ICP_DR_REL:
312 		reg_addr = 0x1c0 | (reg_num & 0x1f);
313 		break;
314 	case ICP_DR_ABS:
315 		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
316 		break;
317 	case ICP_NEIGH_REL:
318 		reg_addr = 0x280 | (reg_num & 0x1f);
319 		break;
320 	case ICP_LMEM0:
321 		reg_addr = 0x200;
322 		break;
323 	case ICP_LMEM1:
324 		reg_addr = 0x220;
325 		break;
326 	case ICP_LMEM2:
327 		reg_addr = 0x2c0;
328 		break;
329 	case ICP_LMEM3:
330 		reg_addr = 0x2e0;
331 		break;
332 	case ICP_NO_DEST:
333 		reg_addr = 0x300 | (reg_num & 0xff);
334 		break;
335 	default:
336 		reg_addr = BAD_REGADDR;
337 		break;
338 	}
339 	return reg_addr;
340 }
341 
342 void
343 qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
344 {
345 	unsigned int ae_reset_csr[MAX_CPP_NUM];
346 	unsigned int ae_reset_val[MAX_CPP_NUM];
347 	unsigned int valid_ae_mask, valid_slice_mask;
348 	unsigned int cpp_num = 1;
349 	unsigned int i;
350 
351 	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
352 		ae_reset_csr[0] = ICP_RESET_CPP0;
353 		ae_reset_csr[1] = ICP_RESET_CPP1;
354 		if (handle->hal_handle->ae_mask > 0xffff)
355 			++cpp_num;
356 	} else {
357 		ae_reset_csr[0] = ICP_RESET;
358 	}
359 
360 	for (i = 0; i < cpp_num; i++) {
361 		if (i == 0) {
362 			valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF;
363 			valid_slice_mask =
364 			    handle->hal_handle->slice_mask & 0x3F;
365 		} else {
366 			valid_ae_mask =
367 			    (handle->hal_handle->ae_mask >> AES_PER_CPP) &
368 			    0xFFFF;
369 			valid_slice_mask =
370 			    (handle->hal_handle->slice_mask >> SLICES_PER_CPP) &
371 			    0x3F;
372 		}
373 
374 		ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
375 		ae_reset_val[i] |= valid_ae_mask << RST_CSR_AE_LSB;
376 		ae_reset_val[i] |= valid_slice_mask << RST_CSR_QAT_LSB;
377 		SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]);
378 	}
379 }
380 
381 static void
382 qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
383 		    unsigned char ae,
384 		    unsigned int ctx_mask,
385 		    unsigned int ae_csr,
386 		    unsigned int csr_val)
387 {
388 	unsigned int ctx, cur_ctx;
389 
390 	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
391 
392 	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
393 		if (!(ctx_mask & (1 << ctx)))
394 			continue;
395 		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
396 		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
397 	}
398 
399 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
400 }
401 
402 static void
403 qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
404 		    unsigned char ae,
405 		    unsigned char ctx,
406 		    unsigned int ae_csr,
407 		    unsigned int *csr_val)
408 {
409 	unsigned int cur_ctx;
410 
411 	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
412 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
413 	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
414 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
415 }
416 
417 static void
418 qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
419 		      unsigned char ae,
420 		      unsigned int ctx_mask,
421 		      unsigned int events)
422 {
423 	unsigned int ctx, cur_ctx;
424 
425 	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
426 	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
427 		if (!(ctx_mask & (1 << ctx)))
428 			continue;
429 		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
430 		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
431 	}
432 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
433 }
434 
435 static void
436 qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
437 			 unsigned char ae,
438 			 unsigned int ctx_mask,
439 			 unsigned int events)
440 {
441 	unsigned int ctx, cur_ctx;
442 
443 	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
444 	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
445 		if (!(ctx_mask & (1 << ctx)))
446 			continue;
447 		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
448 		qat_hal_wr_ae_csr(handle,
449 				  ae,
450 				  CTX_WAKEUP_EVENTS_INDIRECT,
451 				  events);
452 	}
453 	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
454 }
455 
456 static int
457 qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
458 {
459 	unsigned int base_cnt, cur_cnt;
460 	unsigned char ae;
461 	unsigned long ae_mask = handle->hal_handle->ae_mask;
462 	int times = MAX_RETRY_TIMES;
463 
464 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
465 	{
466 		qat_hal_rd_ae_csr(handle,
467 				  ae,
468 				  PROFILE_COUNT,
469 				  (unsigned int *)&base_cnt);
470 		base_cnt &= 0xffff;
471 
472 		do {
473 			qat_hal_rd_ae_csr(handle,
474 					  ae,
475 					  PROFILE_COUNT,
476 					  (unsigned int *)&cur_cnt);
477 			cur_cnt &= 0xffff;
478 		} while (times-- && (cur_cnt == base_cnt));
479 
480 		if (times < 0) {
481 			pr_err("QAT: AE%d is inactive!!\n", ae);
482 			return EFAULT;
483 		}
484 	}
485 
486 	return 0;
487 }
488 
489 int
490 qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
491 			unsigned int ae)
492 {
493 	unsigned int enable = 0, active = 0;
494 
495 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
496 	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
497 	if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
498 	    (active & (1 << ACS_ABO_BITPOS)))
499 		return 1;
500 	else
501 		return 0;
502 }
503 
504 static void
505 qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
506 {
507 	unsigned int misc_ctl_csr, misc_ctl;
508 	unsigned char ae;
509 	unsigned long ae_mask = handle->hal_handle->ae_mask;
510 
511 	misc_ctl_csr =
512 	    (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) ?
513 	    MISC_CONTROL_C4XXX :
514 	    MISC_CONTROL;
515 	/* stop the timestamp timers */
516 	misc_ctl = GET_GLB_CSR(handle, misc_ctl_csr);
517 	if (misc_ctl & MC_TIMESTAMP_ENABLE)
518 		SET_GLB_CSR(handle,
519 			    misc_ctl_csr,
520 			    misc_ctl & (~MC_TIMESTAMP_ENABLE));
521 
522 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
523 	{
524 		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
525 		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
526 	}
527 	/* start timestamp timers */
528 	SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
529 }
530 
531 #define ESRAM_AUTO_TINIT BIT(2)
532 #define ESRAM_AUTO_TINIT_DONE BIT(3)
533 #define ESRAM_AUTO_INIT_USED_CYCLES (1640)
534 #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
535 
536 static int
537 qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
538 {
539 	uintptr_t csr_addr =
540 	    ((uintptr_t)handle->hal_ep_csr_addr_v + ESRAM_AUTO_INIT_CSR_OFFSET);
541 	unsigned int csr_val;
542 	int times = 30;
543 
544 	if (pci_get_device(GET_DEV(handle->accel_dev)) !=
545 	    ADF_DH895XCC_PCI_DEVICE_ID)
546 		return 0;
547 
548 	csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
549 	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
550 		return 0;
551 	csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
552 	csr_val |= ESRAM_AUTO_TINIT;
553 
554 	ADF_CSR_WR(handle->hal_misc_addr_v, csr_addr, csr_val);
555 	do {
556 		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
557 		csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
558 
559 	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
560 	if (times < 0) {
561 		pr_err("QAT: Fail to init eSram!\n");
562 		return EFAULT;
563 	}
564 	return 0;
565 }
566 
567 #define SHRAM_INIT_CYCLES 2060
int
qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr[MAX_CPP_NUM];
	unsigned int ae_reset_val[MAX_CPP_NUM];
	unsigned int cpp_num = 1;
	unsigned int valid_ae_mask, valid_slice_mask;
	unsigned char ae;
	unsigned int i;
	unsigned int clk_csr[MAX_CPP_NUM];
	unsigned int clk_val[MAX_CPP_NUM];
	unsigned int times = 100;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	/*
	 * Take all valid AEs and accelerator slices out of reset, enable
	 * their clocks, verify the AEs are executing, and then program a
	 * known default state (PC, arb, wakeup/signal events) into every
	 * context.  Returns 0 on success, EFAULT on any failure.
	 *
	 * Gen3 parts have per-CPP-cluster reset/clock CSRs (two clusters
	 * when more than 16 AEs exist); older parts have single CSRs.
	 */
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		ae_reset_csr[1] = ICP_RESET_CPP1;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0;
		clk_csr[1] = ICP_GLOBAL_CLK_ENABLE_CPP1;
		if (handle->hal_handle->ae_mask > 0xffff)
			++cpp_num;
	} else {
		ae_reset_csr[0] = ICP_RESET;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE;
	}

	for (i = 0; i < cpp_num; i++) {
		/* Cluster 0 owns the low halves of the masks, cluster 1
		 * the halves above AES_PER_CPP / SLICES_PER_CPP. */
		if (i == 0) {
			valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF;
			valid_slice_mask =
			    handle->hal_handle->slice_mask & 0x3F;
		} else {
			valid_ae_mask =
			    (handle->hal_handle->ae_mask >> AES_PER_CPP) &
			    0xFFFF;
			valid_slice_mask =
			    (handle->hal_handle->slice_mask >> SLICES_PER_CPP) &
			    0x3F;
		}
		/* write to the reset csr */
		ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		ae_reset_val[i] &= ~(valid_ae_mask << RST_CSR_AE_LSB);
		ae_reset_val[i] &= ~(valid_slice_mask << RST_CSR_QAT_LSB);
		/* Retry the write until readback confirms all selected
		 * reset bits have cleared ('times' is a shared budget). */
		do {
			SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]);
			if (!(times--))
				goto out_err;
			ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		} while (
		    (valid_ae_mask | (valid_slice_mask << RST_CSR_QAT_LSB)) &
		    ae_reset_val[i]);
		/* enable clock */
		clk_val[i] = GET_GLB_CSR(handle, clk_csr[i]);
		clk_val[i] |= valid_ae_mask << 0;
		clk_val[i] |= valid_slice_mask << 20;
		SET_GLB_CSR(handle, clk_csr[i], clk_val[i]);
	}
	/* Confirm the AEs' cycle counters are ticking. */
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_hal_wr_ae_csr(handle,
				  ae,
				  CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle,
					 ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle,
				      ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	/* eSRAM auto-init (DH895xCC only) and shared-RAM settle delay. */
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return EFAULT;
}
663 
664 static void
665 qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
666 		    unsigned char ae,
667 		    unsigned int ctx_mask)
668 {
669 	unsigned int ctx;
670 
671 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
672 	ctx &= IGNORE_W1C_MASK &
673 	    (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
674 	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
675 }
676 
static uint64_t
qat_hal_parity_64bit(uint64_t word)
{
	unsigned int shift;

	/* Fold the word onto itself; bit 0 ends up as the XOR (parity)
	 * of all 64 input bits. */
	for (shift = 32; shift != 0; shift >>= 1)
		word ^= word >> shift;
	return word & 1;
}
688 
static uint64_t
qat_hal_set_uword_ecc(uint64_t uword)
{
	/* One parity mask per ECC bit; none of the masks cover the ECC
	 * field itself, so the bits can be computed in any order. */
	static const uint64_t ecc_mask[] = {
		0xff800007fffULL, 0x1f801ff801fULL, 0xe387e0781e1ULL,
		0x7cb8e388e22ULL, 0xaf5b2c93244ULL, 0xf56d5525488ULL,
		0xdaf69a46910ULL,
	};
	unsigned int bit;

	/* Clear the seven ECC bits (44..50), then recompute each one as
	 * the parity of the microword under its mask. */
	uword &= ~(0x7fULL << 0x2C);
	for (bit = 0; bit < 7; bit++)
		uword |= qat_hal_parity_64bit(ecc_mask[bit] & uword)
		    << (0x2C + bit);
	return uword;
}
708 
709 void
710 qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
711 		  unsigned char ae,
712 		  unsigned int uaddr,
713 		  unsigned int words_num,
714 		  const uint64_t *uword)
715 {
716 	unsigned int ustore_addr;
717 	unsigned int i;
718 
719 	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
720 	uaddr |= UA_ECS;
721 	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
722 	for (i = 0; i < words_num; i++) {
723 		unsigned int uwrd_lo, uwrd_hi;
724 		uint64_t tmp;
725 
726 		tmp = qat_hal_set_uword_ecc(uword[i]);
727 		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
728 		uwrd_hi = (unsigned int)(tmp >> 0x20);
729 		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
730 		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
731 	}
732 	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
733 }
734 
735 void
736 qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle,
737 			   unsigned char ae,
738 			   unsigned int uaddr,
739 			   unsigned int words_num,
740 			   u64 *uword)
741 {
742 	u64 *even_uwrods, *odd_uwords;
743 	unsigned char neigh_ae, odd_ae, even_ae;
744 	int i, even_cpy_cnt = 0, odd_cpy_cnt = 0;
745 
746 	even_uwrods =
747 	    malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO);
748 	odd_uwords =
749 	    malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO);
750 	qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
751 	if (ae & 1) {
752 		odd_ae = ae;
753 		even_ae = neigh_ae;
754 	} else {
755 		odd_ae = neigh_ae;
756 		even_ae = ae;
757 	}
758 	for (i = 0; i < words_num; i++) {
759 		if ((uaddr + i) & 1)
760 			odd_uwords[odd_cpy_cnt++] = uword[i];
761 		else
762 			even_uwrods[even_cpy_cnt++] = uword[i];
763 	}
764 	if (even_cpy_cnt)
765 		qat_hal_wr_uwords(handle,
766 				  even_ae,
767 				  (uaddr + 1) / 2,
768 				  even_cpy_cnt,
769 				  even_uwrods);
770 	if (odd_cpy_cnt)
771 		qat_hal_wr_uwords(
772 		    handle, odd_ae, uaddr / 2, odd_cpy_cnt, odd_uwords);
773 	free(even_uwrods, M_QAT);
774 	free(odd_uwords, M_QAT);
775 }
776 
777 static void
778 qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
779 		   unsigned char ae,
780 		   unsigned int ctx_mask)
781 {
782 	unsigned int ctx;
783 
784 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
785 	ctx &= IGNORE_W1C_MASK;
786 	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
787 	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
788 	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
789 }
790 
791 static void
792 qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
793 {
794 	unsigned char ae;
795 	unsigned short reg;
796 	unsigned long ae_mask = handle->hal_handle->ae_mask;
797 
798 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
799 	{
800 		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
801 			qat_hal_init_rd_xfer(
802 			    handle, ae, 0, ICP_SR_RD_ABS, reg, 0);
803 			qat_hal_init_rd_xfer(
804 			    handle, ae, 0, ICP_DR_RD_ABS, reg, 0);
805 		}
806 	}
807 }
808 
static int
qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned int savctx = 0;
	unsigned int scs_flag = 0;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	int ret = 0;

	/*
	 * Clear every AE's register banks by loading the 'inst' microcode
	 * routine into each AE's control store and running it on all
	 * contexts, then restoring the pre-existing AE state.
	 */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		/* Save, then temporarily clear, the shared-control-store
		 * bit so the microcode runs from this AE's own store. */
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
		csr_val &= IGNORE_W1C_MASK;
		csr_val |= CE_NN_MODE;
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
		/* Load the clearing routine at ustore address 0 and point
		 * every context's PC at it. */
		qat_hal_wr_uwords(
		    handle, ae, 0, ARRAY_SIZE(inst), (const uint64_t *)inst);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ctx_mask,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		/*
		 * NOTE(review): 'savctx' and 'scs_flag' are overwritten on
		 * each iteration, so the restore loop below only sees the
		 * values saved for the LAST AE processed here — confirm
		 * all AEs share the same pre-clear state, or these should
		 * be tracked per AE.
		 */
		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(
		    handle, ae, ctx_mask, CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		/* Kick off execution on all contexts. */
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		/* wait for AE to finish */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		/* 'times' is a retry budget shared across all AEs. */
		if (times < 0) {
			pr_err("QAT: clear GPR of AE %d failed", ae);
			return EINVAL;
		}
		/* Stop the contexts and restore the saved AE state. */
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		if (scs_flag)
			csr_val |= (1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle,
				  ae,
				  CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ctx_mask,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle,
					 ae,
					 ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle,
				      ae,
				      ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}
891 
892 static int
893 qat_hal_check_imr(struct icp_qat_fw_loader_handle *handle)
894 {
895 	device_t dev = accel_to_pci_dev(handle->accel_dev);
896 	u8 reg_val = 0;
897 
898 	if (pci_get_device(GET_DEV(handle->accel_dev)) !=
899 		ADF_C3XXX_PCI_DEVICE_ID &&
900 	    pci_get_device(GET_DEV(handle->accel_dev)) !=
901 		ADF_200XX_PCI_DEVICE_ID)
902 		return 0;
903 
904 	reg_val = pci_read_config(dev, 0x04, 1);
905 	/*
906 	 * PCI command register memory bit and rambaseaddr_lo address
907 	 * are checked to confirm IMR2 is enabled in BIOS settings
908 	 */
909 	if ((reg_val & 0x2) && GET_FCU_CSR(handle, FCU_RAMBASE_ADDR_LO))
910 		return 0;
911 
912 	return EINVAL;
913 }
914 
int
qat_hal_init(struct adf_accel_dev *accel_dev)
{
	unsigned char ae;
	unsigned int cap_offset, ae_offset, ep_offset;
	unsigned int sram_offset = 0;
	unsigned int max_en_ae_id = 0;
	int ret = 0;
	unsigned long ae_mask;
	struct icp_qat_fw_loader_handle *handle;
	if (!accel_dev) {
		return EFAULT;
	}
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
	struct adf_bar *sram_bar;

	/*
	 * Construct and initialize the HAL loader handle for this device:
	 * map CSR regions, bring all AEs out of reset, clear their
	 * registers, and store the handle in accel_dev->fw_loader.
	 * Returns 0 on success or an errno value on failure.
	 */
	handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);

	handle->hal_misc_addr_v = misc_bar->virt_addr;
	handle->accel_dev = accel_dev;
	/* SRAM mapping is only needed on DH895xCC and Gen3 devices; on
	 * Gen3 the MMP region sits at a fixed offset into the BAR. */
	if (pci_get_device(GET_DEV(handle->accel_dev)) ==
		ADF_DH895XCC_PCI_DEVICE_ID ||
	    IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		sram_bar =
		    &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
		if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev))))
			sram_offset =
			    0x400000 + accel_dev->aram_info->mmp_region_offset;
		handle->hal_sram_addr_v = sram_bar->virt_addr;
		handle->hal_sram_offset = sram_offset;
		handle->hal_sram_size = sram_bar->size;
	}
	/* Resolve the device-specific CSR region offsets. */
	GET_CSR_OFFSET(pci_get_device(GET_DEV(handle->accel_dev)),
		       cap_offset,
		       ae_offset,
		       ep_offset);
	handle->hal_cap_g_ctl_csr_addr_v = cap_offset;
	handle->hal_cap_ae_xfer_csr_addr_v = ae_offset;
	handle->hal_ep_csr_addr_v = ep_offset;
	handle->hal_cap_ae_local_csr_addr_v =
	    ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
	     LOCAL_TO_XFER_REG_OFFSET);
	/* All parts except DH895xCC use authenticated firmware loading. */
	handle->fw_auth = (pci_get_device(GET_DEV(handle->accel_dev)) ==
			   ADF_DH895XCC_PCI_DEVICE_ID) ?
	    false :
	    true;
	if (handle->fw_auth && qat_hal_check_imr(handle)) {
		device_printf(GET_DEV(accel_dev), "IMR2 not enabled in BIOS\n");
		ret = EINVAL;
		goto out_hal_handle;
	}

	handle->hal_handle =
	    malloc(sizeof(*handle->hal_handle), M_QAT, M_WAITOK | M_ZERO);
	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
	handle->hal_handle->ae_mask = hw_data->ae_mask;
	handle->hal_handle->slice_mask = hw_data->accel_mask;
	handle->cfg_ae_mask = 0xFFFFFFFF;
	/* create AE objects */
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		handle->hal_handle->upc_mask = 0xffff;
		handle->hal_handle->max_ustore = 0x2000;
	} else {
		handle->hal_handle->upc_mask = 0x1ffff;
		handle->hal_handle->max_ustore = 0x4000;
	}

	ae_mask = hw_data->ae_mask;

	/* Initialize per-AE bookkeeping for every present AE. */
	for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE)
	{
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].ustore_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].live_ctx_mask =
		    ICP_QAT_UCLO_AE_ALL_CTX;
		max_en_ae_id = ae;
	}
	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
	/* take all AEs out of reset */
	if (qat_hal_clr_reset(handle)) {
		device_printf(GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
		ret = EIO;
		goto out_err;
	}
	/* Zero the transfer registers; GPRs only when loading unsigned
	 * firmware (signed firmware handles this itself). */
	qat_hal_clear_xfer(handle);
	if (!handle->fw_auth) {
		if (qat_hal_clear_gpr(handle)) {
			ret = EIO;
			goto out_err;
		}
	}

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		unsigned int csr_val = 0;

		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
		csr_val |= 0x1;
		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
	}
	accel_dev->fw_loader->fw_loader = handle;
	return 0;

out_err:
	free(handle->hal_handle, M_QAT);
out_hal_handle:
	free(handle, M_QAT);
	return ret;
}
1031 
1032 void
1033 qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
1034 {
1035 	if (!handle)
1036 		return;
1037 	free(handle->hal_handle, M_QAT);
1038 	free(handle, M_QAT);
1039 }
1040 
1041 void
1042 qat_hal_start(struct icp_qat_fw_loader_handle *handle,
1043 	      unsigned char ae,
1044 	      unsigned int ctx_mask)
1045 {
1046 	int retry = 0;
1047 	unsigned int fcu_sts = 0;
1048 	unsigned int fcu_ctl_csr, fcu_sts_csr;
1049 
1050 	if (handle->fw_auth) {
1051 		if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1052 			fcu_ctl_csr = FCU_CONTROL_C4XXX;
1053 			fcu_sts_csr = FCU_STATUS_C4XXX;
1054 
1055 		} else {
1056 			fcu_ctl_csr = FCU_CONTROL;
1057 			fcu_sts_csr = FCU_STATUS;
1058 		}
1059 		SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
1060 		do {
1061 			pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
1062 			fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
1063 			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
1064 				return;
1065 		} while (retry++ < FW_AUTH_MAX_RETRY);
1066 		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n",
1067 		       ae,
1068 		       fcu_sts);
1069 	} else {
1070 		qat_hal_put_wakeup_event(handle,
1071 					 ae,
1072 					 (~ctx_mask) & ICP_QAT_UCLO_AE_ALL_CTX,
1073 					 0x10000);
1074 		qat_hal_enable_ctx(handle, ae, ctx_mask);
1075 	}
1076 }
1077 
1078 void
1079 qat_hal_stop(struct icp_qat_fw_loader_handle *handle,
1080 	     unsigned char ae,
1081 	     unsigned int ctx_mask)
1082 {
1083 	if (!handle->fw_auth)
1084 		qat_hal_disable_ctx(handle, ae, ctx_mask);
1085 }
1086 
1087 void
1088 qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
1089 	       unsigned char ae,
1090 	       unsigned int ctx_mask,
1091 	       unsigned int upc)
1092 {
1093 	qat_hal_wr_indr_csr(handle,
1094 			    ae,
1095 			    ctx_mask,
1096 			    CTX_STS_INDIRECT,
1097 			    handle->hal_handle->upc_mask & upc);
1098 }
1099 
1100 static void
1101 qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
1102 		   unsigned char ae,
1103 		   unsigned int uaddr,
1104 		   unsigned int words_num,
1105 		   uint64_t *uword)
1106 {
1107 	unsigned int i, uwrd_lo, uwrd_hi;
1108 	unsigned int ustore_addr, misc_control;
1109 	unsigned int scs_flag = 0;
1110 
1111 	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
1112 	scs_flag = misc_control & (0x1 << MMC_SHARE_CS_BITPOS);
1113 	/*disable scs*/
1114 	qat_hal_wr_ae_csr(handle,
1115 			  ae,
1116 			  AE_MISC_CONTROL,
1117 			  misc_control & 0xfffffffb);
1118 	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
1119 	uaddr |= UA_ECS;
1120 	for (i = 0; i < words_num; i++) {
1121 		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
1122 		uaddr++;
1123 		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
1124 		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
1125 		uword[i] = uwrd_hi;
1126 		uword[i] = (uword[i] << 0x20) | uwrd_lo;
1127 	}
1128 	if (scs_flag)
1129 		misc_control |= (0x1 << MMC_SHARE_CS_BITPOS);
1130 	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
1131 	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
1132 }
1133 
1134 void
1135 qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
1136 		unsigned char ae,
1137 		unsigned int uaddr,
1138 		unsigned int words_num,
1139 		unsigned int *data)
1140 {
1141 	unsigned int i, ustore_addr;
1142 
1143 	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
1144 	uaddr |= UA_ECS;
1145 	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
1146 	for (i = 0; i < words_num; i++) {
1147 		unsigned int uwrd_lo, uwrd_hi, tmp;
1148 
1149 		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
1150 		    ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff);
1151 		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
1152 		uwrd_hi |= (bitcount32(data[i] & 0xffff) & 0x1) << 8;
1153 		tmp = ((data[i] >> 0x10) & 0xffff);
1154 		uwrd_hi |= (bitcount32(tmp) & 0x1) << 9;
1155 		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
1156 		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
1157 	}
1158 	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
1159 }
1160 
/* Largest sequence whose overwritten microwords are saved and restored. */
#define MAX_EXEC_INST 100
/*
 * Execute a short microcode sequence (micro_inst, inst_num words) on one
 * context of an AE and optionally report the final PC through endpc.
 *
 * The routine snapshots the context's indirect LM-address CSRs (plus the
 * gen3-only LM2/LM3 and T-index CSRs), the SCS bit, the overwritten
 * microwords (only when inst_num <= MAX_EXEC_INST), wakeup events, PC,
 * context enables, CC enable, active context, arbiter control and signal
 * state.  It then loads the sequence at ustore address 0, starts the
 * context, waits up to max_cycle cycles, and restores everything saved.
 *
 * code_off != 0 clears a CC_ENABLE bit for the duration of the run.
 *
 * Returns 0 on success, EINVAL for a bad sequence, EFAULT on timeout.
 * NOTE(review): on the EFAULT path none of the saved state is restored,
 * and for sequences longer than MAX_EXEC_INST the original microwords at
 * address 0 are clobbered — callers appear to rely on reloading the
 * image afterwards; confirm before reusing this helper elsewhere.
 */
static int
qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			unsigned char ctx,
			uint64_t *micro_inst,
			unsigned int inst_num,
			int code_off,
			unsigned int max_cycle,
			unsigned int *endpc)
{
	uint64_t savuwords[MAX_EXEC_INST];
	unsigned int ind_lm_addr0, ind_lm_addr1;
	unsigned int ind_lm_addr2, ind_lm_addr3;
	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
	unsigned int ind_lm_addr_byte2, ind_lm_addr_byte3;
	unsigned int ind_t_index, ind_t_index_byte;
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx, scs_flag;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	/* The sequence must fit in the control store. */
	if (inst_num > handle->hal_handle->max_ustore || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return EINVAL;
	}
	/* save current context */
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &ind_lm_addr_byte0);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, &ind_lm_addr_byte1);
	/* Gen3 parts have two extra LM address pairs and a T-index. */
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, LM_ADDR_2_INDIRECT, &ind_lm_addr2);
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, LM_ADDR_3_INDIRECT, &ind_lm_addr3);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    &ind_lm_addr_byte2);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    &ind_lm_addr_byte3);
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, INDIRECT_T_INDEX, &ind_t_index);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    &ind_t_index_byte);
	}
	/* Disable shared control store while we own ustore address 0. */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	/* Save the microwords we are about to overwrite, if they fit. */
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	/* Avoid re-triggering write-1-to-clear bits on restore. */
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, &ind_cnt_sig);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		qat_hal_rd_indr_csr(
		    handle, ae, ctx, CTX_STS_INDIRECT, &ctx_status);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* restore the saved context state */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) :
				CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX,
			    ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX,
			    ind_lm_addr_byte1);
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), LM_ADDR_2_INDIRECT, ind_lm_addr2);
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), LM_ADDR_3_INDIRECT, ind_lm_addr3);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    ind_lm_addr_byte2);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    ind_lm_addr_byte3);
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), INDIRECT_T_INDEX, ind_t_index);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    ind_t_index_byte);
	}
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}
1321 
/*
 * Read a context-relative register (GPA or, by default, another bank) by
 * assembling a single instruction that routes the register through the
 * ALU, executing it at ustore address 0, and reading the ALU_OUT CSR.
 * The overwritten microword, active context, arbiter control, SCS bit
 * and context enables are all saved and restored around the execution.
 *
 * Returns 0 on success (status is never set non-zero on the success
 * path) or EINVAL for an unmappable register address.
 */
static int
qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
		   unsigned char ae,
		   unsigned char ctx,
		   enum icp_qat_uof_regtype reg_type,
		   unsigned short reg_num,
		   unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	unsigned int scs_flag = 0;
	unsigned int csr_val = 0, newcsr_val = 0;
	u64 insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return EINVAL;
	}
	/* GPA sources sit in the low operand field; others in bits 10-19. */
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	/* Disable shared control store while we borrow ustore address 0. */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	/* Switch to the target context if it is not already active. */
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	/* Save the microword we are about to replace. */
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	/* Restore the previously active context. */
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	/* Put the SCS bit back the way we found it. */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) :
				CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}
1399 
1400 static int
1401 qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
1402 		   unsigned char ae,
1403 		   unsigned char ctx,
1404 		   enum icp_qat_uof_regtype reg_type,
1405 		   unsigned short reg_num,
1406 		   unsigned int data)
1407 {
1408 	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
1409 	uint64_t insts[] = { 0x0F440000000ull,
1410 			     0x0F040000000ull,
1411 			     0x0F0000C0300ull,
1412 			     0x0E000010000ull };
1413 	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
1414 	const int imm_w1 = 0, imm_w0 = 1;
1415 
1416 	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
1417 	if (dest_addr == BAD_REGADDR) {
1418 		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
1419 		return EINVAL;
1420 	}
1421 
1422 	data16lo = 0xffff & data;
1423 	data16hi = 0xffff & (data >> 0x10);
1424 	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1425 					  (unsigned short)(0xff & data16hi));
1426 	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1427 					   (unsigned short)(0xff & data16lo));
1428 	switch (reg_type) {
1429 	case ICP_GPA_REL:
1430 		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1431 		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1432 		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1433 		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1434 		break;
1435 	default:
1436 		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1437 		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1438 
1439 		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1440 		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1441 		break;
1442 	}
1443 
1444 	return qat_hal_exec_micro_inst(
1445 	    handle, ae, ctx, insts, num_inst, code_off, num_inst * 0x5, NULL);
1446 }
1447 
1448 int
1449 qat_hal_get_ins_num(void)
1450 {
1451 	return ARRAY_SIZE(inst_4b);
1452 }
1453 
1454 static int
1455 qat_hal_concat_micro_code(uint64_t *micro_inst,
1456 			  unsigned int inst_num,
1457 			  unsigned int size,
1458 			  unsigned int addr,
1459 			  unsigned int *value)
1460 {
1461 	int i;
1462 	unsigned int cur_value;
1463 	const uint64_t *inst_arr;
1464 	unsigned int fixup_offset;
1465 	int usize = 0;
1466 	unsigned int orig_num;
1467 	unsigned int delta;
1468 
1469 	orig_num = inst_num;
1470 	fixup_offset = inst_num;
1471 	cur_value = value[0];
1472 	inst_arr = inst_4b;
1473 	usize = ARRAY_SIZE(inst_4b);
1474 	for (i = 0; i < usize; i++)
1475 		micro_inst[inst_num++] = inst_arr[i];
1476 	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
1477 	fixup_offset++;
1478 	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
1479 	fixup_offset++;
1480 	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
1481 	fixup_offset++;
1482 	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
1483 
1484 	delta = inst_num - orig_num;
1485 
1486 	return (int)delta;
1487 }
1488 
1489 static int
1490 qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
1491 			   unsigned char ae,
1492 			   unsigned char ctx,
1493 			   int *pfirst_exec,
1494 			   uint64_t *micro_inst,
1495 			   unsigned int inst_num)
1496 {
1497 	int stat = 0;
1498 	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
1499 	unsigned int gprb0 = 0, gprb1 = 0;
1500 
1501 	if (*pfirst_exec) {
1502 		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
1503 		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
1504 		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
1505 		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
1506 		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
1507 		*pfirst_exec = 0;
1508 	}
1509 	stat = qat_hal_exec_micro_inst(
1510 	    handle, ae, ctx, micro_inst, inst_num, 1, inst_num * 0x5, NULL);
1511 	if (stat != 0)
1512 		return EFAULT;
1513 	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
1514 	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
1515 	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
1516 	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
1517 	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
1518 
1519 	return 0;
1520 }
1521 
1522 int
1523 qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
1524 		    unsigned char ae,
1525 		    struct icp_qat_uof_batch_init *lm_init_header)
1526 {
1527 	struct icp_qat_uof_batch_init *plm_init;
1528 	uint64_t *micro_inst_arry;
1529 	int micro_inst_num;
1530 	int alloc_inst_size;
1531 	int first_exec = 1;
1532 	int stat = 0;
1533 
1534 	if (!lm_init_header)
1535 		return 0;
1536 	plm_init = lm_init_header->next;
1537 	alloc_inst_size = lm_init_header->size;
1538 	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
1539 		alloc_inst_size = handle->hal_handle->max_ustore;
1540 	micro_inst_arry = malloc(alloc_inst_size * sizeof(uint64_t),
1541 				 M_QAT,
1542 				 M_WAITOK | M_ZERO);
1543 	micro_inst_num = 0;
1544 	while (plm_init) {
1545 		unsigned int addr, *value, size;
1546 
1547 		ae = plm_init->ae;
1548 		addr = plm_init->addr;
1549 		value = plm_init->value;
1550 		size = plm_init->size;
1551 		micro_inst_num += qat_hal_concat_micro_code(
1552 		    micro_inst_arry, micro_inst_num, size, addr, value);
1553 		plm_init = plm_init->next;
1554 	}
1555 	/* exec micro codes */
1556 	if (micro_inst_arry && micro_inst_num > 0) {
1557 		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
1558 		stat = qat_hal_exec_micro_init_lm(handle,
1559 						  ae,
1560 						  0,
1561 						  &first_exec,
1562 						  micro_inst_arry,
1563 						  micro_inst_num);
1564 	}
1565 	free(micro_inst_arry, M_QAT);
1566 	return stat;
1567 }
1568 
1569 static int
1570 qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1571 			unsigned char ae,
1572 			unsigned char ctx,
1573 			enum icp_qat_uof_regtype reg_type,
1574 			unsigned short reg_num,
1575 			unsigned int val)
1576 {
1577 	int status = 0;
1578 	unsigned int reg_addr;
1579 	unsigned int ctx_enables;
1580 	unsigned short mask;
1581 	unsigned short dr_offset = 0x10;
1582 
1583 	status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1584 	if (CE_INUSE_CONTEXTS & ctx_enables) {
1585 		if (ctx & 0x1) {
1586 			pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
1587 			return EINVAL;
1588 		}
1589 		mask = 0x1f;
1590 		dr_offset = 0x20;
1591 	} else {
1592 		mask = 0x0f;
1593 	}
1594 	if (reg_num & ~mask)
1595 		return EINVAL;
1596 	reg_addr = reg_num + (ctx << 0x5);
1597 	switch (reg_type) {
1598 	case ICP_SR_RD_REL:
1599 	case ICP_SR_REL:
1600 		SET_AE_XFER(handle, ae, reg_addr, val);
1601 		break;
1602 	case ICP_DR_RD_REL:
1603 	case ICP_DR_REL:
1604 		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
1605 		break;
1606 	default:
1607 		status = EINVAL;
1608 		break;
1609 	}
1610 	return status;
1611 }
1612 
/*
 * Write data into a context-relative write-transfer register.  The value
 * cannot be written directly: a five-microword stub is assembled that
 * loads the two 16-bit halves into GPB register 0 and then moves it to
 * the transfer-register address, and the stub is executed on the AE.
 * The clobbered GPB register is saved before and restored after.
 *
 * Returns 0 on success, EINVAL for a bad context/register, or the status
 * of qat_hal_exec_micro_inst().
 */
static int
qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			unsigned char ctx,
			enum icp_qat_uof_regtype reg_type,
			unsigned short reg_num,
			unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	uint64_t micro_inst[] = { 0x0F440000000ull,
				  0x0F040000000ull,
				  0x0A000000000ull,
				  0x0F0000C0300ull,
				  0x0E000010000ull };
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	/* In 4-ctx mode only even contexts exist; register window doubles. */
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		if (ctx & 0x1) {
			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
			return EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return EINVAL;
	}
	/* Save the GPB register the stub uses as a staging area. */
	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	/* Patch the immediates and register addresses into the stub. */
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	micro_inst[0x2] = micro_inst[0x2] | ((xfr_addr & 0x3ff) << 20) |
	    ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(
	    handle, ae, ctx, micro_inst, num_inst, code_off, dly, NULL);
	/* Restore the staging GPR regardless of execution status. */
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}
1670 
1671 static int
1672 qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1673 		   unsigned char ae,
1674 		   unsigned char ctx,
1675 		   unsigned short nn,
1676 		   unsigned int val)
1677 {
1678 	unsigned int ctx_enables;
1679 	int stat = 0;
1680 
1681 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1682 	ctx_enables &= IGNORE_W1C_MASK;
1683 	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1684 
1685 	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
1686 	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
1687 	return stat;
1688 }
1689 
1690 static int
1691 qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle *handle,
1692 			   unsigned char ae,
1693 			   unsigned short absreg_num,
1694 			   unsigned short *relreg,
1695 			   unsigned char *ctx)
1696 {
1697 	unsigned int ctx_enables;
1698 
1699 	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1700 	if (ctx_enables & CE_INUSE_CONTEXTS) {
1701 		/* 4-ctx mode */
1702 		*relreg = absreg_num & 0x1F;
1703 		*ctx = (absreg_num >> 0x4) & 0x6;
1704 	} else {
1705 		/* 8-ctx mode */
1706 		*relreg = absreg_num & 0x0F;
1707 		*ctx = (absreg_num >> 0x4) & 0x7;
1708 	}
1709 	return 0;
1710 }
1711 
1712 int
1713 qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
1714 		 unsigned char ae,
1715 		 unsigned long ctx_mask,
1716 		 enum icp_qat_uof_regtype reg_type,
1717 		 unsigned short reg_num,
1718 		 unsigned int regdata)
1719 {
1720 	int stat = 0;
1721 	unsigned short reg;
1722 	unsigned char ctx = 0;
1723 	enum icp_qat_uof_regtype type;
1724 
1725 	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
1726 		return EINVAL;
1727 
1728 	do {
1729 		if (ctx_mask == 0) {
1730 			qat_hal_convert_abs_to_rel(
1731 			    handle, ae, reg_num, &reg, &ctx);
1732 			type = reg_type - 1;
1733 		} else {
1734 			reg = reg_num;
1735 			type = reg_type;
1736 			if (!test_bit(ctx, &ctx_mask))
1737 				continue;
1738 		}
1739 		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
1740 		if (stat) {
1741 			pr_err("QAT: write gpr fail\n");
1742 			return EINVAL;
1743 		}
1744 	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1745 
1746 	return 0;
1747 }
1748 
1749 int
1750 qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1751 		     unsigned char ae,
1752 		     unsigned long ctx_mask,
1753 		     enum icp_qat_uof_regtype reg_type,
1754 		     unsigned short reg_num,
1755 		     unsigned int regdata)
1756 {
1757 	int stat = 0;
1758 	unsigned short reg;
1759 	unsigned char ctx = 0;
1760 	enum icp_qat_uof_regtype type;
1761 
1762 	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1763 		return EINVAL;
1764 
1765 	do {
1766 		if (ctx_mask == 0) {
1767 			qat_hal_convert_abs_to_rel(
1768 			    handle, ae, reg_num, &reg, &ctx);
1769 			type = reg_type - 3;
1770 		} else {
1771 			reg = reg_num;
1772 			type = reg_type;
1773 			if (!test_bit(ctx, &ctx_mask))
1774 				continue;
1775 		}
1776 		stat = qat_hal_put_rel_wr_xfer(
1777 		    handle, ae, ctx, type, reg, regdata);
1778 		if (stat) {
1779 			pr_err("QAT: write wr xfer fail\n");
1780 			return EINVAL;
1781 		}
1782 	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1783 
1784 	return 0;
1785 }
1786 
1787 int
1788 qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1789 		     unsigned char ae,
1790 		     unsigned long ctx_mask,
1791 		     enum icp_qat_uof_regtype reg_type,
1792 		     unsigned short reg_num,
1793 		     unsigned int regdata)
1794 {
1795 	int stat = 0;
1796 	unsigned short reg;
1797 	unsigned char ctx = 0;
1798 	enum icp_qat_uof_regtype type;
1799 
1800 	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1801 		return EINVAL;
1802 
1803 	do {
1804 		if (ctx_mask == 0) {
1805 			qat_hal_convert_abs_to_rel(
1806 			    handle, ae, reg_num, &reg, &ctx);
1807 			type = reg_type - 3;
1808 		} else {
1809 			reg = reg_num;
1810 			type = reg_type;
1811 			if (!test_bit(ctx, &ctx_mask))
1812 				continue;
1813 		}
1814 		stat = qat_hal_put_rel_rd_xfer(
1815 		    handle, ae, ctx, type, reg, regdata);
1816 		if (stat) {
1817 			pr_err("QAT: write rd xfer fail\n");
1818 			return EINVAL;
1819 		}
1820 	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1821 
1822 	return 0;
1823 }
1824 
1825 int
1826 qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1827 		unsigned char ae,
1828 		unsigned long ctx_mask,
1829 		unsigned short reg_num,
1830 		unsigned int regdata)
1831 {
1832 	int stat = 0;
1833 	unsigned char ctx;
1834 
1835 	if (ctx_mask == 0)
1836 		return EINVAL;
1837 
1838 	for_each_set_bit(ctx, &ctx_mask, ICP_QAT_UCLO_MAX_CTX)
1839 	{
1840 		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1841 		if (stat) {
1842 			pr_err("QAT: write neigh error\n");
1843 			return EINVAL;
1844 		}
1845 	}
1846 
1847 	return 0;
1848 }
1849