xref: /freebsd/sys/dev/qat/qat_common/qat_uclo.c (revision 71625ec9)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #include "qat_freebsd.h"
4 #include "adf_cfg.h"
5 #include "adf_common_drv.h"
6 #include "adf_accel_devices.h"
7 #include "icp_qat_uclo.h"
8 #include "icp_qat_fw.h"
9 #include "icp_qat_fw_init_admin.h"
10 #include "adf_cfg_strings.h"
11 #include "adf_transport_access_macros.h"
12 #include "adf_transport_internal.h"
13 #include <sys/ctype.h>
14 #include <sys/kernel.h>
15 #include <linux/delay.h>
19 #include "icp_qat_hal.h"
20 #include "icp_qat_fw_loader_handle.h"
21 
22 #define UWORD_CPYBUF_SIZE 1024
23 #define INVLD_UWORD 0xffffffffffull
24 #define PID_MINOR_REV 0xf
25 #define PID_MAJOR_REV (0xf << 4)
26 #define MAX_UINT32_VAL 0xfffffffful
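/*
 * Note: PID_MINOR_REV and PID_MAJOR_REV mask the low and high nibbles of
 * the PCI revision ID.  prod_rev is later built as
 * PID_MAJOR_REV | (PID_MINOR_REV & revision_id), so the major nibble is
 * fixed at 0xf and only the minor nibble comes from the device.
 */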
27 
28 static int
29 qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
30 		      unsigned int ae,
31 		      unsigned int image_num)
32 {
33 	struct icp_qat_uclo_aedata *ae_data;
34 	struct icp_qat_uclo_encapme *encap_image;
35 	struct icp_qat_uclo_page *page = NULL;
36 	struct icp_qat_uclo_aeslice *ae_slice = NULL;
37 
38 	ae_data = &obj_handle->ae_data[ae];
39 	encap_image = &obj_handle->ae_uimage[image_num];
40 	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
41 	ae_slice->encap_image = encap_image;
42 
43 	if (encap_image->img_ptr) {
44 		ae_slice->ctx_mask_assigned =
45 		    encap_image->img_ptr->ctx_assigned;
46 		ae_data->shareable_ustore =
47 		    ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode);
48 		if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE)
49 			ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
50 		else {
51 			ae_data->eff_ustore_size = ae_data->shareable_ustore ?
52 			    (obj_handle->ustore_phy_size << 1) :
53 			    obj_handle->ustore_phy_size;
54 		}
55 	} else {
56 		ae_slice->ctx_mask_assigned = 0;
57 	}
58 	ae_slice->region =
59 	    malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO);
60 	ae_slice->page =
61 	    malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO);
62 	page = ae_slice->page;
63 	page->encap_page = encap_image->page;
64 	ae_slice->page->region = ae_slice->region;
65 	ae_data->slice_num++;
66 	return 0;
67 }
68 
69 static int
70 qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
71 {
72 	unsigned int i;
73 
74 	if (!ae_data) {
75 		pr_err("QAT: bad argument, ae_data is NULL\n");
76 		return EINVAL;
77 	}
78 
79 	for (i = 0; i < ae_data->slice_num; i++) {
80 		free(ae_data->ae_slices[i].region, M_QAT);
81 		ae_data->ae_slices[i].region = NULL;
82 		free(ae_data->ae_slices[i].page, M_QAT);
83 		ae_data->ae_slices[i].page = NULL;
84 	}
85 	return 0;
86 }
87 
88 static char *
89 qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
90 		    unsigned int str_offset)
91 {
92 	if (!str_table->table_len || str_offset > str_table->table_len)
93 		return NULL;
94 	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
95 }
96 
97 static int
98 qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
99 {
100 	int maj = hdr->maj_ver & 0xff;
101 	int min = hdr->min_ver & 0xff;
102 
103 	if (hdr->file_id != ICP_QAT_UOF_FID) {
104 		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
105 		return EINVAL;
106 	}
107 	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
108 		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
109 		       maj,
110 		       min);
111 		return EINVAL;
112 	}
113 	return 0;
114 }
115 
116 static int
117 qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr)
118 {
119 	int maj = suof_hdr->maj_ver & 0xff;
120 	int min = suof_hdr->min_ver & 0xff;
121 
122 	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
123 		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
124 		return EINVAL;
125 	}
126 	if (suof_hdr->fw_type != 0) {
127 		pr_err("QAT: unsupported firmware type\n");
128 		return EINVAL;
129 	}
130 	if (suof_hdr->num_chunks <= 0x1) {
131 		pr_err("QAT: SUOF chunk count is incorrect\n");
132 		return EINVAL;
133 	}
134 	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
135 		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
136 		       maj,
137 		       min);
138 		return EINVAL;
139 	}
140 	return 0;
141 }
142 
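/*
 * Note: the SRAM copy loop below moves one 32-bit word per iteration and
 * assumes num_in_bytes is a multiple of 4; an unaligned tail would make
 * the unsigned byte counter wrap instead of terminating the loop.
 */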
143 static int
144 qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
145 			  unsigned int addr,
146 			  const unsigned int *val,
147 			  unsigned int num_in_bytes)
148 {
149 	unsigned int outval;
150 	const unsigned char *ptr = (const unsigned char *)val;
151 
152 	if (num_in_bytes > handle->hal_sram_size) {
153 		pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes);
154 		return EINVAL;
155 	}
156 	while (num_in_bytes) {
157 		memcpy(&outval, ptr, 4);
158 		SRAM_WRITE(handle, addr, outval);
159 		num_in_bytes -= 4;
160 		ptr += 4;
161 		addr += 4;
162 	}
163 	return 0;
164 }
165 
166 static void
167 qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
168 			  unsigned char ae,
169 			  unsigned int addr,
170 			  unsigned int *val,
171 			  unsigned int num_in_bytes)
172 {
173 	unsigned int outval;
174 	unsigned char *ptr = (unsigned char *)val;
175 
176 	addr >>= 0x2; /* convert to uword address */
177 
178 	while (num_in_bytes) {
179 		memcpy(&outval, ptr, 4);
180 		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
181 		num_in_bytes -= 4;
182 		ptr += 4;
183 	}
184 }
185 
186 static void
187 qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
188 		       unsigned char ae,
189 		       struct icp_qat_uof_batch_init *umem_init_header)
190 {
191 	struct icp_qat_uof_batch_init *umem_init;
192 
193 	if (!umem_init_header)
194 		return;
195 	umem_init = umem_init_header->next;
196 	while (umem_init) {
197 		unsigned int addr, *value, size;
198 
199 		ae = umem_init->ae;
200 		addr = umem_init->addr;
201 		value = umem_init->value;
202 		size = umem_init->size;
203 		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
204 		umem_init = umem_init->next;
205 	}
206 }
207 
208 static void
209 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
210 				 struct icp_qat_uof_batch_init **base)
211 {
212 	struct icp_qat_uof_batch_init *umem_init;
213 
214 	umem_init = *base;
215 	while (umem_init) {
216 		struct icp_qat_uof_batch_init *pre;
217 
218 		pre = umem_init;
219 		umem_init = umem_init->next;
220 		free(pre, M_QAT);
221 	}
222 	*base = NULL;
223 }
224 
225 static int
226 qat_uclo_parse_num(char *str, unsigned int *num)
227 {
228 	char buf[16] = { 0 };
229 	unsigned long ae = 0;
230 	int i;
231 
232 	strncpy(buf, str, 15);
233 	for (i = 0; i < 16; i++) {
234 		if (!isdigit(buf[i])) {
235 			buf[i] = '\0';
236 			break;
237 		}
238 	}
239 	if ((compat_strtoul(buf, 10, &ae)))
240 		return EFAULT;
241 
242 	if (ae > MAX_UINT32_VAL)
243 		return EFAULT;
244 
245 	*num = (unsigned int)ae;
246 	return 0;
247 }
248 
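/*
 * Note: initmem symbol names carry the target AE as a leading decimal
 * number; the loop below truncates the copied string at the first
 * non-digit character so compat_strtoul() parses only that number.
 */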
249 static int
250 qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
251 			  struct icp_qat_uof_initmem *init_mem,
252 			  unsigned int size_range,
253 			  unsigned int *ae)
254 {
255 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
256 	char *str;
257 
258 	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
259 		pr_err("QAT: initmem is out of range\n");
260 		return EINVAL;
261 	}
262 	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
263 		pr_err("QAT: invalid memory scope for init_mem\n");
264 		return EINVAL;
265 	}
266 	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
267 	if (!str) {
268 		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
269 		return EINVAL;
270 	}
271 	if (qat_uclo_parse_num(str, ae)) {
272 		pr_err("QAT: failed to parse AE number\n");
273 		return EINVAL;
274 	}
275 	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
276 		pr_err("QAT: ae %d out of range\n", *ae);
277 		return EINVAL;
278 	}
279 	return 0;
280 }
281 
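/*
 * Note: each per-AE init table is a singly linked list behind a header
 * node; the header's size field starts at 1 and accumulates
 * qat_hal_get_ins_num() per 4-byte value, i.e. the micro-instruction
 * count the later batch write will need.
 */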
282 static int
283 qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle,
284 				struct icp_qat_uof_initmem *init_mem,
285 				unsigned int ae,
286 				struct icp_qat_uof_batch_init **init_tab_base)
287 {
288 	struct icp_qat_uof_batch_init *init_header, *tail;
289 	struct icp_qat_uof_batch_init *mem_init, *tail_old;
290 	struct icp_qat_uof_memvar_attr *mem_val_attr;
291 	unsigned int i = 0;
292 
293 	mem_val_attr =
294 	    (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
295 					       sizeof(
296 						   struct icp_qat_uof_initmem));
297 
298 	init_header = *init_tab_base;
299 	if (!init_header) {
300 		init_header =
301 		    malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO);
302 		init_header->size = 1;
303 		*init_tab_base = init_header;
304 	}
305 	tail_old = init_header;
306 	while (tail_old->next)
307 		tail_old = tail_old->next;
308 	tail = tail_old;
309 	for (i = 0; i < init_mem->val_attr_num; i++) {
310 		mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO);
311 		mem_init->ae = ae;
312 		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
313 		mem_init->value = &mem_val_attr->value;
314 		mem_init->size = 4;
315 		mem_init->next = NULL;
316 		tail->next = mem_init;
317 		tail = mem_init;
318 		init_header->size += qat_hal_get_ins_num();
319 		mem_val_attr++;
320 	}
321 	return 0;
322 }
323 
324 static int
325 qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
326 		       struct icp_qat_uof_initmem *init_mem)
327 {
328 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
329 	unsigned int ae;
330 	unsigned int lmem;
331 
332 	lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ?
333 	    ICP_QAT_UCLO_MAX_LMEM_REG_2X :
334 	    ICP_QAT_UCLO_MAX_LMEM_REG;
335 
336 	if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae))
337 		return EINVAL;
338 	if (qat_uclo_create_batch_init_list(
339 		handle, init_mem, ae, &obj_handle->lm_init_tab[ae]))
340 		return EINVAL;
341 	return 0;
342 }
343 
344 static int
345 qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
346 		       struct icp_qat_uof_initmem *init_mem)
347 {
348 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
349 	unsigned int ae, ustore_size, uaddr, i;
350 	struct icp_qat_uclo_aedata *aed;
351 
352 	ustore_size = obj_handle->ustore_phy_size;
353 	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
354 		return EINVAL;
355 	if (qat_uclo_create_batch_init_list(
356 		handle, init_mem, ae, &obj_handle->umem_init_tab[ae]))
357 		return EINVAL;
358 	/* set the highest ustore address referenced */
359 	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
360 	aed = &obj_handle->ae_data[ae];
361 	for (i = 0; i < aed->slice_num; i++) {
362 		if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
363 			aed->ae_slices[i].encap_image->uwords_num = uaddr;
364 	}
365 	return 0;
366 }
367 
368 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
369 static int
370 qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
371 			struct icp_qat_uof_initmem *init_mem)
372 {
373 	switch (init_mem->region) {
374 	case ICP_QAT_UOF_LMEM_REGION:
375 		if (qat_uclo_init_lmem_seg(handle, init_mem))
376 			return EINVAL;
377 		break;
378 	case ICP_QAT_UOF_UMEM_REGION:
379 		if (qat_uclo_init_umem_seg(handle, init_mem))
380 			return EINVAL;
381 		break;
382 	default:
383 		pr_err("QAT: initmem region error. region type=0x%x\n",
384 		       init_mem->region);
385 		return EINVAL;
386 	}
387 	return 0;
388 }
389 
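/*
 * Note: this routine pads the ustore words not covered by the image with
 * the image's fill pattern.  For shared-ustore pairs (pre-GEN4) the
 * coalesced write covers both AEs and the static init[] flags appear to
 * ensure each pair is filled only once; on GEN4 only every 4th AE writes
 * on behalf of its group.
 */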
390 static int
391 qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
392 		     struct icp_qat_uclo_encapme *image)
393 {
394 	unsigned int i;
395 	struct icp_qat_uclo_encap_page *page;
396 	struct icp_qat_uof_image *uof_image;
397 	unsigned char ae = 0;
398 	unsigned char neigh_ae;
399 	unsigned int ustore_size;
400 	unsigned int patt_pos;
401 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
402 	uint64_t *fill_data;
403 	static unsigned int init[32] = { 0 };
404 	unsigned long ae_mask = handle->hal_handle->ae_mask;
405 
406 	uof_image = image->img_ptr;
407 	/* if shared CS mode, the ustore size should be 2 * ustore_phy_size */
408 	fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t),
409 			   M_QAT,
410 			   M_WAITOK | M_ZERO);
411 	for (i = 0; i < obj_handle->ustore_phy_size * 2; i++)
412 		memcpy(&fill_data[i],
413 		       &uof_image->fill_pattern,
414 		       sizeof(uint64_t));
415 	page = image->page;
416 
417 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
418 	{
419 		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
420 		unsigned long ae_assigned = uof_image->ae_assigned;
421 		const bool gen4 =
422 		    IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)));
423 
424 		if (!test_bit(ae, &cfg_ae_mask))
425 			continue;
426 
427 		if (!test_bit(ae, &ae_assigned))
428 			continue;
429 
430 		if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) &&
431 		    !gen4) {
432 			qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
433 
434 			if (test_bit(neigh_ae, &ae_assigned))
435 				continue;
436 		}
437 
438 		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
439 		patt_pos = page->beg_addr_p + page->micro_words_num;
440 		if (obj_handle->ae_data[ae].shareable_ustore && !gen4) {
441 			qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
442 			if (init[ae] == 0 && page->beg_addr_p != 0) {
443 				qat_hal_wr_coalesce_uwords(handle,
444 							   (unsigned char)ae,
445 							   0,
446 							   page->beg_addr_p,
447 							   &fill_data[0]);
448 			}
449 			qat_hal_wr_coalesce_uwords(
450 			    handle,
451 			    (unsigned char)ae,
452 			    patt_pos,
453 			    ustore_size - patt_pos,
454 			    &fill_data[page->beg_addr_p]);
455 			init[ae] = 1;
456 			init[neigh_ae] = 1;
457 		} else {
458 			if (gen4 && (ae % 4 != 0))
459 				continue;
460 
461 			qat_hal_wr_uwords(handle,
462 					  (unsigned char)ae,
463 					  0,
464 					  page->beg_addr_p,
465 					  &fill_data[0]);
466 			qat_hal_wr_uwords(handle,
467 					  (unsigned char)ae,
468 					  patt_pos,
469 					  ustore_size - patt_pos + 1,
470 					  &fill_data[page->beg_addr_p]);
471 		}
472 	}
473 	free(fill_data, M_QAT);
474 	return 0;
475 }
476 
477 static int
478 qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
479 {
480 	int i;
481 	int ae = 0;
482 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
483 	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
484 	unsigned long ae_mask = handle->hal_handle->ae_mask;
485 
486 	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
487 		if (initmem->num_in_bytes) {
488 			if (qat_uclo_init_ae_memory(handle, initmem))
489 				return EINVAL;
490 		}
491 		initmem =
492 		    (struct icp_qat_uof_initmem
493 			 *)((uintptr_t)((uintptr_t)initmem +
494 					sizeof(struct icp_qat_uof_initmem)) +
495 			    (sizeof(struct icp_qat_uof_memvar_attr) *
496 			     initmem->val_attr_num));
497 	}
498 
499 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
500 	{
501 		if (qat_hal_batch_wr_lm(handle,
502 					ae,
503 					obj_handle->lm_init_tab[ae])) {
504 			pr_err("QAT: failed to batch init lmem for AE %d\n", ae);
505 			return EINVAL;
506 		}
507 		qat_uclo_cleanup_batch_init_list(handle,
508 						 &obj_handle->lm_init_tab[ae]);
509 		qat_uclo_batch_wr_umem(handle,
510 				       ae,
511 				       obj_handle->umem_init_tab[ae]);
512 		qat_uclo_cleanup_batch_init_list(
513 		    handle, &obj_handle->umem_init_tab[ae]);
514 	}
515 	return 0;
516 }
517 
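/*
 * Note: chunk headers are laid out directly after the object header.
 * Passing the previously returned header as 'cur' resumes the scan past
 * it, which is how qat_uclo_map_uimage() iterates over all IMAG chunks.
 */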
518 static void *
519 qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
520 		    char *chunk_id,
521 		    void *cur)
522 {
523 	int i;
524 	struct icp_qat_uof_chunkhdr *chunk_hdr =
525 	    (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr +
526 					    sizeof(struct icp_qat_uof_objhdr));
527 
528 	for (i = 0; i < obj_hdr->num_chunks; i++) {
529 		if ((cur < (void *)&chunk_hdr[i]) &&
530 		    !strncmp(chunk_hdr[i].chunk_id,
531 			     chunk_id,
532 			     ICP_QAT_UOF_OBJID_LEN)) {
533 			return &chunk_hdr[i];
534 		}
535 	}
536 	return NULL;
537 }
538 
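/*
 * Note: this appears to be a byte-at-a-time CRC-16 using the CCITT
 * polynomial 0x1021; qat_uclo_calc_str_checksum() folds it over a byte
 * range and qat_uclo_map_chunk() uses the result to validate file chunks.
 */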
539 static unsigned int
540 qat_uclo_calc_checksum(unsigned int reg, int ch)
541 {
542 	int i;
543 	unsigned int topbit = 1 << 0xF;
544 	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
545 
546 	reg ^= inbyte << 0x8;
547 	for (i = 0; i < 0x8; i++) {
548 		if (reg & topbit)
549 			reg = (reg << 1) ^ 0x1021;
550 		else
551 			reg <<= 1;
552 	}
553 	return reg & 0xFFFF;
554 }
555 
556 static unsigned int
557 qat_uclo_calc_str_checksum(const char *ptr, int num)
558 {
559 	unsigned int chksum = 0;
560 
561 	if (ptr)
562 		while (num--)
563 			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
564 	return chksum;
565 }
566 
567 static struct icp_qat_uclo_objhdr *
568 qat_uclo_map_chunk(char *buf,
569 		   struct icp_qat_uof_filehdr *file_hdr,
570 		   char *chunk_id)
571 {
572 	struct icp_qat_uof_filechunkhdr *file_chunk;
573 	struct icp_qat_uclo_objhdr *obj_hdr;
574 	char *chunk;
575 	int i;
576 
577 	file_chunk = (struct icp_qat_uof_filechunkhdr
578 			  *)(buf + sizeof(struct icp_qat_uof_filehdr));
579 	for (i = 0; i < file_hdr->num_chunks; i++) {
580 		if (!strncmp(file_chunk->chunk_id,
581 			     chunk_id,
582 			     ICP_QAT_UOF_OBJID_LEN)) {
583 			chunk = buf + file_chunk->offset;
584 			if (file_chunk->checksum !=
585 			    qat_uclo_calc_str_checksum(chunk, file_chunk->size))
586 				break;
587 			obj_hdr =
588 			    malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO);
589 			obj_hdr->file_buff = chunk;
590 			obj_hdr->checksum = file_chunk->checksum;
591 			obj_hdr->size = file_chunk->size;
592 			return obj_hdr;
593 		}
594 		file_chunk++;
595 	}
596 	return NULL;
597 }
598 
599 static unsigned int
600 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
601 			    struct icp_qat_uof_image *image)
602 {
603 	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
604 	struct icp_qat_uof_objtable *neigh_reg_tab;
605 	struct icp_qat_uof_code_page *code_page;
606 
607 	code_page =
608 	    (struct icp_qat_uof_code_page *)((char *)image +
609 					     sizeof(struct icp_qat_uof_image));
610 	uc_var_tab =
611 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
612 					    code_page->uc_var_tab_offset);
613 	imp_var_tab =
614 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
615 					    code_page->imp_var_tab_offset);
616 	imp_expr_tab =
617 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
618 					    code_page->imp_expr_tab_offset);
619 	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
620 	    imp_expr_tab->entry_num) {
621 		pr_err("QAT: UOF can't contain imported variables to be parsed\n");
622 		return EINVAL;
623 	}
624 	neigh_reg_tab =
625 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
626 					    code_page->neigh_reg_tab_offset);
627 	if (neigh_reg_tab->entry_num) {
628 		pr_err("QAT: UOF can't contain neighbor register table\n");
629 		return EINVAL;
630 	}
631 	if (image->numpages > 1) {
632 		pr_err("QAT: UOF can't contain multiple pages\n");
633 		return EINVAL;
634 	}
635 	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
636 		pr_err("QAT: UOF can't use reloadable feature\n");
637 		return EFAULT;
638 	}
639 	return 0;
640 }
641 
642 static void
643 qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj,
644 			struct icp_qat_uof_image *img,
645 			struct icp_qat_uclo_encap_page *page)
646 {
647 	struct icp_qat_uof_code_page *code_page;
648 	struct icp_qat_uof_code_area *code_area;
649 	struct icp_qat_uof_objtable *uword_block_tab;
650 	struct icp_qat_uof_uword_block *uwblock;
651 	int i;
652 
653 	code_page =
654 	    (struct icp_qat_uof_code_page *)((char *)img +
655 					     sizeof(struct icp_qat_uof_image));
656 	page->def_page = code_page->def_page;
657 	page->page_region = code_page->page_region;
658 	page->beg_addr_v = code_page->beg_addr_v;
659 	page->beg_addr_p = code_page->beg_addr_p;
660 	code_area =
661 	    (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
662 					     code_page->code_area_offset);
663 	page->micro_words_num = code_area->micro_words_num;
664 	uword_block_tab =
665 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
666 					    code_area->uword_block_tab);
667 	page->uwblock_num = uword_block_tab->entry_num;
668 	uwblock = (struct icp_qat_uof_uword_block
669 		       *)((char *)uword_block_tab +
670 			  sizeof(struct icp_qat_uof_objtable));
671 	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
672 	for (i = 0; i < uword_block_tab->entry_num; i++)
673 		page->uwblock[i].micro_words =
674 		    (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
675 }
676 
677 static int
678 qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
679 		    struct icp_qat_uclo_encapme *ae_uimage,
680 		    int max_image)
681 {
682 	int i, j;
683 	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
684 	struct icp_qat_uof_image *image;
685 	struct icp_qat_uof_objtable *ae_regtab;
686 	struct icp_qat_uof_objtable *init_reg_sym_tab;
687 	struct icp_qat_uof_objtable *sbreak_tab;
688 	struct icp_qat_uof_encap_obj *encap_uof_obj =
689 	    &obj_handle->encap_uof_obj;
690 
691 	for (j = 0; j < max_image; j++) {
692 		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
693 						ICP_QAT_UOF_IMAG,
694 						chunk_hdr);
695 		if (!chunk_hdr)
696 			break;
697 		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
698 						     chunk_hdr->offset);
699 		ae_regtab =
700 		    (struct icp_qat_uof_objtable *)(image->reg_tab_offset +
701 						    obj_handle->obj_hdr
702 							->file_buff);
703 		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
704 		ae_uimage[j].ae_reg =
705 		    (struct icp_qat_uof_ae_reg
706 			 *)(((char *)ae_regtab) +
707 			    sizeof(struct icp_qat_uof_objtable));
708 		init_reg_sym_tab =
709 		    (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab +
710 						    obj_handle->obj_hdr
711 							->file_buff);
712 		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
713 		ae_uimage[j].init_regsym =
714 		    (struct icp_qat_uof_init_regsym
715 			 *)(((char *)init_reg_sym_tab) +
716 			    sizeof(struct icp_qat_uof_objtable));
717 		sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab +
718 							     obj_handle->obj_hdr
719 								 ->file_buff);
720 		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
721 		ae_uimage[j].sbreak =
722 		    (struct icp_qat_uof_sbreak
723 			 *)(((char *)sbreak_tab) +
724 			    sizeof(struct icp_qat_uof_objtable));
725 		ae_uimage[j].img_ptr = image;
726 		if (qat_uclo_check_image_compat(encap_uof_obj, image))
727 			goto out_err;
728 		ae_uimage[j].page =
729 		    malloc(sizeof(struct icp_qat_uclo_encap_page),
730 			   M_QAT,
731 			   M_WAITOK | M_ZERO);
732 		qat_uclo_map_image_page(encap_uof_obj,
733 					image,
734 					ae_uimage[j].page);
735 	}
736 	return j;
737 out_err:
738 	for (i = 0; i < j; i++)
739 		free(ae_uimage[i].page, M_QAT);
740 	return 0;
741 }
742 
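/*
 * Note: on CPM2X (GEN4) the AEs are grouped into threadgroups of
 * AE_TG_NUM_CPM2X.  The checks below enforce that every image is a
 * single page and that all assigned AEs within one threadgroup run the
 * same image.
 */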
743 static int
744 UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle)
745 {
746 	int i;
747 	unsigned int swAe = 0;
748 	unsigned int ii, jj;
749 	struct icp_qat_uclo_aedata *ae_data0, *ae_datax;
750 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
751 
752 	for (i = 0; i < obj_handle->uimage_num; i++) {
753 		struct icp_qat_uof_image *image =
754 		    obj_handle->ae_uimage[i].img_ptr;
755 		if (image->numpages > 1) {
756 			pr_err(
757 			    "Only 1 page is allowed in a UOF for CPM2X; We found %d in %s\n",
758 			    image->numpages,
759 			    qat_uclo_get_string(&obj_handle->str_table,
760 						image->img_name));
761 			return EINVAL;
762 		}
763 	}
764 
765 	for (swAe = 0;
766 	     (swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE);
767 	     swAe += AE_TG_NUM_CPM2X) {
768 		if (!qat_hal_check_ae_active(handle, swAe)) {
769 			continue;
770 		}
771 
772 		for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) {
773 			ae_data0 = &obj_handle->ae_data[ii];
774 			if (ae_data0->slice_num != 1) /* not assigned */
775 				continue;
776 
777 			for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) {
778 				ae_datax = &obj_handle->ae_data[jj];
779 				if (ae_datax->slice_num != 1) /* not assigned */
780 					continue;
781 				if (ae_data0->ae_slices[0]
782 					.encap_image->img_ptr !=
783 				    ae_datax->ae_slices[0]
784 					.encap_image->img_ptr) {
785 					pr_err(
786 					    "Only 1 list is allowed in a Tgroup for CPM2X; ME%d, %d are assigned different list files\n",
787 					    ii,
788 					    jj);
789 					return EINVAL;
790 				}
791 			}
792 		}
793 	}
794 
795 	return 0;
796 }
797 
798 static int
799 qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
800 {
801 	int i;
802 	int ae = 0;
803 	unsigned long ae_mask = handle->hal_handle->ae_mask;
804 	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
805 	int mflag = 0;
806 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
807 
808 	for_each_set_bit(ae, &ae_mask, max_ae)
809 	{
810 		if (!test_bit(ae, &cfg_ae_mask))
811 			continue;
812 
813 		for (i = 0; i < obj_handle->uimage_num; i++) {
814 			unsigned long ae_assigned =
815 			    obj_handle->ae_uimage[i].img_ptr->ae_assigned;
816 			if (!test_bit(ae, &ae_assigned))
817 				continue;
818 			mflag = 1;
819 			if (qat_uclo_init_ae_data(obj_handle, ae, i))
820 				return EINVAL;
821 		}
822 	}
823 	if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
824 		if (UcLo_checkTGroupList2X(handle)) {
825 			return EINVAL;
826 		}
827 	}
828 	if (!mflag) {
829 		pr_err("QAT: uimage uses an AE that is not set\n");
830 		return EINVAL;
831 	}
832 	return 0;
833 }
834 
835 static struct icp_qat_uof_strtable *
836 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
837 		       char *tab_name,
838 		       struct icp_qat_uof_strtable *str_table)
839 {
840 	struct icp_qat_uof_chunkhdr *chunk_hdr;
841 
842 	chunk_hdr =
843 	    qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff,
844 				tab_name,
845 				NULL);
846 	if (chunk_hdr) {
847 		int hdr_size;
848 
849 		memcpy(&str_table->table_len,
850 		       obj_hdr->file_buff + chunk_hdr->offset,
851 		       sizeof(str_table->table_len));
852 		hdr_size = (char *)&str_table->strings - (char *)str_table;
853 		str_table->strings = (uintptr_t)obj_hdr->file_buff +
854 		    chunk_hdr->offset + hdr_size;
855 		return str_table;
856 	}
857 	return NULL;
858 }
859 
860 static void
861 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
862 			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
863 {
864 	struct icp_qat_uof_chunkhdr *chunk_hdr;
865 
866 	chunk_hdr =
867 	    qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL);
868 	if (chunk_hdr) {
869 		memmove(&init_mem_tab->entry_num,
870 			encap_uof_obj->beg_uof + chunk_hdr->offset,
871 			sizeof(unsigned int));
872 		init_mem_tab->init_mem =
873 		    (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof +
874 						   chunk_hdr->offset +
875 						   sizeof(unsigned int));
876 	}
877 }
878 
879 static unsigned int
880 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
881 {
882 	switch (pci_get_device(GET_DEV(handle->accel_dev))) {
883 	case ADF_DH895XCC_PCI_DEVICE_ID:
884 		return ICP_QAT_AC_895XCC_DEV_TYPE;
885 	case ADF_C62X_PCI_DEVICE_ID:
886 		return ICP_QAT_AC_C62X_DEV_TYPE;
887 	case ADF_C3XXX_PCI_DEVICE_ID:
888 		return ICP_QAT_AC_C3XXX_DEV_TYPE;
889 	case ADF_200XX_PCI_DEVICE_ID:
890 		return ICP_QAT_AC_200XX_DEV_TYPE;
891 	case ADF_C4XXX_PCI_DEVICE_ID:
892 		return ICP_QAT_AC_C4XXX_DEV_TYPE;
893 	case ADF_4XXX_PCI_DEVICE_ID:
894 	case ADF_401XX_PCI_DEVICE_ID:
895 		return ICP_QAT_AC_4XXX_A_DEV_TYPE;
896 	default:
897 		pr_err("QAT: unsupported device 0x%x\n",
898 		       pci_get_device(GET_DEV(handle->accel_dev)));
899 		return 0;
900 	}
901 }
902 
903 static int
904 qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
905 {
906 	unsigned int maj_ver, prod_type = obj_handle->prod_type;
907 
908 	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
909 		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
910 		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
911 		       prod_type);
912 		return EINVAL;
913 	}
914 	maj_ver = obj_handle->prod_rev & 0xff;
915 	if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
916 	    obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
917 		pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver);
918 		return EINVAL;
919 	}
920 	return 0;
921 }
922 
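/*
 * Note: for the *_ABS register types the context mask is cleared before
 * the HAL call, presumably because absolute registers are shared by all
 * contexts of an AE; the *_REL types pass the caller's mask through.
 */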
923 static int
924 qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
925 		  unsigned char ae,
926 		  unsigned char ctx_mask,
927 		  enum icp_qat_uof_regtype reg_type,
928 		  unsigned short reg_addr,
929 		  unsigned int value)
930 {
931 	switch (reg_type) {
932 	case ICP_GPA_ABS:
933 	case ICP_GPB_ABS:
934 		ctx_mask = 0;
935 		return qat_hal_init_gpr(
936 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
937 	case ICP_GPA_REL:
938 	case ICP_GPB_REL:
939 		return qat_hal_init_gpr(
940 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
941 	case ICP_SR_ABS:
942 	case ICP_DR_ABS:
943 	case ICP_SR_RD_ABS:
944 	case ICP_DR_RD_ABS:
945 		ctx_mask = 0;
946 		return qat_hal_init_rd_xfer(
947 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
948 	case ICP_SR_REL:
949 	case ICP_DR_REL:
950 	case ICP_SR_RD_REL:
951 	case ICP_DR_RD_REL:
952 		return qat_hal_init_rd_xfer(
953 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
954 	case ICP_SR_WR_ABS:
955 	case ICP_DR_WR_ABS:
956 		ctx_mask = 0;
957 		return qat_hal_init_wr_xfer(
958 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
959 	case ICP_SR_WR_REL:
960 	case ICP_DR_WR_REL:
961 		return qat_hal_init_wr_xfer(
962 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
963 	case ICP_NEIGH_REL:
964 		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
965 	default:
966 		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
967 		return EFAULT;
968 	}
969 	return 0;
970 }
971 
972 static int
973 qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
974 		      unsigned int ae,
975 		      struct icp_qat_uclo_encapme *encap_ae)
976 {
977 	unsigned int i;
978 	unsigned char ctx_mask;
979 	struct icp_qat_uof_init_regsym *init_regsym;
980 
981 	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
982 	    ICP_QAT_UCLO_MAX_CTX)
983 		ctx_mask = 0xff;
984 	else
985 		ctx_mask = 0x55;
986 
987 	for (i = 0; i < encap_ae->init_regsym_num; i++) {
988 		unsigned int exp_res;
989 
990 		init_regsym = &encap_ae->init_regsym[i];
991 		exp_res = init_regsym->value;
992 		switch (init_regsym->init_type) {
993 		case ICP_QAT_UOF_INIT_REG:
994 			qat_uclo_init_reg(handle,
995 					  ae,
996 					  ctx_mask,
997 					  (enum icp_qat_uof_regtype)
998 					      init_regsym->reg_type,
999 					  (unsigned short)init_regsym->reg_addr,
1000 					  exp_res);
1001 			break;
1002 		case ICP_QAT_UOF_INIT_REG_CTX:
1003 			/* check if ctx is appropriate for the ctxMode */
1004 			if (!((1 << init_regsym->ctx) & ctx_mask)) {
1005 				pr_err("QAT: invalid ctx num = 0x%x\n",
1006 				       init_regsym->ctx);
1007 				return EINVAL;
1008 			}
1009 			qat_uclo_init_reg(
1010 			    handle,
1011 			    ae,
1012 			    (unsigned char)(1 << init_regsym->ctx),
1013 			    (enum icp_qat_uof_regtype)init_regsym->reg_type,
1014 			    (unsigned short)init_regsym->reg_addr,
1015 			    exp_res);
1016 			break;
1017 		case ICP_QAT_UOF_INIT_EXPR:
1018 			pr_err("QAT: INIT_EXPR feature not supported\n");
1019 			return EINVAL;
1020 		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
1021 			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n");
1022 			return EINVAL;
1023 		default:
1024 			break;
1025 		}
1026 	}
1027 	return 0;
1028 }
1029 
1030 static int
1031 qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
1032 {
1033 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1034 	unsigned int s;
1035 	unsigned int ae = 0;
1036 	struct icp_qat_uclo_aedata *aed;
1037 	unsigned long ae_mask = handle->hal_handle->ae_mask;
1038 
1039 	if (obj_handle->global_inited)
1040 		return 0;
1041 	if (obj_handle->init_mem_tab.entry_num) {
1042 		if (qat_uclo_init_memory(handle)) {
1043 			pr_err("QAT: memory initialization failed\n");
1044 			return EINVAL;
1045 		}
1046 	}
1047 
1048 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
1049 	{
1050 		aed = &obj_handle->ae_data[ae];
1051 		for (s = 0; s < aed->slice_num; s++) {
1052 			if (!aed->ae_slices[s].encap_image)
1053 				continue;
1054 			if (qat_uclo_init_reg_sym(
1055 				handle, ae, aed->ae_slices[s].encap_image))
1056 				return EINVAL;
1057 		}
1058 	}
1059 	obj_handle->global_inited = 1;
1060 	return 0;
1061 }
1062 
1063 static int
1064 qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
1065 		  struct icp_qat_uclo_objhandle *obj_handle,
1066 		  unsigned char ae,
1067 		  struct icp_qat_uof_image *uof_image)
1068 {
1069 	unsigned char nn_mode;
1070 	char ae_mode = 0;
1071 
1072 	ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode);
1073 	if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) {
1074 		pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
1075 		return EFAULT;
1076 	}
1077 
1078 	ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode);
1079 	qat_hal_set_ae_scs_mode(handle, ae, ae_mode);
1080 	if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
1081 		nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
1082 
1083 		if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
1084 			pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
1085 			return EFAULT;
1086 		}
1087 	}
1088 	ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
1089 	if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) {
1090 		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
1091 		return EFAULT;
1092 	}
1093 	ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
1094 	if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) {
1095 		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
1096 		return EFAULT;
1097 	}
1098 	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
1099 		ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
1100 		if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) {
1101 			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
1102 			return EFAULT;
1103 		}
1104 		ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
1105 		if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) {
1106 			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
1107 			return EFAULT;
1108 		}
1109 		ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
1110 		qat_hal_set_ae_tindex_mode(handle, ae, ae_mode);
1111 	}
1112 	return 0;
1113 }
1114 
1115 static int
1116 qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
1117 {
1118 	int error;
1119 	unsigned char s;
1120 	unsigned char ae = 0;
1121 	struct icp_qat_uof_image *uof_image;
1122 	struct icp_qat_uclo_aedata *ae_data;
1123 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1124 	unsigned long ae_mask = handle->hal_handle->ae_mask;
1125 
1126 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
1127 	{
1128 		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
1129 
1130 		if (!test_bit(ae, &cfg_ae_mask))
1131 			continue;
1132 
1133 		ae_data = &obj_handle->ae_data[ae];
1134 		for (s = 0; s < min_t(unsigned int,
1135 				      ae_data->slice_num,
1136 				      ICP_QAT_UCLO_MAX_CTX);
1137 		     s++) {
1138 			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
1139 				continue;
1140 			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
1141 			error = qat_hal_set_modes(handle,
1142 						  obj_handle,
1143 						  ae,
1144 						  uof_image);
1145 			if (error)
1146 				return error;
1147 		}
1148 	}
1149 	return 0;
1150 }
1151 
1152 static void
1153 qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
1154 {
1155 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1156 	struct icp_qat_uclo_encapme *image;
1157 	int a;
1158 
1159 	for (a = 0; a < obj_handle->uimage_num; a++) {
1160 		image = &obj_handle->ae_uimage[a];
1161 		image->uwords_num =
1162 		    image->page->beg_addr_p + image->page->micro_words_num;
1163 	}
1164 }
1165 
1166 static int
1167 qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
1168 {
1169 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1170 	unsigned int ae;
1171 
1172 	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
1173 	obj_handle->encap_uof_obj.obj_hdr =
1174 	    (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff;
1175 	obj_handle->uword_in_bytes = 6;
1176 	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
1177 	obj_handle->prod_rev =
1178 	    PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id);
1179 	if (qat_uclo_check_uof_compat(obj_handle)) {
1180 		pr_err("QAT: UOF incompatible\n");
1181 		return EINVAL;
1182 	}
1183 	obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t),
1184 				       M_QAT,
1185 				       M_WAITOK | M_ZERO);
1186 	obj_handle->ustore_phy_size =
1187 	    (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 :
1188 								   0x4000;
1189 	if (!obj_handle->obj_hdr->file_buff ||
1190 	    !qat_uclo_map_str_table(obj_handle->obj_hdr,
1191 				    ICP_QAT_UOF_STRT,
1192 				    &obj_handle->str_table)) {
1193 		pr_err("QAT: UOF has no effective images\n");
1194 		goto out_err;
1195 	}
1196 	obj_handle->uimage_num =
1197 	    qat_uclo_map_uimage(obj_handle,
1198 				obj_handle->ae_uimage,
1199 				ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
1200 	if (!obj_handle->uimage_num)
1201 		goto out_err;
1202 	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1203 		pr_err("QAT: Bad object\n");
1204 		goto out_check_uof_aemask_err;
1205 	}
1206 	qat_uclo_init_uword_num(handle);
1207 	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1208 				   &obj_handle->init_mem_tab);
1209 	if (qat_uclo_set_ae_mode(handle))
1210 		goto out_check_uof_aemask_err;
1211 	return 0;
1212 out_check_uof_aemask_err:
1213 	for (ae = 0; ae < obj_handle->uimage_num; ae++)
1214 		free(obj_handle->ae_uimage[ae].page, M_QAT);
1215 out_err:
1216 	free(obj_handle->uword_buf, M_QAT);
1217 	obj_handle->uword_buf = NULL;
1218 	return EFAULT;
1219 }
1220 
1221 static int
1222 qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle,
1223 			   const struct icp_qat_suof_filehdr *suof_ptr,
1224 			   int suof_size)
1225 {
1226 	unsigned int check_sum = 0;
1227 	unsigned int min_ver_offset = 0;
1228 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1229 
1230 	suof_handle->file_id = ICP_QAT_SUOF_FID;
1231 	suof_handle->suof_buf = (const char *)suof_ptr;
1232 	suof_handle->suof_size = suof_size;
1233 	min_ver_offset =
1234 	    suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver);
1235 	check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver,
1236 					       min_ver_offset);
1237 	if (check_sum != suof_ptr->check_sum) {
1238 		pr_err("QAT: incorrect SUOF checksum\n");
1239 		return EINVAL;
1240 	}
1241 	suof_handle->check_sum = suof_ptr->check_sum;
1242 	suof_handle->min_ver = suof_ptr->min_ver;
1243 	suof_handle->maj_ver = suof_ptr->maj_ver;
1244 	suof_handle->fw_type = suof_ptr->fw_type;
1245 	return 0;
1246 }
1247 
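/*
 * Note: a signed image is laid out as CSS header, FWSK public key
 * (modulus then exponent), CSS signature, then the image body; the
 * pointers below carve those regions out of the SUOF buffer using the
 * device-dependent CSS length macros.
 */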
1248 static void
1249 qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
1250 		  struct icp_qat_suof_img_hdr *suof_img_hdr,
1251 		  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1252 {
1253 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1254 	const struct icp_qat_simg_ae_mode *ae_mode;
1255 	struct icp_qat_suof_objhdr *suof_objhdr;
1256 	unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));
1257 
1258 	suof_img_hdr->simg_buf =
1259 	    (suof_handle->suof_buf + suof_chunk_hdr->offset +
1260 	     sizeof(*suof_objhdr));
1261 	suof_img_hdr->simg_len =
1262 	    ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf +
1263 						       suof_chunk_hdr->offset))
1264 		->img_length;
1265 
1266 	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1267 	suof_img_hdr->css_key =
1268 	    (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr));
1269 	suof_img_hdr->css_signature = suof_img_hdr->css_key +
1270 	    ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
1271 	    ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id);
1272 	suof_img_hdr->css_simg =
1273 	    suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id);
1274 
1275 	ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1276 	suof_img_hdr->ae_mask = ae_mode->ae_mask;
1277 	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1278 	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1279 	suof_img_hdr->fw_type = ae_mode->fw_type;
1280 }
1281 
1282 static void
1283 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1284 			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1285 {
1286 	char **sym_str = (char **)&suof_handle->sym_str;
1287 	unsigned int *sym_size = &suof_handle->sym_size;
1288 	struct icp_qat_suof_strtable *str_table_obj;
1289 
1290 	*sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset +
1291 						 suof_handle->suof_buf);
1292 	*sym_str =
1293 	    (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset +
1294 				sizeof(str_table_obj->tab_length));
1295 }
1296 
1297 static int
1298 qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1299 			   struct icp_qat_suof_img_hdr *img_hdr)
1300 {
1301 	const struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1302 	unsigned int prod_rev, maj_ver, prod_type;
1303 
1304 	prod_type = qat_uclo_get_dev_type(handle);
1305 	img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1306 	prod_rev =
1307 	    PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id);
1308 	if (img_ae_mode->dev_type != prod_type) {
1309 		pr_err("QAT: incompatible product type %x\n",
1310 		       img_ae_mode->dev_type);
1311 		return EINVAL;
1312 	}
1313 	maj_ver = prod_rev & 0xff;
1314 	if (maj_ver > img_ae_mode->devmax_ver ||
1315 	    maj_ver < img_ae_mode->devmin_ver) {
1316 		pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver);
1317 		return EINVAL;
1318 	}
1319 	return 0;
1320 }
1321 
1322 static void
1323 qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1324 {
1325 	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1326 
1327 	free(sobj_handle->img_table.simg_hdr, M_QAT);
1328 	sobj_handle->img_table.simg_hdr = NULL;
1329 	free(handle->sobj_handle, M_QAT);
1330 	handle->sobj_handle = NULL;
1331 }
1332 
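/*
 * Note: swaps the image at img_id with the last entry of the table, so
 * qat_uclo_map_suof() can force a particular image (the one covering
 * AE0, or the highest AE on GEN4) to the tail of the load order.
 */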
1333 static void
1334 qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1335 		  unsigned int img_id,
1336 		  unsigned int num_simgs)
1337 {
1338 	struct icp_qat_suof_img_hdr img_header;
1339 
1340 	if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) {
1341 		memcpy(&img_header,
1342 		       &suof_img_hdr[num_simgs - 1],
1343 		       sizeof(*suof_img_hdr));
1344 		memcpy(&suof_img_hdr[num_simgs - 1],
1345 		       &suof_img_hdr[img_id],
1346 		       sizeof(*suof_img_hdr));
1347 		memcpy(&suof_img_hdr[img_id],
1348 		       &img_header,
1349 		       sizeof(*suof_img_hdr));
1350 	}
1351 }
1352 
1353 static int
1354 qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1355 		  const struct icp_qat_suof_filehdr *suof_ptr,
1356 		  int suof_size)
1357 {
1358 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1359 	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1360 	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1361 	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE,
1362 	    aeMax_img = ICP_QAT_UCLO_MAX_AE;
1363 	unsigned int i = 0;
1364 	struct icp_qat_suof_img_hdr img_header;
1365 
1366 	if (!suof_ptr || suof_size == 0) {
1367 		pr_err("QAT: input parameter SUOF pointer/size is invalid\n");
1368 		return EINVAL;
1369 	}
1370 	if (qat_uclo_check_suof_format(suof_ptr))
1371 		return EINVAL;
1372 	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1373 	if (ret)
1374 		return ret;
1375 	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr +
1376 							   sizeof(*suof_ptr));
1377 
1378 	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1379 	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1380 
1381 	if (suof_handle->img_table.num_simgs != 0) {
1382 		suof_img_hdr = malloc(suof_handle->img_table.num_simgs *
1383 					  sizeof(img_header),
1384 				      M_QAT,
1385 				      M_WAITOK | M_ZERO);
1386 		suof_handle->img_table.simg_hdr = suof_img_hdr;
1387 	}
1388 
1389 	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1390 		qat_uclo_map_simg(handle,
1391 				  &suof_img_hdr[i],
1392 				  &suof_chunk_hdr[1 + i]);
1393 		ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]);
1394 		if (ret)
1395 			return ret;
1396 		suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
1397 		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1398 			ae0_img = i;
1399 	}
1400 
1401 	if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
1402 		qat_uclo_tail_img(suof_img_hdr,
1403 				  ae0_img,
1404 				  suof_handle->img_table.num_simgs);
1405 	} else {
1406 		if (suof_handle->img_table.num_simgs == 1)
1407 			return 0;
1408 		qat_uclo_tail_img(suof_img_hdr,
1409 				  ae0_img,
1410 				  suof_handle->img_table.num_simgs - 1);
1411 		for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1412 			if ((suof_img_hdr[i].ae_mask &
1413 			     (0x1 << (handle->hal_handle->ae_max_num - 1))) !=
1414 			    0) {
1415 				aeMax_img = i;
1416 				break;
1417 			}
1418 		}
1419 		qat_uclo_tail_img(suof_img_hdr,
1420 				  aeMax_img,
1421 				  suof_handle->img_table.num_simgs);
1422 	}
1423 	return 0;
1424 }
1425 
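/*
 * Note: firmware descriptors store 64-bit DMA addresses as two 32-bit
 * halves; ADD_ADDR() recombines them, e.g.
 * ADD_ADDR(0x1, 0x80000000) == 0x180000000ull.
 */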
1426 #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low))
1427 #define BITS_IN_DWORD 32
1428 
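/*
 * Note: hands the descriptor's bus address to the FCU, issues the AUTH
 * command, then polls FCU_STATUS every FW_AUTH_WAIT_PERIOD ms for up to
 * FW_AUTH_MAX_RETRY tries; VERI_FAIL aborts immediately, while VERI_DONE
 * with the AUTHFWLD bit set means success.
 */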
1429 static int
1430 qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
1431 		 struct icp_qat_fw_auth_desc *desc)
1432 {
1433 	unsigned int fcu_sts, mem_cfg_err, retry = 0;
1434 	unsigned int fcu_ctl_csr, fcu_sts_csr;
1435 	unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr;
1436 	u64 bus_addr;
1437 
1438 	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) -
1439 	    sizeof(struct icp_qat_auth_chunk);
1440 	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
1441 		fcu_ctl_csr = FCU_CONTROL_C4XXX;
1442 		fcu_sts_csr = FCU_STATUS_C4XXX;
1443 		fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX;
1444 		fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX;
1445 	} else {
1446 		fcu_ctl_csr = FCU_CONTROL;
1447 		fcu_sts_csr = FCU_STATUS;
1448 		fcu_dram_hi_csr = FCU_DRAM_ADDR_HI;
1449 		fcu_dram_lo_csr = FCU_DRAM_ADDR_LO;
1450 	}
1451 	SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
1452 	SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr);
1453 	SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
1454 
1455 	do {
1456 		pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
1457 		fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
1458 		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
1459 			goto auth_fail;
1460 		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
1461 			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
1462 				return 0;
1463 	} while (retry++ < FW_AUTH_MAX_RETRY);
1464 auth_fail:
1465 	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
1466 	       fcu_sts & FCU_AUTH_STS_MASK,
1467 	       retry);
1468 	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1469 		mem_cfg_err =
1470 		    (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT);
1471 		if (mem_cfg_err)
1472 			pr_err("QAT: MEM_CFG_ERR\n");
1473 	}
1474 	return EINVAL;
1475 }
1476 
1477 static int
1478 qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid)
1479 {
1480 	struct icp_qat_suof_handle *sobj_handle;
1481 
1482 	if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))))
1483 		return 0;
1484 
1485 	sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
1486 	if (handle->hal_handle->admin_ae_mask &
1487 	    sobj_handle->img_table.simg_hdr[imgid].ae_mask)
1488 		return 0;
1489 
1490 	return 1;
1491 }
1492 
1493 static int
1494 qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
1495 			   struct icp_qat_fw_auth_desc *desc)
1496 {
1497 	unsigned int i = 0;
1498 	unsigned int fcuSts = 0, fcuAeBroadcastMask = 0;
1499 	unsigned int retry = 0;
1500 	unsigned int fcuStsCsr = 0;
1501 	unsigned int fcuCtlCsr = 0;
1502 	unsigned int loadedAes = 0;
1503 	unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));
1504 
1505 	if (IS_QAT_GEN4(device_id)) {
1506 		fcuCtlCsr = FCU_CONTROL_4XXX;
1507 		fcuStsCsr = FCU_STATUS_4XXX;
1508 	} else {
1509 		pr_err("Uclo_BroadcastLoadFW only applicable for CPM20\n");
1510 		return EINVAL;
1511 	}
1512 
1513 	for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) {
1514 		if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask))
1515 			continue;
1516 
1517 		if (qat_hal_check_ae_active(handle, (unsigned char)i)) {
1518 			pr_err(
1519 			    "Uclo_BroadcastLoadFW error (invalid AE status)\n");
1520 			return EINVAL;
1521 		}
1522 
1523 		if ((desc->ae_mask >> i) & 0x1) {
1524 			fcuAeBroadcastMask |= 1 << i;
1525 		}
1526 	}
1527 
1528 	if (fcuAeBroadcastMask) {
1529 		retry = 0;
1530 		SET_FCU_CSR(handle,
1531 			    FCU_ME_BROADCAST_MASK_TYPE,
1532 			    fcuAeBroadcastMask);
1533 		SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD);
1534 		do {
1535 			msleep(FW_AUTH_WAIT_PERIOD);
1536 			fcuSts = GET_FCU_CSR(handle, fcuStsCsr);
1537 
1538 			if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) {
1539 				pr_err(
1540 				    "Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n",
1541 				    fcuSts & FCU_AUTH_STS_MASK);
1542 				return EINVAL;
1543 			} else if ((fcuSts & FCU_AUTH_STS_MASK) ==
1544 				   FCU_STS_LOAD_DONE) {
1545 				if (IS_QAT_GEN4(device_id))
1546 					loadedAes =
1547 					    GET_FCU_CSR(handle,
1548 							FCU_AE_LOADED_4XXX);
1549 				else
1550 					loadedAes =
1551 					    (fcuSts >> FCU_LOADED_AE_POS);
1552 
1553 				if ((loadedAes & fcuAeBroadcastMask) ==
1554 				    fcuAeBroadcastMask)
1555 					break;
1556 			} else if ((fcuSts & FCU_AUTH_STS_MASK) ==
1557 				   FCU_STS_VERI_DONE) {
1558 				SET_FCU_CSR(handle,
1559 					    fcuCtlCsr,
1560 					    FCU_CTRL_CMD_LOAD);
1561 			}
1562 		} while (retry++ < FW_BROADCAST_MAX_RETRY);
1563 		if (retry > FW_BROADCAST_MAX_RETRY) {
1564 			pr_err(
1565 			    "Uclo_BroadcastLoadFW fail(fcu_status = 0x%x),retry = %d\n",
1566 			    fcuSts & FCU_AUTH_STS_MASK,
1567 			    retry);
1568 			return EINVAL;
1569 		}
1570 	}
1571 	return 0;
1572 }
1573 
1574 static int
1575 qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1576 		    struct icp_firml_dram_desc *dram_desc,
1577 		    unsigned int size)
1578 {
1579 	int ret;
1580 
1581 	ret = bus_dma_mem_create(&dram_desc->dram_mem,
1582 				 handle->accel_dev->dma_tag,
1583 				 1,
1584 				 BUS_SPACE_MAXADDR,
1585 				 size,
1586 				 0);
1587 	if (ret != 0)
1588 		return ret;
1589 	dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr;
1590 	dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr;
1591 	dram_desc->dram_size = size;
1592 	return 0;
1593 }
1594 
1595 static void
1596 qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1597 		   struct icp_firml_dram_desc *dram_desc)
1598 {
1599 	if (handle && dram_desc && dram_desc->dram_base_addr_v)
1600 		bus_dma_mem_free(&dram_desc->dram_mem);
1601 
1602 	if (dram_desc)
1603 		explicit_bzero(dram_desc, sizeof(*dram_desc));
1604 }
1605 
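/*
 * Build the authentication descriptor the FCU expects for a signed
 * image. The image is copied into a freshly allocated DRAM region laid
 * out as:
 *
 *	auth chunk (with descriptor) | CSS header | FWSK modulus | pad |
 *	exponent | signature | image body
 *
 * and the descriptor at the start of the region is filled with the
 * high/low bus addresses of each part. For AE firmware the image body
 * additionally carries an AE-mode block and init sequence ahead of the
 * microcode instructions.
 */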
static int
qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
		     const char *image,
		     unsigned int size,
		     struct icp_firml_dram_desc *img_desc,
		     struct icp_qat_fw_auth_desc **desc)
{
	const struct icp_qat_css_hdr *css_hdr =
	    (const struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));

	if (size >
	    (ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return EINVAL;
	}
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
	    ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset :
	    size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset;
	if (qat_uclo_simg_alloc(handle, img_desc, length)) {
		pr_err("QAT: error, contiguous DRAM allocation failed\n");
		return ENOMEM;
	}

	auth_chunk = img_desc->dram_base_addr_v;
	auth_chunk->chunk_size = img_desc->dram_size;
	auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr;
	virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset;
	bus_addr = img_desc->dram_bus_addr + simg_offset;
	auth_desc = img_desc->dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
	    sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (const void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id));
	/* padding */
	explicit_bzero((void *)(uintptr_t)(
			   virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)),
		       ICP_QAT_CSS_FWSK_PAD_LEN(device_id));

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr +
				   ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
				   ICP_QAT_CSS_FWSK_PAD_LEN(device_id)),
	       (const void *)(image + sizeof(*css_hdr) +
			      ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) +
	    ICP_QAT_CSS_FWSK_PUB_LEN(device_id);
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id);
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (const void *)(image + sizeof(*css_hdr) +
			      ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
			      ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)),
	       ICP_QAT_CSS_SIGNATURE_LEN(device_id));

	bus_addr =
	    ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) +
	    ICP_QAT_CSS_SIGNATURE_LEN(device_id);
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id);

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id);
	memcpy((void *)(uintptr_t)virt_addr,
	       (const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
		    sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high =
		    (unsigned int)(bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high =
		    (unsigned int)(bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
		virt_addr += sizeof(struct icp_qat_css_hdr) +
		    ICP_QAT_CSS_FWSK_PUB_LEN(device_id) +
		    ICP_QAT_CSS_SIGNATURE_LEN(device_id);
		auth_desc->ae_mask =
		    ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask &
		    handle->cfg_ae_mask;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

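/*
 * Issue the FCU LOAD command for each authenticated AE image, one AE at
 * a time, and poll the FCU status CSR until the "loaded" mask reports
 * the target AE or the retry budget is exhausted.
 */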
static int
qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
		 struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i = 0;
	unsigned int fcu_sts;
	unsigned int fcu_sts_csr, fcu_ctl_csr;
	unsigned int loaded_aes = FCU_LOADED_AE_POS;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		fcu_ctl_csr = FCU_CONTROL_C4XXX;
		fcu_sts_csr = FCU_STATUS_C4XXX;
	} else {
		fcu_ctl_csr = FCU_CONTROL;
		fcu_sts_csr = FCU_STATUS;
	}

	for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num)
	{
		int retry = 0;

		if (!((desc->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return EINVAL;
		}
		SET_FCU_CSR(handle,
			    fcu_ctl_csr,
			    (FCU_CTRL_CMD_LOAD |
			     (IS_QAT_GEN4(
				  pci_get_device(GET_DEV(handle->accel_dev))) ?
				  (1 << FCU_CTRL_BROADCAST_POS) :
				  0) |
			     (i << FCU_CTRL_AE_POS)));

		do {
			pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
			if ((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) {
				loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device(
						 GET_DEV(handle->accel_dev))) ?
				    GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) :
				    (fcu_sts >> FCU_LOADED_AE_POS);
				if (loaded_aes & (1 << i))
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load timed out, retry = %d\n",
			       retry);
			return EINVAL;
		}
	}
	return 0;
}

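/*
 * Attach a SUOF handle to the loader and delegate parsing to
 * qat_uclo_map_suof(); on failure the partially constructed handle is
 * torn down again.
 */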
static int
qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
		      const void *addr_ptr,
		      int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO);
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return EINVAL;
	}
	return 0;
}

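/*
 * Write the MMP image. With authentication enabled the image is mapped
 * and pushed through the FCU; otherwise it is written straight into
 * SRAM, except on GEN4 (no unsigned MMP, so PKE stays disabled) and
 * C3XXX (unsigned MMP unsupported).
 */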
int
qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		   const void *addr_ptr,
		   int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_firml_dram_desc img_desc;
	int status = 0;

	if (handle->fw_auth) {
		status = qat_uclo_map_auth_fw(
		    handle, addr_ptr, mem_size, &img_desc, &desc);
		if (!status)
			status = qat_uclo_auth_fw(handle, desc);

		qat_uclo_simg_free(handle, &img_desc);
	} else {
		if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
			device_printf(
			    NULL, "QAT: PKE service is not allowed because ");
			device_printf(NULL, "MMP fw will not be loaded for ");
			device_printf(NULL,
				      "device 0x%x",
				      pci_get_device(
					  GET_DEV(handle->accel_dev)));
			return status;
		}
		if (pci_get_device(GET_DEV(handle->accel_dev)) ==
		    ADF_C3XXX_PCI_DEVICE_ID) {
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
			return EINVAL;
		}
		status = qat_uclo_wr_sram_by_words(handle,
						   handle->hal_sram_offset,
						   addr_ptr,
						   mem_size);
	}
	return status;
}

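/*
 * Copy the caller's UOF buffer, validate its file header, locate the
 * object chunk, and parse it into the loader's object handle. All
 * error paths unwind the allocations made here.
 */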
static int
qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
		     const void *addr_ptr,
		     int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO);
	objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK);
	bcopy(addr_ptr, objhdl->obj_buf, mem_size);
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf,
					     filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	free(objhdl->obj_hdr, M_QAT);
out_objhdr_err:
	free(objhdl->obj_buf, M_QAT);
	free(objhdl, M_QAT);
	return ENOMEM;
}

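/*
 * Record the MOF file header in the MOF handle and verify its checksum,
 * computed over everything following the checksum field (i.e. from
 * min_ver to the end of the file).
 */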
static int
qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
			  const struct icp_qat_mof_file_hdr *mof_ptr,
			  u32 mof_size)
{
	unsigned int checksum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;

	mobj_handle->file_id = ICP_QAT_MOF_FID;
	mobj_handle->mof_buf = (const char *)mof_ptr;
	mobj_handle->mof_size = mof_size;

	min_ver_offset =
	    mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver);
	checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver,
					      min_ver_offset);
	if (checksum != mof_ptr->checksum) {
		pr_err("QAT: incorrect MOF checksum\n");
		return EINVAL;
	}
	mobj_handle->checksum = mof_ptr->checksum;
	mobj_handle->min_ver = mof_ptr->min_ver;
	mobj_handle->maj_ver = mof_ptr->maj_ver;
	return 0;
}

void
qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;

	free(mobj_handle->obj_table.obj_hdr, M_QAT);
	mobj_handle->obj_table.obj_hdr = NULL;
	free(handle->mobj_handle, M_QAT);
	handle->mobj_handle = NULL;
}

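/*
 * Look up an object by name in the MOF object table and return its
 * buffer and size; EFAULT if no entry matches.
 */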
static int
qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
			     const char *obj_name,
			     const char **obj_ptr,
			     unsigned int *obj_size)
{
	unsigned int i;
	struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;

	for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
		if (!strncmp(obj_hdr[i].obj_name,
			     obj_name,
			     ICP_QAT_SUOF_OBJ_NAME_LEN)) {
			*obj_ptr = obj_hdr[i].obj_buf;
			*obj_size = obj_hdr[i].obj_size;
			break;
		}
	}

	if (i >= mobj_handle->obj_table.num_objs) {
		pr_err("QAT: object %s is not found inside MOF\n", obj_name);
		return EFAULT;
	}
	return 0;
}

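/*
 * Fill one object-table entry from a UOF or SUOF chunk header: resolve
 * the chunk offset against the matching objects region and the chunk
 * name against the symbol string table.
 */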
static int
qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
			  struct icp_qat_mof_objhdr *mobj_hdr,
			  struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
{
	if ((strncmp((char *)obj_chunkhdr->chunk_id,
		     ICP_QAT_UOF_IMAG,
		     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) {
		mobj_hdr->obj_buf =
		    (const char *)((unsigned long)obj_chunkhdr->offset +
				   mobj_handle->uobjs_hdr);
	} else if ((strncmp((char *)(obj_chunkhdr->chunk_id),
			    ICP_QAT_SUOF_IMAG,
			    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) {
		mobj_hdr->obj_buf =
		    (const char *)((unsigned long)obj_chunkhdr->offset +
				   mobj_handle->sobjs_hdr);
	} else {
		pr_err("QAT: unsupported chunk id\n");
		return EINVAL;
	}
	mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
	mobj_hdr->obj_name =
	    (char *)(obj_chunkhdr->name + mobj_handle->sym_str);
	return 0;
}

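/*
 * Build the object table from the UOF and SUOF object regions: allocate
 * one entry per chunk, map every chunk, and verify that the number of
 * mapped entries matches the chunk counts in the region headers.
 */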
static int
qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
{
	struct icp_qat_mof_objhdr *mof_obj_hdr;
	const struct icp_qat_mof_obj_hdr *uobj_hdr;
	const struct icp_qat_mof_obj_hdr *sobj_hdr;
	struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
	struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
	unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0;
	unsigned int *valid_chunks = NULL;
	int ret, i;

	uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
	sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
	if (uobj_hdr)
		uobj_chunk_num = uobj_hdr->num_chunks;
	if (sobj_hdr)
		sobj_chunk_num = sobj_hdr->num_chunks;

	mof_obj_hdr = (struct icp_qat_mof_objhdr *)
	    malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr),
		   M_QAT,
		   M_WAITOK | M_ZERO);

	mobj_handle->obj_table.obj_hdr = mof_obj_hdr;
	valid_chunks = &mobj_handle->obj_table.num_objs;
	uobj_chunkhdr =
	    (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr +
						sizeof(*uobj_hdr));
	sobj_chunkhdr =
	    (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr +
						sizeof(*sobj_hdr));

	/* map uof objects */
	for (i = 0; i < uobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mof_obj_hdr[*valid_chunks],
						&uobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunks)++;
	}

	/* map suof objects */
	for (i = 0; i < sobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mof_obj_hdr[*valid_chunks],
						&sobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunks)++;
	}

	if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) {
		pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
		return EINVAL;
	}
	return 0;
}

static void
qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
			 struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	char **sym_str = (char **)&mobj_handle->sym_str;
	unsigned int *sym_size = &mobj_handle->sym_size;
	struct icp_qat_mof_str_table *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset +
						 mobj_handle->mof_buf);
	*sym_str =
	    (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset +
				sizeof(str_table_obj->tab_len));
}

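/*
 * Dispatch one top-level MOF chunk by id: the symbol string table, the
 * UOF objects region, or the SUOF objects region.
 */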
static void
qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
		       struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	if (!strncmp(mof_chunkhdr->chunk_id,
		     ICP_QAT_MOF_SYM_OBJS,
		     ICP_QAT_MOF_OBJ_ID_LEN))
		qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
	else if (!strncmp(mof_chunkhdr->chunk_id,
			  ICP_QAT_UOF_OBJS,
			  ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->uobjs_hdr =
		    mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset;
	else if (!strncmp(mof_chunkhdr->chunk_id,
			  ICP_QAT_SUOF_OBJS,
			  ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->sobjs_hdr =
		    mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset;
}

static int
qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr)
{
	int maj = mof_hdr->maj_ver & 0xff;
	int min = mof_hdr->min_ver & 0xff;

	if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
		return EINVAL;
	}

	if (mof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: MOF chunk amount is incorrect\n");
		return EINVAL;
	}
	if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
		pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
		       maj,
		       min);
		return EINVAL;
	}
	return 0;
}

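/*
 * Entry point for MOF parsing. Plain UOF/SUOF buffers are passed
 * through untouched; otherwise the MOF is validated, its chunks are
 * mapped, and the named object is looked up inside it.
 */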
static int
qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
		     const struct icp_qat_mof_file_hdr *mof_ptr,
		     u32 mof_size,
		     const char *obj_name,
		     const char **obj_ptr,
		     unsigned int *obj_size)
{
	struct icp_qat_mof_handle *mobj_handle;
	struct icp_qat_mof_chunkhdr *mof_chunkhdr;
	unsigned short chunks_num;
	int ret;
	unsigned int i;

	if (mof_ptr->file_id == ICP_QAT_UOF_FID ||
	    mof_ptr->file_id == ICP_QAT_SUOF_FID) {
		if (obj_ptr)
			*obj_ptr = (const char *)mof_ptr;
		if (obj_size)
			*obj_size = (unsigned int)mof_size;
		return 0;
	}
	if (qat_uclo_check_mof_format(mof_ptr))
		return EINVAL;
	mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO);
	handle->mobj_handle = mobj_handle;
	ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
	if (ret)
		return ret;
	mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr +
						       sizeof(*mof_ptr));
	chunks_num = mof_ptr->num_chunks;
	/* Parse the MOF file chunks. */
	for (i = 0; i < chunks_num; i++)
		qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
	/* The sym_str table and at least one objects region must exist. */
	if (!mobj_handle->sym_str ||
	    (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
		return EINVAL;
	ret = qat_uclo_map_objs_from_mof(mobj_handle);
	if (ret)
		return ret;
	/* Seek the specified UOF object inside the MOF. */
	ret = qat_uclo_seek_obj_inside_mof(mobj_handle,
					   obj_name,
					   obj_ptr,
					   obj_size);
	if (ret)
		return ret;
	return 0;
}

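/*
 * Public entry point for mapping a firmware image. When obj_name is
 * set, the buffer is treated as a (possibly MOF-wrapped) container and
 * the named object is extracted first; the result is then parsed as
 * SUOF or UOF depending on handle->fw_auth.
 *
 * Typical call sequence (sketch only; "fw" stands for a firmware
 * buffer the caller obtained elsewhere, e.g. via firmware_get(9)):
 *
 *	if (qat_uclo_map_obj(handle, fw->data, fw->datasize, obj_name))
 *		goto err;
 *	if (qat_uclo_wr_all_uimage(handle))
 *		goto err;
 *	qat_uclo_del_obj(handle);
 */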
int
qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		 const void *addr_ptr,
		 u32 mem_size,
		 const char *obj_name)
{
	const char *obj_addr;
	u32 obj_size;
	int ret;

	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return EINVAL;

	if (obj_name) {
		ret = qat_uclo_map_mof_obj(
		    handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size);
		if (ret)
			return ret;
	} else {
		obj_addr = addr_ptr;
		obj_size = mem_size;
	}

	return (handle->fw_auth) ?
	    qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
	    qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
}

void
qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (handle->mobj_handle)
		qat_uclo_del_mof(handle);
	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;

	free(obj_handle->uword_buf, M_QAT);
	for (a = 0; a < obj_handle->uimage_num; a++)
		free(obj_handle->ae_uimage[a].page, M_QAT);

	for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
	}

	free(obj_handle->obj_hdr, M_QAT);
	free(obj_handle->obj_buf, M_QAT);
	free(obj_handle, M_QAT);
	handle->obj_handle = NULL;
}

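/*
 * Resolve the microword at a given ustore address. If the address falls
 * inside one of the page's uwblocks, the stored word is returned
 * (masked to the valid uword bits); otherwise, or when the stored word
 * is INVLD_UWORD, the image's fill pattern is used instead.
 */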
static void
qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
		     struct icp_qat_uclo_encap_page *encap_page,
		     uint64_t *uword,
		     unsigned int addr_p,
		     unsigned int raddr,
		     uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i, addr;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	addr = (encap_page->page_region) ? raddr : addr_p;
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (addr >= encap_page->uwblock[i].start_addr &&
		    addr <= encap_page->uwblock[i].start_addr +
			    encap_page->uwblock[i].words_num - 1) {
			addr -= encap_page->uwblock[i].start_addr;
			addr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd,
			       (void *)(((uintptr_t)encap_page->uwblock[i]
					     .micro_words) +
					addr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

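/*
 * Stream one encapsulated page into an AE's ustore in UWORD_CPYBUF_SIZE
 * batches, filling gaps with the image's fill pattern. Shared-ustore
 * AEs (pre-GEN4) take the coalescing write path.
 */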
static void
qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_uclo_encap_page *encap_page,
			    unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat,
	       obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle,
					     encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i,
					     fill_pat);

		if (obj_handle->ae_data[ae].shareable_ustore &&
		    !IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))))
			/* copy the buffer to ustore */
			qat_hal_wr_coalesce_uwords(handle,
						   (unsigned char)ae,
						   uw_physical_addr,
						   cpylen,
						   obj_handle->uword_buf);
		else
			/* copy the buffer to ustore */
			qat_hal_wr_uwords(handle,
					  (unsigned char)ae,
					  uw_physical_addr,
					  cpylen,
					  obj_handle->uword_buf);
		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

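/*
 * For every configured AE assigned to the image, write its default page
 * into ustore, mark the page current for each assigned context, and
 * point the live contexts' PCs at the image entry address.
 */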
static void
qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
			struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae = 0;
	int ctx;
	struct icp_qat_uclo_aedata *aed;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address
	 */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
		unsigned long ae_assigned = image->ae_assigned;

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		if (!test_bit(ae, &ae_assigned))
			continue;

		aed = &obj_handle->ae_data[ae];
		/* find the slice to which this image is assigned */
		for (s = 0; s < aed->slice_num; s++) {
			if (image->ctx_assigned &
			    aed->ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= aed->slice_num)
			continue;
		page = aed->ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = aed->ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			aed->ae_slices[s].cur_page[ctx] =
			    (ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle,
				     (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle,
			       (unsigned char)ae,
			       image->ctx_assigned,
			       image->entry_address);
	}
}

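/*
 * Authenticate and load every signed image in the SUOF image table,
 * using the broadcast path where the image calls for it. The DRAM
 * staging area is freed after each image, including on failure.
 */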
static int
qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_firml_dram_desc img_desc;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		if (qat_uclo_map_auth_fw(handle,
					 (const char *)simg_hdr[i].simg_buf,
					 (unsigned int)(simg_hdr[i].simg_len),
					 &img_desc,
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_is_broadcast(handle, i)) {
			if (qat_uclo_broadcast_load_fw(handle, desc))
				goto wr_err;
		} else {
			if (qat_uclo_load_fw(handle, desc))
				goto wr_err;
		}
		qat_uclo_simg_free(handle, &img_desc);
	}

	return 0;
wr_err:
	qat_uclo_simg_free(handle, &img_desc);
	return EINVAL;
}

static int
qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

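/*
 * Write all previously mapped images to the AEs, dispatching to the
 * signed (SUOF) or unsigned (UOF) path based on handle->fw_auth.
 */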
int
qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
				   qat_uclo_wr_uof_img(handle);
}

int
qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
			 unsigned int cfg_ae_mask)
{
	if (!cfg_ae_mask)
		return EINVAL;

	handle->cfg_ae_mask = cfg_ae_mask;
	return 0;
}