xref: /linux/drivers/crypto/caam/caamalg_qi2.c (revision dd093fb0)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/fsl/mc.h>
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
22 #include <crypto/xts.h>
23 #include <asm/unaligned.h>
24 
/* Registration priority of these offloaded algorithms vs. other providers */
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;
41 
/*
 * struct caam_alg_entry - per-algorithm template parameters shared by the
 * AEAD and skcipher wrappers below
 * @dev: dpseci device servicing this algorithm
 * @class1_alg_type: CAAM class 1 (cipher) algorithm type bits
 * @class2_alg_type: CAAM class 2 (authentication) algorithm type bits
 * @rfc3686: true for RFC3686 (CTR with nonce) variants
 * @geniv: true when the IV is generated by the accelerator (givencrypt)
 * @nodkp: true when no DKP (Derived Key Protocol) split key is involved
 */
struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};
50 
/*
 * struct caam_aead_alg - AEAD algorithm wrapper
 * @aead: generic crypto API AEAD descriptor
 * @caam: driver-specific template parameters
 * @registered: set once the algorithm is registered with the crypto API
 */
struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
56 
/*
 * struct caam_skcipher_alg - skcipher algorithm wrapper
 * @skcipher: generic crypto API skcipher descriptor
 * @caam: driver-specific template parameters
 * @registered: set once the algorithm is registered with the crypto API
 */
struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};
62 
/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key:  [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *		      to unsupported xts key lengths
 * @fallback: xts fallback tfm
 *
 * @flc and @key stay DMA-mapped for the session's lifetime; the setkey /
 * set_sh_desc paths update them in place and re-sync via
 * dma_sync_single_for_device() rather than remapping.
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};
91 
92 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
93 				     dma_addr_t iova_addr)
94 {
95 	phys_addr_t phys_addr;
96 
97 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
98 				   iova_addr;
99 
100 	return phys_to_virt(phys_addr);
101 }
102 
/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * The returned memory is zero-initialized.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}
119 
/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
132 
133 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
134 {
135 	switch (crypto_tfm_alg_type(areq->tfm)) {
136 	case CRYPTO_ALG_TYPE_SKCIPHER:
137 		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
138 	case CRYPTO_ALG_TYPE_AEAD:
139 		return aead_request_ctx_dma(
140 			container_of(areq, struct aead_request, base));
141 	case CRYPTO_ALG_TYPE_AHASH:
142 		return ahash_request_ctx_dma(ahash_request_cast(areq));
143 	default:
144 		return ERR_PTR(-EINVAL);
145 	}
146 }
147 
148 static void caam_unmap(struct device *dev, struct scatterlist *src,
149 		       struct scatterlist *dst, int src_nents,
150 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
151 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
152 		       int qm_sg_bytes)
153 {
154 	if (dst != src) {
155 		if (src_nents)
156 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
157 		if (dst_nents)
158 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
159 	} else {
160 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
161 	}
162 
163 	if (iv_dma)
164 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
165 
166 	if (qm_sg_bytes)
167 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
168 }
169 
/*
 * aead_set_sh_desc - (re)build the encrypt and decrypt shared descriptors
 * for an authenc-style AEAD session and sync them to the device.
 *
 * Silently succeeds (returns 0) until both the key and the authsize have
 * been configured, since the descriptors cannot be built before then.
 * Returns -EINVAL if the descriptor would not fit even with both keys
 * referenced by pointer.
 */
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		/* nonce was appended after the cipher key by aead_setkey() */
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	/* cipher key follows the (padded) split authentication key */
	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	/* inl_mask bit 0/1: auth/cipher key fits inline in the descriptor */
	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
276 
277 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
278 {
279 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
280 
281 	ctx->authsize = authsize;
282 	aead_set_sh_desc(authenc);
283 
284 	return 0;
285 }
286 
/*
 * aead_setkey - split the authenc-encoded key blob into its authentication
 * and encryption parts, lay them out in ctx->key ([padded auth key][enc key]),
 * sync the key area to the device and rebuild the shared descriptors.
 *
 * Returns -EINVAL on a malformed blob or if the combined key would overflow
 * ctx->key. The stack copy of the extracted keys is always zeroized.
 */
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	/* padded length of the split (MDHA) key derived from the auth key */
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	/* key area is persistently mapped; only a sync is needed */
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
326 
327 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
328 			    unsigned int keylen)
329 {
330 	struct crypto_authenc_keys keys;
331 	int err;
332 
333 	err = crypto_authenc_extractkeys(&keys, key, keylen);
334 	if (unlikely(err))
335 		goto out;
336 
337 	err = -EINVAL;
338 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
339 		goto out;
340 
341 	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
342 	      aead_setkey(aead, key, keylen);
343 
344 out:
345 	memzero_explicit(&keys, sizeof(keys));
346 	return err;
347 }
348 
/*
 * aead_edesc_alloc - allocate and populate the extended descriptor for one
 * AEAD request: DMA-map src/dst scatterlists and the IV, build the hardware
 * S/G table ([assoclen][IV][src][dst]) and fill in the input/output frame
 * list entries of the request context.
 *
 * Returns the edesc on success or an ERR_PTR; on every error path all
 * mappings taken so far are released and the edesc is freed.
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	/* may only sleep if the caller allows it */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		/* out-of-place: output grows (encrypt) or shrinks (decrypt)
		 * by the ICV size */
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		/* in-place: src must also hold the appended ICV on encrypt */
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* geniv decrypt reads the IV from the input; otherwise pass it */
	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	/* S/G table + IV live in the edesc's cache buffer; check they fit */
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	/* table layout: assoclen word, optional IV, src, optional dst */
	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	/* input covers the 4-byte assoclen word, IV, AD and payload */
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			/* output reuses the src entries of the S/G table,
			 * skipping the assoclen and IV entries */
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}
587 
/*
 * chachapoly_set_sh_desc - (re)build the ChaCha20-Poly1305 encrypt and
 * decrypt shared descriptors and sync them to the device.
 *
 * No-op (returns 0) until both key and authsize are configured.
 */
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
619 
620 static int chachapoly_setauthsize(struct crypto_aead *aead,
621 				  unsigned int authsize)
622 {
623 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
624 
625 	if (authsize != POLY1305_DIGEST_SIZE)
626 		return -EINVAL;
627 
628 	ctx->authsize = authsize;
629 	return chachapoly_set_sh_desc(aead);
630 }
631 
632 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
633 			     unsigned int keylen)
634 {
635 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
636 	unsigned int ivsize = crypto_aead_ivsize(aead);
637 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
638 
639 	if (keylen != CHACHA_KEY_SIZE + saltlen)
640 		return -EINVAL;
641 
642 	ctx->cdata.key_virt = key;
643 	ctx->cdata.keylen = keylen - saltlen;
644 
645 	return chachapoly_set_sh_desc(aead);
646 }
647 
/*
 * gcm_set_sh_desc - (re)build the AES-GCM encrypt and decrypt shared
 * descriptors, choosing per-direction whether the key can be embedded in
 * the descriptor or must be referenced by DMA address, and sync them to
 * the device. No-op until both key and authsize are configured.
 */
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	/* descriptor buffer space left after job I/O and an inlined key */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
704 
705 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
706 {
707 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
708 	int err;
709 
710 	err = crypto_gcm_check_authsize(authsize);
711 	if (err)
712 		return err;
713 
714 	ctx->authsize = authsize;
715 	gcm_set_sh_desc(authenc);
716 
717 	return 0;
718 }
719 
720 static int gcm_setkey(struct crypto_aead *aead,
721 		      const u8 *key, unsigned int keylen)
722 {
723 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
724 	struct device *dev = ctx->dev;
725 	int ret;
726 
727 	ret = aes_check_keylen(keylen);
728 	if (ret)
729 		return ret;
730 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
731 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
732 
733 	memcpy(ctx->key, key, keylen);
734 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
735 	ctx->cdata.keylen = keylen;
736 
737 	return gcm_set_sh_desc(aead);
738 }
739 
/*
 * rfc4106_set_sh_desc - (re)build the RFC4106 (GCM with implicit nonce
 * salt) encrypt and decrypt shared descriptors, deciding per direction
 * whether the key fits inline, and sync them to the device.
 * No-op until both key and authsize are configured.
 */
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	/* descriptor buffer space left after job I/O and an inlined key */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
798 
799 static int rfc4106_setauthsize(struct crypto_aead *authenc,
800 			       unsigned int authsize)
801 {
802 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
803 	int err;
804 
805 	err = crypto_rfc4106_check_authsize(authsize);
806 	if (err)
807 		return err;
808 
809 	ctx->authsize = authsize;
810 	rfc4106_set_sh_desc(authenc);
811 
812 	return 0;
813 }
814 
/*
 * rfc4106_setkey - accept key material laid out as {AES key, 4-byte salt},
 * store the whole blob in ctx->key and rebuild the shared descriptors with
 * only the AES portion counted as key length.
 */
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	/*
	 * NOTE(review): only the AES key portion (keylen - 4) is synced to
	 * the device here; presumably the salt is consumed from the CPU copy
	 * when the descriptor is constructed — verify against
	 * cnstr_shdsc_rfc4106_*().
	 */
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}
840 
/*
 * rfc4543_set_sh_desc - (re)build the RFC4543 (GMAC) encrypt and decrypt
 * shared descriptors, deciding per direction whether the key fits inline,
 * and sync them to the device. No-op until both key and authsize are
 * configured.
 */
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	/* descriptor buffer space left after job I/O and an inlined key */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
899 
900 static int rfc4543_setauthsize(struct crypto_aead *authenc,
901 			       unsigned int authsize)
902 {
903 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
904 
905 	if (authsize != 16)
906 		return -EINVAL;
907 
908 	ctx->authsize = authsize;
909 	rfc4543_set_sh_desc(authenc);
910 
911 	return 0;
912 }
913 
/*
 * rfc4543_setkey - accept key material laid out as {AES key, 4-byte salt},
 * store the whole blob in ctx->key and rebuild the shared descriptors with
 * only the AES portion counted as key length.
 */
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	/*
	 * NOTE(review): as in rfc4106_setkey(), only keylen - 4 bytes are
	 * synced; the salt is presumably read from the CPU copy during
	 * descriptor construction — verify against cnstr_shdsc_rfc4543_*().
	 */
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}
939 
/*
 * skcipher_setkey - common skcipher key-setting path: record the key
 * (always inlined into the descriptor) and rebuild both the encrypt and
 * decrypt shared descriptors.
 *
 * @ctx1_iv_off: byte offset at which the IV is loaded into the CONTEXT1
 *               register (0 for plain block modes, 16 for CTR, 16 + nonce
 *               size for RFC3686 — see the callers below).
 */
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
982 
983 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
984 			       const u8 *key, unsigned int keylen)
985 {
986 	int err;
987 
988 	err = aes_check_keylen(keylen);
989 	if (err)
990 		return err;
991 
992 	return skcipher_setkey(skcipher, key, keylen, 0);
993 }
994 
995 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
996 				   const u8 *key, unsigned int keylen)
997 {
998 	u32 ctx1_iv_off;
999 	int err;
1000 
1001 	/*
1002 	 * RFC3686 specific:
1003 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1004 	 *	| *key = {KEY, NONCE}
1005 	 */
1006 	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1007 	keylen -= CTR_RFC3686_NONCE_SIZE;
1008 
1009 	err = aes_check_keylen(keylen);
1010 	if (err)
1011 		return err;
1012 
1013 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1014 }
1015 
1016 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1017 			       const u8 *key, unsigned int keylen)
1018 {
1019 	u32 ctx1_iv_off;
1020 	int err;
1021 
1022 	/*
1023 	 * AES-CTR needs to load IV in CONTEXT1 reg
1024 	 * at an offset of 128bits (16bytes)
1025 	 * CONTEXT1[255:128] = IV
1026 	 */
1027 	ctx1_iv_off = 16;
1028 
1029 	err = aes_check_keylen(keylen);
1030 	if (err)
1031 		return err;
1032 
1033 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1034 }
1035 
1036 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1037 				    const u8 *key, unsigned int keylen)
1038 {
1039 	if (keylen != CHACHA_KEY_SIZE)
1040 		return -EINVAL;
1041 
1042 	return skcipher_setkey(skcipher, key, keylen, 0);
1043 }
1044 
1045 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1046 			       const u8 *key, unsigned int keylen)
1047 {
1048 	return verify_skcipher_des_key(skcipher, key) ?:
1049 	       skcipher_setkey(skcipher, key, keylen, 0);
1050 }
1051 
1052 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1053 			        const u8 *key, unsigned int keylen)
1054 {
1055 	return verify_skcipher_des3_key(skcipher, key) ?:
1056 	       skcipher_setkey(skcipher, key, keylen, 0);
1057 }
1058 
/*
 * xts_skcipher_setkey() - program an AES-XTS session, arming the software
 * fallback when the key size or SEC era requires it
 * @skcipher: transform whose key is being set
 * @key: concatenated XTS key pair
 * @keylen: total length of both keys in bytes
 *
 * Keys other than 2x128 / 2x256 bits, and any key on SEC era <= 8, are also
 * given to the fallback tfm so requests can be redirected at crypt time.
 * Returns 0 on success or a negative errno from key verification / fallback
 * setkey.
 */
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	/* Rejects odd-length keys and key1 == key2 (FIPS), among others */
	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	/* Non-standard key sizes must always go through the fallback */
	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	/* Key is inlined in the shared descriptor */
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
1108 
/*
 * skcipher_edesc_alloc() - map a skcipher request and build its extended
 * descriptor: DMA-mapped src/dst, a DMAable IV copy, the hardware S/G
 * tables and the input/output frame list entries.
 *
 * Returns the edesc on success or ERR_PTR(-errno); on any failure every
 * mapping made so far is undone.
 */
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	/* May be called from atomic context (e.g. softirq) */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* In-place operation: one bidirectional mapping suffices */
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Input table: IV entry followed by the source entries */
	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	/* Tables + IV must fit in one qi_cache buffer (see qi_cache_zalloc) */
	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	/* Fill the tables: [IV, src] then, if distinct, [dst] and output IV */
	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	/* Trailing output IV entry; both IV entries share iv_dma */
	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	/* Frame list: both sides carry cryptlen + IV */
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	/* In-place: output table overlaps input, skipping the input IV entry */
	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
1257 
/*
 * aead_unmap() - undo all DMA mappings made for an AEAD request:
 * src/dst S/G lists, IV, hardware S/G table and the assoclen buffer.
 */
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	/* assoclen was mapped separately as a 4-byte buffer */
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
1269 
/*
 * skcipher_unmap() - undo all DMA mappings made for a skcipher request:
 * src/dst S/G lists, IV (bidirectional - HW writes the output IV back)
 * and the hardware S/G table.
 */
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}
1280 
/*
 * aead_encrypt_done() - completion callback for AEAD encrypt jobs.
 * Translates the hardware status, unmaps and frees the extended
 * descriptor, then completes the crypto request.
 */
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	/* Non-zero status is a SEC error; convert it to -errno */
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
1301 
/*
 * aead_decrypt_done() - completion callback for AEAD decrypt jobs.
 * Mirror of aead_encrypt_done(): translate status, release resources,
 * complete the request (ICV failures surface via the status code).
 */
static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
1322 
/*
 * aead_encrypt() - .encrypt entry point: build the extended descriptor
 * and enqueue the job with the ENCRYPT flow context.
 *
 * Returns -EINPROGRESS (or -EBUSY when backlogged) on successful enqueue;
 * any other return means the job was not submitted and resources were
 * released here.
 */
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		/* Enqueue failed: the callback will never run, clean up now */
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1350 
/*
 * aead_decrypt() - .decrypt entry point: build the extended descriptor
 * and enqueue the job with the DECRYPT flow context. Same return
 * convention as aead_encrypt().
 */
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		/* Enqueue failed: the callback will never run, clean up now */
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1378 
1379 static int ipsec_gcm_encrypt(struct aead_request *req)
1380 {
1381 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1382 }
1383 
1384 static int ipsec_gcm_decrypt(struct aead_request *req)
1385 {
1386 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1387 }
1388 
/*
 * skcipher_encrypt_done() - completion callback for skcipher encrypt jobs.
 * Translates the hardware status, unmaps the request, copies the
 * hardware-updated output IV back to req->iv, then frees the descriptor
 * and completes the request.
 */
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		/* The IV copy lives right after the S/G tables in the edesc */
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
1426 
/*
 * skcipher_decrypt_done() - completion callback for skcipher decrypt jobs.
 * Mirror of skcipher_encrypt_done(): translate status, unmap, propagate
 * the output IV, free the descriptor and complete the request.
 */
static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		/* The IV copy lives right after the S/G tables in the edesc */
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
1464 
1465 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1466 {
1467 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1468 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1469 
1470 	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1471 }
1472 
/*
 * skcipher_encrypt() - .encrypt entry point. Handles the zero-length
 * shortcut, routes XTS corner cases to the software fallback, otherwise
 * builds an extended descriptor and enqueues the job with the ENCRYPT
 * flow context.
 */
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	/* XTS fallback: era <= 8 with 64+ bit sector, or non-standard key */
	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		/* Enqueue failed: the callback will never run, clean up now */
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1522 
/*
 * skcipher_decrypt() - .decrypt entry point. Mirror of skcipher_encrypt()
 * but enqueues with the DECRYPT flow context.
 */
static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	/* XTS fallback: era <= 8 with 64+ bit sector, or non-standard key */
	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		/* Enqueue failed: the callback will never run, clean up now */
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1572 
/*
 * caam_cra_init() - common per-session init: record algorithm types and
 * DMA-map the flow-context array plus key buffer in a single mapping.
 * @uses_dkp: true when the descriptor uses the Derived Key Protocol, in
 *	      which case the device also writes the region (bidirectional).
 *
 * The mapping covers ctx->flc up to (but excluding) ctx->flc_dma - i.e.
 * all flow contexts and ctx->key, which sit contiguously in caam_ctx.
 * Returns 0 or -ENOMEM.
 */
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	/* Derive per-operation and key DMA addresses from the one mapping */
	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
1600 
/*
 * caam_cra_init_skcipher() - skcipher .init hook. For XTS algorithms a
 * software fallback tfm is allocated (needed for era <= 8 and
 * non-standard key sizes) and the request size is enlarged to hold the
 * fallback sub-request; other algorithms just reserve the caam_request.
 */
static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			dev_err(caam_alg->caam.dev,
				"Failed to allocate %s fallback: %ld\n",
				tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		/* Room for our request plus the fallback's sub-request */
		crypto_skcipher_set_reqsize_dma(
			tfm, sizeof(struct caam_request) +
			     crypto_skcipher_reqsize(fallback));
	} else {
		crypto_skcipher_set_reqsize_dma(tfm,
						sizeof(struct caam_request));
	}

	/* On init failure, don't leak the fallback tfm allocated above */
	ret = caam_cra_init(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}
1638 
/*
 * caam_cra_init_aead() - AEAD .init hook: reserve the caam_request in the
 * request context and run the common init. DKP is used unless the
 * algorithm entry opts out via .nodkp.
 */
static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}
1649 
/*
 * caam_exit_common() - undo the single flow-context + key DMA mapping
 * made by caam_cra_init().
 */
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}
1656 
/*
 * caam_cra_exit() - skcipher .exit hook: free the XTS fallback tfm (if
 * one was allocated at init) and release the common DMA mapping.
 */
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}
1665 
/* AEAD .exit hook: release the common DMA mapping. */
static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);

	caam_exit_common(ctx);
}
1670 
/*
 * Table of skcipher algorithms backed by this driver. Each entry pairs
 * the generic skcipher_alg definition with the CAAM class 1 algorithm
 * selector/AAI used to build its shared descriptors.
 */
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			/* Key sizes include the trailing 4-byte nonce */
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				/* Fallback used for era <= 8 / odd key sizes */
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chacha20_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};
1795 
1796 static struct caam_aead_alg driver_aeads[] = {
1797 	{
1798 		.aead = {
1799 			.base = {
1800 				.cra_name = "rfc4106(gcm(aes))",
1801 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1802 				.cra_blocksize = 1,
1803 			},
1804 			.setkey = rfc4106_setkey,
1805 			.setauthsize = rfc4106_setauthsize,
1806 			.encrypt = ipsec_gcm_encrypt,
1807 			.decrypt = ipsec_gcm_decrypt,
1808 			.ivsize = 8,
1809 			.maxauthsize = AES_BLOCK_SIZE,
1810 		},
1811 		.caam = {
1812 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1813 			.nodkp = true,
1814 		},
1815 	},
1816 	{
1817 		.aead = {
1818 			.base = {
1819 				.cra_name = "rfc4543(gcm(aes))",
1820 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1821 				.cra_blocksize = 1,
1822 			},
1823 			.setkey = rfc4543_setkey,
1824 			.setauthsize = rfc4543_setauthsize,
1825 			.encrypt = ipsec_gcm_encrypt,
1826 			.decrypt = ipsec_gcm_decrypt,
1827 			.ivsize = 8,
1828 			.maxauthsize = AES_BLOCK_SIZE,
1829 		},
1830 		.caam = {
1831 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1832 			.nodkp = true,
1833 		},
1834 	},
1835 	/* Galois Counter Mode */
1836 	{
1837 		.aead = {
1838 			.base = {
1839 				.cra_name = "gcm(aes)",
1840 				.cra_driver_name = "gcm-aes-caam-qi2",
1841 				.cra_blocksize = 1,
1842 			},
1843 			.setkey = gcm_setkey,
1844 			.setauthsize = gcm_setauthsize,
1845 			.encrypt = aead_encrypt,
1846 			.decrypt = aead_decrypt,
1847 			.ivsize = 12,
1848 			.maxauthsize = AES_BLOCK_SIZE,
1849 		},
1850 		.caam = {
1851 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1852 			.nodkp = true,
1853 		}
1854 	},
1855 	/* single-pass ipsec_esp descriptor */
1856 	{
1857 		.aead = {
1858 			.base = {
1859 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1860 				.cra_driver_name = "authenc-hmac-md5-"
1861 						   "cbc-aes-caam-qi2",
1862 				.cra_blocksize = AES_BLOCK_SIZE,
1863 			},
1864 			.setkey = aead_setkey,
1865 			.setauthsize = aead_setauthsize,
1866 			.encrypt = aead_encrypt,
1867 			.decrypt = aead_decrypt,
1868 			.ivsize = AES_BLOCK_SIZE,
1869 			.maxauthsize = MD5_DIGEST_SIZE,
1870 		},
1871 		.caam = {
1872 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1873 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1874 					   OP_ALG_AAI_HMAC_PRECOMP,
1875 		}
1876 	},
1877 	{
1878 		.aead = {
1879 			.base = {
1880 				.cra_name = "echainiv(authenc(hmac(md5),"
1881 					    "cbc(aes)))",
1882 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1883 						   "cbc-aes-caam-qi2",
1884 				.cra_blocksize = AES_BLOCK_SIZE,
1885 			},
1886 			.setkey = aead_setkey,
1887 			.setauthsize = aead_setauthsize,
1888 			.encrypt = aead_encrypt,
1889 			.decrypt = aead_decrypt,
1890 			.ivsize = AES_BLOCK_SIZE,
1891 			.maxauthsize = MD5_DIGEST_SIZE,
1892 		},
1893 		.caam = {
1894 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1895 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1896 					   OP_ALG_AAI_HMAC_PRECOMP,
1897 			.geniv = true,
1898 		}
1899 	},
1900 	{
1901 		.aead = {
1902 			.base = {
1903 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1904 				.cra_driver_name = "authenc-hmac-sha1-"
1905 						   "cbc-aes-caam-qi2",
1906 				.cra_blocksize = AES_BLOCK_SIZE,
1907 			},
1908 			.setkey = aead_setkey,
1909 			.setauthsize = aead_setauthsize,
1910 			.encrypt = aead_encrypt,
1911 			.decrypt = aead_decrypt,
1912 			.ivsize = AES_BLOCK_SIZE,
1913 			.maxauthsize = SHA1_DIGEST_SIZE,
1914 		},
1915 		.caam = {
1916 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1917 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1918 					   OP_ALG_AAI_HMAC_PRECOMP,
1919 		}
1920 	},
1921 	{
1922 		.aead = {
1923 			.base = {
1924 				.cra_name = "echainiv(authenc(hmac(sha1),"
1925 					    "cbc(aes)))",
1926 				.cra_driver_name = "echainiv-authenc-"
1927 						   "hmac-sha1-cbc-aes-caam-qi2",
1928 				.cra_blocksize = AES_BLOCK_SIZE,
1929 			},
1930 			.setkey = aead_setkey,
1931 			.setauthsize = aead_setauthsize,
1932 			.encrypt = aead_encrypt,
1933 			.decrypt = aead_decrypt,
1934 			.ivsize = AES_BLOCK_SIZE,
1935 			.maxauthsize = SHA1_DIGEST_SIZE,
1936 		},
1937 		.caam = {
1938 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1939 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1940 					   OP_ALG_AAI_HMAC_PRECOMP,
1941 			.geniv = true,
1942 		},
1943 	},
1944 	{
1945 		.aead = {
1946 			.base = {
1947 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1948 				.cra_driver_name = "authenc-hmac-sha224-"
1949 						   "cbc-aes-caam-qi2",
1950 				.cra_blocksize = AES_BLOCK_SIZE,
1951 			},
1952 			.setkey = aead_setkey,
1953 			.setauthsize = aead_setauthsize,
1954 			.encrypt = aead_encrypt,
1955 			.decrypt = aead_decrypt,
1956 			.ivsize = AES_BLOCK_SIZE,
1957 			.maxauthsize = SHA224_DIGEST_SIZE,
1958 		},
1959 		.caam = {
1960 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1961 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1962 					   OP_ALG_AAI_HMAC_PRECOMP,
1963 		}
1964 	},
1965 	{
1966 		.aead = {
1967 			.base = {
1968 				.cra_name = "echainiv(authenc(hmac(sha224),"
1969 					    "cbc(aes)))",
1970 				.cra_driver_name = "echainiv-authenc-"
1971 						   "hmac-sha224-cbc-aes-caam-qi2",
1972 				.cra_blocksize = AES_BLOCK_SIZE,
1973 			},
1974 			.setkey = aead_setkey,
1975 			.setauthsize = aead_setauthsize,
1976 			.encrypt = aead_encrypt,
1977 			.decrypt = aead_decrypt,
1978 			.ivsize = AES_BLOCK_SIZE,
1979 			.maxauthsize = SHA224_DIGEST_SIZE,
1980 		},
1981 		.caam = {
1982 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1983 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1984 					   OP_ALG_AAI_HMAC_PRECOMP,
1985 			.geniv = true,
1986 		}
1987 	},
1988 	{
1989 		.aead = {
1990 			.base = {
1991 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1992 				.cra_driver_name = "authenc-hmac-sha256-"
1993 						   "cbc-aes-caam-qi2",
1994 				.cra_blocksize = AES_BLOCK_SIZE,
1995 			},
1996 			.setkey = aead_setkey,
1997 			.setauthsize = aead_setauthsize,
1998 			.encrypt = aead_encrypt,
1999 			.decrypt = aead_decrypt,
2000 			.ivsize = AES_BLOCK_SIZE,
2001 			.maxauthsize = SHA256_DIGEST_SIZE,
2002 		},
2003 		.caam = {
2004 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2005 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2006 					   OP_ALG_AAI_HMAC_PRECOMP,
2007 		}
2008 	},
2009 	{
2010 		.aead = {
2011 			.base = {
2012 				.cra_name = "echainiv(authenc(hmac(sha256),"
2013 					    "cbc(aes)))",
2014 				.cra_driver_name = "echainiv-authenc-"
2015 						   "hmac-sha256-cbc-aes-"
2016 						   "caam-qi2",
2017 				.cra_blocksize = AES_BLOCK_SIZE,
2018 			},
2019 			.setkey = aead_setkey,
2020 			.setauthsize = aead_setauthsize,
2021 			.encrypt = aead_encrypt,
2022 			.decrypt = aead_decrypt,
2023 			.ivsize = AES_BLOCK_SIZE,
2024 			.maxauthsize = SHA256_DIGEST_SIZE,
2025 		},
2026 		.caam = {
2027 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2028 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2029 					   OP_ALG_AAI_HMAC_PRECOMP,
2030 			.geniv = true,
2031 		}
2032 	},
2033 	{
2034 		.aead = {
2035 			.base = {
2036 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2037 				.cra_driver_name = "authenc-hmac-sha384-"
2038 						   "cbc-aes-caam-qi2",
2039 				.cra_blocksize = AES_BLOCK_SIZE,
2040 			},
2041 			.setkey = aead_setkey,
2042 			.setauthsize = aead_setauthsize,
2043 			.encrypt = aead_encrypt,
2044 			.decrypt = aead_decrypt,
2045 			.ivsize = AES_BLOCK_SIZE,
2046 			.maxauthsize = SHA384_DIGEST_SIZE,
2047 		},
2048 		.caam = {
2049 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2050 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2051 					   OP_ALG_AAI_HMAC_PRECOMP,
2052 		}
2053 	},
2054 	{
2055 		.aead = {
2056 			.base = {
2057 				.cra_name = "echainiv(authenc(hmac(sha384),"
2058 					    "cbc(aes)))",
2059 				.cra_driver_name = "echainiv-authenc-"
2060 						   "hmac-sha384-cbc-aes-"
2061 						   "caam-qi2",
2062 				.cra_blocksize = AES_BLOCK_SIZE,
2063 			},
2064 			.setkey = aead_setkey,
2065 			.setauthsize = aead_setauthsize,
2066 			.encrypt = aead_encrypt,
2067 			.decrypt = aead_decrypt,
2068 			.ivsize = AES_BLOCK_SIZE,
2069 			.maxauthsize = SHA384_DIGEST_SIZE,
2070 		},
2071 		.caam = {
2072 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2073 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2074 					   OP_ALG_AAI_HMAC_PRECOMP,
2075 			.geniv = true,
2076 		}
2077 	},
2078 	{
2079 		.aead = {
2080 			.base = {
2081 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2082 				.cra_driver_name = "authenc-hmac-sha512-"
2083 						   "cbc-aes-caam-qi2",
2084 				.cra_blocksize = AES_BLOCK_SIZE,
2085 			},
2086 			.setkey = aead_setkey,
2087 			.setauthsize = aead_setauthsize,
2088 			.encrypt = aead_encrypt,
2089 			.decrypt = aead_decrypt,
2090 			.ivsize = AES_BLOCK_SIZE,
2091 			.maxauthsize = SHA512_DIGEST_SIZE,
2092 		},
2093 		.caam = {
2094 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2095 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2096 					   OP_ALG_AAI_HMAC_PRECOMP,
2097 		}
2098 	},
2099 	{
2100 		.aead = {
2101 			.base = {
2102 				.cra_name = "echainiv(authenc(hmac(sha512),"
2103 					    "cbc(aes)))",
2104 				.cra_driver_name = "echainiv-authenc-"
2105 						   "hmac-sha512-cbc-aes-"
2106 						   "caam-qi2",
2107 				.cra_blocksize = AES_BLOCK_SIZE,
2108 			},
2109 			.setkey = aead_setkey,
2110 			.setauthsize = aead_setauthsize,
2111 			.encrypt = aead_encrypt,
2112 			.decrypt = aead_decrypt,
2113 			.ivsize = AES_BLOCK_SIZE,
2114 			.maxauthsize = SHA512_DIGEST_SIZE,
2115 		},
2116 		.caam = {
2117 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2118 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2119 					   OP_ALG_AAI_HMAC_PRECOMP,
2120 			.geniv = true,
2121 		}
2122 	},
2123 	{
2124 		.aead = {
2125 			.base = {
2126 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2127 				.cra_driver_name = "authenc-hmac-md5-"
2128 						   "cbc-des3_ede-caam-qi2",
2129 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2130 			},
2131 			.setkey = des3_aead_setkey,
2132 			.setauthsize = aead_setauthsize,
2133 			.encrypt = aead_encrypt,
2134 			.decrypt = aead_decrypt,
2135 			.ivsize = DES3_EDE_BLOCK_SIZE,
2136 			.maxauthsize = MD5_DIGEST_SIZE,
2137 		},
2138 		.caam = {
2139 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2140 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2141 					   OP_ALG_AAI_HMAC_PRECOMP,
2142 		}
2143 	},
2144 	{
2145 		.aead = {
2146 			.base = {
2147 				.cra_name = "echainiv(authenc(hmac(md5),"
2148 					    "cbc(des3_ede)))",
2149 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2150 						   "cbc-des3_ede-caam-qi2",
2151 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2152 			},
2153 			.setkey = des3_aead_setkey,
2154 			.setauthsize = aead_setauthsize,
2155 			.encrypt = aead_encrypt,
2156 			.decrypt = aead_decrypt,
2157 			.ivsize = DES3_EDE_BLOCK_SIZE,
2158 			.maxauthsize = MD5_DIGEST_SIZE,
2159 		},
2160 		.caam = {
2161 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2162 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2163 					   OP_ALG_AAI_HMAC_PRECOMP,
2164 			.geniv = true,
2165 		}
2166 	},
2167 	{
2168 		.aead = {
2169 			.base = {
2170 				.cra_name = "authenc(hmac(sha1),"
2171 					    "cbc(des3_ede))",
2172 				.cra_driver_name = "authenc-hmac-sha1-"
2173 						   "cbc-des3_ede-caam-qi2",
2174 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2175 			},
2176 			.setkey = des3_aead_setkey,
2177 			.setauthsize = aead_setauthsize,
2178 			.encrypt = aead_encrypt,
2179 			.decrypt = aead_decrypt,
2180 			.ivsize = DES3_EDE_BLOCK_SIZE,
2181 			.maxauthsize = SHA1_DIGEST_SIZE,
2182 		},
2183 		.caam = {
2184 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2185 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2186 					   OP_ALG_AAI_HMAC_PRECOMP,
2187 		},
2188 	},
2189 	{
2190 		.aead = {
2191 			.base = {
2192 				.cra_name = "echainiv(authenc(hmac(sha1),"
2193 					    "cbc(des3_ede)))",
2194 				.cra_driver_name = "echainiv-authenc-"
2195 						   "hmac-sha1-"
2196 						   "cbc-des3_ede-caam-qi2",
2197 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2198 			},
2199 			.setkey = des3_aead_setkey,
2200 			.setauthsize = aead_setauthsize,
2201 			.encrypt = aead_encrypt,
2202 			.decrypt = aead_decrypt,
2203 			.ivsize = DES3_EDE_BLOCK_SIZE,
2204 			.maxauthsize = SHA1_DIGEST_SIZE,
2205 		},
2206 		.caam = {
2207 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2208 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2209 					   OP_ALG_AAI_HMAC_PRECOMP,
2210 			.geniv = true,
2211 		}
2212 	},
2213 	{
2214 		.aead = {
2215 			.base = {
2216 				.cra_name = "authenc(hmac(sha224),"
2217 					    "cbc(des3_ede))",
2218 				.cra_driver_name = "authenc-hmac-sha224-"
2219 						   "cbc-des3_ede-caam-qi2",
2220 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2221 			},
2222 			.setkey = des3_aead_setkey,
2223 			.setauthsize = aead_setauthsize,
2224 			.encrypt = aead_encrypt,
2225 			.decrypt = aead_decrypt,
2226 			.ivsize = DES3_EDE_BLOCK_SIZE,
2227 			.maxauthsize = SHA224_DIGEST_SIZE,
2228 		},
2229 		.caam = {
2230 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2231 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2232 					   OP_ALG_AAI_HMAC_PRECOMP,
2233 		},
2234 	},
2235 	{
2236 		.aead = {
2237 			.base = {
2238 				.cra_name = "echainiv(authenc(hmac(sha224),"
2239 					    "cbc(des3_ede)))",
2240 				.cra_driver_name = "echainiv-authenc-"
2241 						   "hmac-sha224-"
2242 						   "cbc-des3_ede-caam-qi2",
2243 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2244 			},
2245 			.setkey = des3_aead_setkey,
2246 			.setauthsize = aead_setauthsize,
2247 			.encrypt = aead_encrypt,
2248 			.decrypt = aead_decrypt,
2249 			.ivsize = DES3_EDE_BLOCK_SIZE,
2250 			.maxauthsize = SHA224_DIGEST_SIZE,
2251 		},
2252 		.caam = {
2253 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2254 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2255 					   OP_ALG_AAI_HMAC_PRECOMP,
2256 			.geniv = true,
2257 		}
2258 	},
2259 	{
2260 		.aead = {
2261 			.base = {
2262 				.cra_name = "authenc(hmac(sha256),"
2263 					    "cbc(des3_ede))",
2264 				.cra_driver_name = "authenc-hmac-sha256-"
2265 						   "cbc-des3_ede-caam-qi2",
2266 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2267 			},
2268 			.setkey = des3_aead_setkey,
2269 			.setauthsize = aead_setauthsize,
2270 			.encrypt = aead_encrypt,
2271 			.decrypt = aead_decrypt,
2272 			.ivsize = DES3_EDE_BLOCK_SIZE,
2273 			.maxauthsize = SHA256_DIGEST_SIZE,
2274 		},
2275 		.caam = {
2276 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2277 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2278 					   OP_ALG_AAI_HMAC_PRECOMP,
2279 		},
2280 	},
2281 	{
2282 		.aead = {
2283 			.base = {
2284 				.cra_name = "echainiv(authenc(hmac(sha256),"
2285 					    "cbc(des3_ede)))",
2286 				.cra_driver_name = "echainiv-authenc-"
2287 						   "hmac-sha256-"
2288 						   "cbc-des3_ede-caam-qi2",
2289 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2290 			},
2291 			.setkey = des3_aead_setkey,
2292 			.setauthsize = aead_setauthsize,
2293 			.encrypt = aead_encrypt,
2294 			.decrypt = aead_decrypt,
2295 			.ivsize = DES3_EDE_BLOCK_SIZE,
2296 			.maxauthsize = SHA256_DIGEST_SIZE,
2297 		},
2298 		.caam = {
2299 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2300 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2301 					   OP_ALG_AAI_HMAC_PRECOMP,
2302 			.geniv = true,
2303 		}
2304 	},
2305 	{
2306 		.aead = {
2307 			.base = {
2308 				.cra_name = "authenc(hmac(sha384),"
2309 					    "cbc(des3_ede))",
2310 				.cra_driver_name = "authenc-hmac-sha384-"
2311 						   "cbc-des3_ede-caam-qi2",
2312 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2313 			},
2314 			.setkey = des3_aead_setkey,
2315 			.setauthsize = aead_setauthsize,
2316 			.encrypt = aead_encrypt,
2317 			.decrypt = aead_decrypt,
2318 			.ivsize = DES3_EDE_BLOCK_SIZE,
2319 			.maxauthsize = SHA384_DIGEST_SIZE,
2320 		},
2321 		.caam = {
2322 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2323 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2324 					   OP_ALG_AAI_HMAC_PRECOMP,
2325 		},
2326 	},
2327 	{
2328 		.aead = {
2329 			.base = {
2330 				.cra_name = "echainiv(authenc(hmac(sha384),"
2331 					    "cbc(des3_ede)))",
2332 				.cra_driver_name = "echainiv-authenc-"
2333 						   "hmac-sha384-"
2334 						   "cbc-des3_ede-caam-qi2",
2335 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2336 			},
2337 			.setkey = des3_aead_setkey,
2338 			.setauthsize = aead_setauthsize,
2339 			.encrypt = aead_encrypt,
2340 			.decrypt = aead_decrypt,
2341 			.ivsize = DES3_EDE_BLOCK_SIZE,
2342 			.maxauthsize = SHA384_DIGEST_SIZE,
2343 		},
2344 		.caam = {
2345 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2346 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2347 					   OP_ALG_AAI_HMAC_PRECOMP,
2348 			.geniv = true,
2349 		}
2350 	},
2351 	{
2352 		.aead = {
2353 			.base = {
2354 				.cra_name = "authenc(hmac(sha512),"
2355 					    "cbc(des3_ede))",
2356 				.cra_driver_name = "authenc-hmac-sha512-"
2357 						   "cbc-des3_ede-caam-qi2",
2358 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2359 			},
2360 			.setkey = des3_aead_setkey,
2361 			.setauthsize = aead_setauthsize,
2362 			.encrypt = aead_encrypt,
2363 			.decrypt = aead_decrypt,
2364 			.ivsize = DES3_EDE_BLOCK_SIZE,
2365 			.maxauthsize = SHA512_DIGEST_SIZE,
2366 		},
2367 		.caam = {
2368 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2369 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2370 					   OP_ALG_AAI_HMAC_PRECOMP,
2371 		},
2372 	},
2373 	{
2374 		.aead = {
2375 			.base = {
2376 				.cra_name = "echainiv(authenc(hmac(sha512),"
2377 					    "cbc(des3_ede)))",
2378 				.cra_driver_name = "echainiv-authenc-"
2379 						   "hmac-sha512-"
2380 						   "cbc-des3_ede-caam-qi2",
2381 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2382 			},
2383 			.setkey = des3_aead_setkey,
2384 			.setauthsize = aead_setauthsize,
2385 			.encrypt = aead_encrypt,
2386 			.decrypt = aead_decrypt,
2387 			.ivsize = DES3_EDE_BLOCK_SIZE,
2388 			.maxauthsize = SHA512_DIGEST_SIZE,
2389 		},
2390 		.caam = {
2391 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2392 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2393 					   OP_ALG_AAI_HMAC_PRECOMP,
2394 			.geniv = true,
2395 		}
2396 	},
2397 	{
2398 		.aead = {
2399 			.base = {
2400 				.cra_name = "authenc(hmac(md5),cbc(des))",
2401 				.cra_driver_name = "authenc-hmac-md5-"
2402 						   "cbc-des-caam-qi2",
2403 				.cra_blocksize = DES_BLOCK_SIZE,
2404 			},
2405 			.setkey = aead_setkey,
2406 			.setauthsize = aead_setauthsize,
2407 			.encrypt = aead_encrypt,
2408 			.decrypt = aead_decrypt,
2409 			.ivsize = DES_BLOCK_SIZE,
2410 			.maxauthsize = MD5_DIGEST_SIZE,
2411 		},
2412 		.caam = {
2413 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2414 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2415 					   OP_ALG_AAI_HMAC_PRECOMP,
2416 		},
2417 	},
2418 	{
2419 		.aead = {
2420 			.base = {
2421 				.cra_name = "echainiv(authenc(hmac(md5),"
2422 					    "cbc(des)))",
2423 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2424 						   "cbc-des-caam-qi2",
2425 				.cra_blocksize = DES_BLOCK_SIZE,
2426 			},
2427 			.setkey = aead_setkey,
2428 			.setauthsize = aead_setauthsize,
2429 			.encrypt = aead_encrypt,
2430 			.decrypt = aead_decrypt,
2431 			.ivsize = DES_BLOCK_SIZE,
2432 			.maxauthsize = MD5_DIGEST_SIZE,
2433 		},
2434 		.caam = {
2435 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2436 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2437 					   OP_ALG_AAI_HMAC_PRECOMP,
2438 			.geniv = true,
2439 		}
2440 	},
2441 	{
2442 		.aead = {
2443 			.base = {
2444 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2445 				.cra_driver_name = "authenc-hmac-sha1-"
2446 						   "cbc-des-caam-qi2",
2447 				.cra_blocksize = DES_BLOCK_SIZE,
2448 			},
2449 			.setkey = aead_setkey,
2450 			.setauthsize = aead_setauthsize,
2451 			.encrypt = aead_encrypt,
2452 			.decrypt = aead_decrypt,
2453 			.ivsize = DES_BLOCK_SIZE,
2454 			.maxauthsize = SHA1_DIGEST_SIZE,
2455 		},
2456 		.caam = {
2457 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2458 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2459 					   OP_ALG_AAI_HMAC_PRECOMP,
2460 		},
2461 	},
2462 	{
2463 		.aead = {
2464 			.base = {
2465 				.cra_name = "echainiv(authenc(hmac(sha1),"
2466 					    "cbc(des)))",
2467 				.cra_driver_name = "echainiv-authenc-"
2468 						   "hmac-sha1-cbc-des-caam-qi2",
2469 				.cra_blocksize = DES_BLOCK_SIZE,
2470 			},
2471 			.setkey = aead_setkey,
2472 			.setauthsize = aead_setauthsize,
2473 			.encrypt = aead_encrypt,
2474 			.decrypt = aead_decrypt,
2475 			.ivsize = DES_BLOCK_SIZE,
2476 			.maxauthsize = SHA1_DIGEST_SIZE,
2477 		},
2478 		.caam = {
2479 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2480 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2481 					   OP_ALG_AAI_HMAC_PRECOMP,
2482 			.geniv = true,
2483 		}
2484 	},
2485 	{
2486 		.aead = {
2487 			.base = {
2488 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2489 				.cra_driver_name = "authenc-hmac-sha224-"
2490 						   "cbc-des-caam-qi2",
2491 				.cra_blocksize = DES_BLOCK_SIZE,
2492 			},
2493 			.setkey = aead_setkey,
2494 			.setauthsize = aead_setauthsize,
2495 			.encrypt = aead_encrypt,
2496 			.decrypt = aead_decrypt,
2497 			.ivsize = DES_BLOCK_SIZE,
2498 			.maxauthsize = SHA224_DIGEST_SIZE,
2499 		},
2500 		.caam = {
2501 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2502 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2503 					   OP_ALG_AAI_HMAC_PRECOMP,
2504 		},
2505 	},
2506 	{
2507 		.aead = {
2508 			.base = {
2509 				.cra_name = "echainiv(authenc(hmac(sha224),"
2510 					    "cbc(des)))",
2511 				.cra_driver_name = "echainiv-authenc-"
2512 						   "hmac-sha224-cbc-des-"
2513 						   "caam-qi2",
2514 				.cra_blocksize = DES_BLOCK_SIZE,
2515 			},
2516 			.setkey = aead_setkey,
2517 			.setauthsize = aead_setauthsize,
2518 			.encrypt = aead_encrypt,
2519 			.decrypt = aead_decrypt,
2520 			.ivsize = DES_BLOCK_SIZE,
2521 			.maxauthsize = SHA224_DIGEST_SIZE,
2522 		},
2523 		.caam = {
2524 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2525 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2526 					   OP_ALG_AAI_HMAC_PRECOMP,
2527 			.geniv = true,
2528 		}
2529 	},
2530 	{
2531 		.aead = {
2532 			.base = {
2533 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2534 				.cra_driver_name = "authenc-hmac-sha256-"
2535 						   "cbc-des-caam-qi2",
2536 				.cra_blocksize = DES_BLOCK_SIZE,
2537 			},
2538 			.setkey = aead_setkey,
2539 			.setauthsize = aead_setauthsize,
2540 			.encrypt = aead_encrypt,
2541 			.decrypt = aead_decrypt,
2542 			.ivsize = DES_BLOCK_SIZE,
2543 			.maxauthsize = SHA256_DIGEST_SIZE,
2544 		},
2545 		.caam = {
2546 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2547 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2548 					   OP_ALG_AAI_HMAC_PRECOMP,
2549 		},
2550 	},
2551 	{
2552 		.aead = {
2553 			.base = {
2554 				.cra_name = "echainiv(authenc(hmac(sha256),"
2555 					    "cbc(des)))",
2556 				.cra_driver_name = "echainiv-authenc-"
2557 						   "hmac-sha256-cbc-des-"
2558 						   "caam-qi2",
2559 				.cra_blocksize = DES_BLOCK_SIZE,
2560 			},
2561 			.setkey = aead_setkey,
2562 			.setauthsize = aead_setauthsize,
2563 			.encrypt = aead_encrypt,
2564 			.decrypt = aead_decrypt,
2565 			.ivsize = DES_BLOCK_SIZE,
2566 			.maxauthsize = SHA256_DIGEST_SIZE,
2567 		},
2568 		.caam = {
2569 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2570 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2571 					   OP_ALG_AAI_HMAC_PRECOMP,
2572 			.geniv = true,
2573 		},
2574 	},
2575 	{
2576 		.aead = {
2577 			.base = {
2578 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2579 				.cra_driver_name = "authenc-hmac-sha384-"
2580 						   "cbc-des-caam-qi2",
2581 				.cra_blocksize = DES_BLOCK_SIZE,
2582 			},
2583 			.setkey = aead_setkey,
2584 			.setauthsize = aead_setauthsize,
2585 			.encrypt = aead_encrypt,
2586 			.decrypt = aead_decrypt,
2587 			.ivsize = DES_BLOCK_SIZE,
2588 			.maxauthsize = SHA384_DIGEST_SIZE,
2589 		},
2590 		.caam = {
2591 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2592 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2593 					   OP_ALG_AAI_HMAC_PRECOMP,
2594 		},
2595 	},
2596 	{
2597 		.aead = {
2598 			.base = {
2599 				.cra_name = "echainiv(authenc(hmac(sha384),"
2600 					    "cbc(des)))",
2601 				.cra_driver_name = "echainiv-authenc-"
2602 						   "hmac-sha384-cbc-des-"
2603 						   "caam-qi2",
2604 				.cra_blocksize = DES_BLOCK_SIZE,
2605 			},
2606 			.setkey = aead_setkey,
2607 			.setauthsize = aead_setauthsize,
2608 			.encrypt = aead_encrypt,
2609 			.decrypt = aead_decrypt,
2610 			.ivsize = DES_BLOCK_SIZE,
2611 			.maxauthsize = SHA384_DIGEST_SIZE,
2612 		},
2613 		.caam = {
2614 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2615 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2616 					   OP_ALG_AAI_HMAC_PRECOMP,
2617 			.geniv = true,
2618 		}
2619 	},
2620 	{
2621 		.aead = {
2622 			.base = {
2623 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2624 				.cra_driver_name = "authenc-hmac-sha512-"
2625 						   "cbc-des-caam-qi2",
2626 				.cra_blocksize = DES_BLOCK_SIZE,
2627 			},
2628 			.setkey = aead_setkey,
2629 			.setauthsize = aead_setauthsize,
2630 			.encrypt = aead_encrypt,
2631 			.decrypt = aead_decrypt,
2632 			.ivsize = DES_BLOCK_SIZE,
2633 			.maxauthsize = SHA512_DIGEST_SIZE,
2634 		},
2635 		.caam = {
2636 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2637 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2638 					   OP_ALG_AAI_HMAC_PRECOMP,
2639 		}
2640 	},
2641 	{
2642 		.aead = {
2643 			.base = {
2644 				.cra_name = "echainiv(authenc(hmac(sha512),"
2645 					    "cbc(des)))",
2646 				.cra_driver_name = "echainiv-authenc-"
2647 						   "hmac-sha512-cbc-des-"
2648 						   "caam-qi2",
2649 				.cra_blocksize = DES_BLOCK_SIZE,
2650 			},
2651 			.setkey = aead_setkey,
2652 			.setauthsize = aead_setauthsize,
2653 			.encrypt = aead_encrypt,
2654 			.decrypt = aead_decrypt,
2655 			.ivsize = DES_BLOCK_SIZE,
2656 			.maxauthsize = SHA512_DIGEST_SIZE,
2657 		},
2658 		.caam = {
2659 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2660 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2661 					   OP_ALG_AAI_HMAC_PRECOMP,
2662 			.geniv = true,
2663 		}
2664 	},
2665 	{
2666 		.aead = {
2667 			.base = {
2668 				.cra_name = "authenc(hmac(md5),"
2669 					    "rfc3686(ctr(aes)))",
2670 				.cra_driver_name = "authenc-hmac-md5-"
2671 						   "rfc3686-ctr-aes-caam-qi2",
2672 				.cra_blocksize = 1,
2673 			},
2674 			.setkey = aead_setkey,
2675 			.setauthsize = aead_setauthsize,
2676 			.encrypt = aead_encrypt,
2677 			.decrypt = aead_decrypt,
2678 			.ivsize = CTR_RFC3686_IV_SIZE,
2679 			.maxauthsize = MD5_DIGEST_SIZE,
2680 		},
2681 		.caam = {
2682 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2683 					   OP_ALG_AAI_CTR_MOD128,
2684 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2685 					   OP_ALG_AAI_HMAC_PRECOMP,
2686 			.rfc3686 = true,
2687 		},
2688 	},
2689 	{
2690 		.aead = {
2691 			.base = {
2692 				.cra_name = "seqiv(authenc("
2693 					    "hmac(md5),rfc3686(ctr(aes))))",
2694 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2695 						   "rfc3686-ctr-aes-caam-qi2",
2696 				.cra_blocksize = 1,
2697 			},
2698 			.setkey = aead_setkey,
2699 			.setauthsize = aead_setauthsize,
2700 			.encrypt = aead_encrypt,
2701 			.decrypt = aead_decrypt,
2702 			.ivsize = CTR_RFC3686_IV_SIZE,
2703 			.maxauthsize = MD5_DIGEST_SIZE,
2704 		},
2705 		.caam = {
2706 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2707 					   OP_ALG_AAI_CTR_MOD128,
2708 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2709 					   OP_ALG_AAI_HMAC_PRECOMP,
2710 			.rfc3686 = true,
2711 			.geniv = true,
2712 		},
2713 	},
2714 	{
2715 		.aead = {
2716 			.base = {
2717 				.cra_name = "authenc(hmac(sha1),"
2718 					    "rfc3686(ctr(aes)))",
2719 				.cra_driver_name = "authenc-hmac-sha1-"
2720 						   "rfc3686-ctr-aes-caam-qi2",
2721 				.cra_blocksize = 1,
2722 			},
2723 			.setkey = aead_setkey,
2724 			.setauthsize = aead_setauthsize,
2725 			.encrypt = aead_encrypt,
2726 			.decrypt = aead_decrypt,
2727 			.ivsize = CTR_RFC3686_IV_SIZE,
2728 			.maxauthsize = SHA1_DIGEST_SIZE,
2729 		},
2730 		.caam = {
2731 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2732 					   OP_ALG_AAI_CTR_MOD128,
2733 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2734 					   OP_ALG_AAI_HMAC_PRECOMP,
2735 			.rfc3686 = true,
2736 		},
2737 	},
2738 	{
2739 		.aead = {
2740 			.base = {
2741 				.cra_name = "seqiv(authenc("
2742 					    "hmac(sha1),rfc3686(ctr(aes))))",
2743 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2744 						   "rfc3686-ctr-aes-caam-qi2",
2745 				.cra_blocksize = 1,
2746 			},
2747 			.setkey = aead_setkey,
2748 			.setauthsize = aead_setauthsize,
2749 			.encrypt = aead_encrypt,
2750 			.decrypt = aead_decrypt,
2751 			.ivsize = CTR_RFC3686_IV_SIZE,
2752 			.maxauthsize = SHA1_DIGEST_SIZE,
2753 		},
2754 		.caam = {
2755 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2756 					   OP_ALG_AAI_CTR_MOD128,
2757 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2758 					   OP_ALG_AAI_HMAC_PRECOMP,
2759 			.rfc3686 = true,
2760 			.geniv = true,
2761 		},
2762 	},
2763 	{
2764 		.aead = {
2765 			.base = {
2766 				.cra_name = "authenc(hmac(sha224),"
2767 					    "rfc3686(ctr(aes)))",
2768 				.cra_driver_name = "authenc-hmac-sha224-"
2769 						   "rfc3686-ctr-aes-caam-qi2",
2770 				.cra_blocksize = 1,
2771 			},
2772 			.setkey = aead_setkey,
2773 			.setauthsize = aead_setauthsize,
2774 			.encrypt = aead_encrypt,
2775 			.decrypt = aead_decrypt,
2776 			.ivsize = CTR_RFC3686_IV_SIZE,
2777 			.maxauthsize = SHA224_DIGEST_SIZE,
2778 		},
2779 		.caam = {
2780 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2781 					   OP_ALG_AAI_CTR_MOD128,
2782 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2783 					   OP_ALG_AAI_HMAC_PRECOMP,
2784 			.rfc3686 = true,
2785 		},
2786 	},
2787 	{
2788 		.aead = {
2789 			.base = {
2790 				.cra_name = "seqiv(authenc("
2791 					    "hmac(sha224),rfc3686(ctr(aes))))",
2792 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2793 						   "rfc3686-ctr-aes-caam-qi2",
2794 				.cra_blocksize = 1,
2795 			},
2796 			.setkey = aead_setkey,
2797 			.setauthsize = aead_setauthsize,
2798 			.encrypt = aead_encrypt,
2799 			.decrypt = aead_decrypt,
2800 			.ivsize = CTR_RFC3686_IV_SIZE,
2801 			.maxauthsize = SHA224_DIGEST_SIZE,
2802 		},
2803 		.caam = {
2804 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2805 					   OP_ALG_AAI_CTR_MOD128,
2806 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2807 					   OP_ALG_AAI_HMAC_PRECOMP,
2808 			.rfc3686 = true,
2809 			.geniv = true,
2810 		},
2811 	},
2812 	{
2813 		.aead = {
2814 			.base = {
2815 				.cra_name = "authenc(hmac(sha256),"
2816 					    "rfc3686(ctr(aes)))",
2817 				.cra_driver_name = "authenc-hmac-sha256-"
2818 						   "rfc3686-ctr-aes-caam-qi2",
2819 				.cra_blocksize = 1,
2820 			},
2821 			.setkey = aead_setkey,
2822 			.setauthsize = aead_setauthsize,
2823 			.encrypt = aead_encrypt,
2824 			.decrypt = aead_decrypt,
2825 			.ivsize = CTR_RFC3686_IV_SIZE,
2826 			.maxauthsize = SHA256_DIGEST_SIZE,
2827 		},
2828 		.caam = {
2829 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2830 					   OP_ALG_AAI_CTR_MOD128,
2831 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2832 					   OP_ALG_AAI_HMAC_PRECOMP,
2833 			.rfc3686 = true,
2834 		},
2835 	},
2836 	{
2837 		.aead = {
2838 			.base = {
2839 				.cra_name = "seqiv(authenc(hmac(sha256),"
2840 					    "rfc3686(ctr(aes))))",
2841 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2842 						   "rfc3686-ctr-aes-caam-qi2",
2843 				.cra_blocksize = 1,
2844 			},
2845 			.setkey = aead_setkey,
2846 			.setauthsize = aead_setauthsize,
2847 			.encrypt = aead_encrypt,
2848 			.decrypt = aead_decrypt,
2849 			.ivsize = CTR_RFC3686_IV_SIZE,
2850 			.maxauthsize = SHA256_DIGEST_SIZE,
2851 		},
2852 		.caam = {
2853 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2854 					   OP_ALG_AAI_CTR_MOD128,
2855 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2856 					   OP_ALG_AAI_HMAC_PRECOMP,
2857 			.rfc3686 = true,
2858 			.geniv = true,
2859 		},
2860 	},
2861 	{
2862 		.aead = {
2863 			.base = {
2864 				.cra_name = "authenc(hmac(sha384),"
2865 					    "rfc3686(ctr(aes)))",
2866 				.cra_driver_name = "authenc-hmac-sha384-"
2867 						   "rfc3686-ctr-aes-caam-qi2",
2868 				.cra_blocksize = 1,
2869 			},
2870 			.setkey = aead_setkey,
2871 			.setauthsize = aead_setauthsize,
2872 			.encrypt = aead_encrypt,
2873 			.decrypt = aead_decrypt,
2874 			.ivsize = CTR_RFC3686_IV_SIZE,
2875 			.maxauthsize = SHA384_DIGEST_SIZE,
2876 		},
2877 		.caam = {
2878 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2879 					   OP_ALG_AAI_CTR_MOD128,
2880 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2881 					   OP_ALG_AAI_HMAC_PRECOMP,
2882 			.rfc3686 = true,
2883 		},
2884 	},
2885 	{
2886 		.aead = {
2887 			.base = {
2888 				.cra_name = "seqiv(authenc(hmac(sha384),"
2889 					    "rfc3686(ctr(aes))))",
2890 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2891 						   "rfc3686-ctr-aes-caam-qi2",
2892 				.cra_blocksize = 1,
2893 			},
2894 			.setkey = aead_setkey,
2895 			.setauthsize = aead_setauthsize,
2896 			.encrypt = aead_encrypt,
2897 			.decrypt = aead_decrypt,
2898 			.ivsize = CTR_RFC3686_IV_SIZE,
2899 			.maxauthsize = SHA384_DIGEST_SIZE,
2900 		},
2901 		.caam = {
2902 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2903 					   OP_ALG_AAI_CTR_MOD128,
2904 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2905 					   OP_ALG_AAI_HMAC_PRECOMP,
2906 			.rfc3686 = true,
2907 			.geniv = true,
2908 		},
2909 	},
2910 	{
2911 		.aead = {
2912 			.base = {
2913 				.cra_name = "rfc7539(chacha20,poly1305)",
2914 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2915 						   "caam-qi2",
2916 				.cra_blocksize = 1,
2917 			},
2918 			.setkey = chachapoly_setkey,
2919 			.setauthsize = chachapoly_setauthsize,
2920 			.encrypt = aead_encrypt,
2921 			.decrypt = aead_decrypt,
2922 			.ivsize = CHACHAPOLY_IV_SIZE,
2923 			.maxauthsize = POLY1305_DIGEST_SIZE,
2924 		},
2925 		.caam = {
2926 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2927 					   OP_ALG_AAI_AEAD,
2928 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2929 					   OP_ALG_AAI_AEAD,
2930 			.nodkp = true,
2931 		},
2932 	},
2933 	{
2934 		.aead = {
2935 			.base = {
2936 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2937 				.cra_driver_name = "rfc7539esp-chacha20-"
2938 						   "poly1305-caam-qi2",
2939 				.cra_blocksize = 1,
2940 			},
2941 			.setkey = chachapoly_setkey,
2942 			.setauthsize = chachapoly_setauthsize,
2943 			.encrypt = aead_encrypt,
2944 			.decrypt = aead_decrypt,
2945 			.ivsize = 8,
2946 			.maxauthsize = POLY1305_DIGEST_SIZE,
2947 		},
2948 		.caam = {
2949 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2950 					   OP_ALG_AAI_AEAD,
2951 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2952 					   OP_ALG_AAI_AEAD,
2953 			.nodkp = true,
2954 		},
2955 	},
2956 	{
2957 		.aead = {
2958 			.base = {
2959 				.cra_name = "authenc(hmac(sha512),"
2960 					    "rfc3686(ctr(aes)))",
2961 				.cra_driver_name = "authenc-hmac-sha512-"
2962 						   "rfc3686-ctr-aes-caam-qi2",
2963 				.cra_blocksize = 1,
2964 			},
2965 			.setkey = aead_setkey,
2966 			.setauthsize = aead_setauthsize,
2967 			.encrypt = aead_encrypt,
2968 			.decrypt = aead_decrypt,
2969 			.ivsize = CTR_RFC3686_IV_SIZE,
2970 			.maxauthsize = SHA512_DIGEST_SIZE,
2971 		},
2972 		.caam = {
2973 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2974 					   OP_ALG_AAI_CTR_MOD128,
2975 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2976 					   OP_ALG_AAI_HMAC_PRECOMP,
2977 			.rfc3686 = true,
2978 		},
2979 	},
2980 	{
2981 		.aead = {
2982 			.base = {
2983 				.cra_name = "seqiv(authenc(hmac(sha512),"
2984 					    "rfc3686(ctr(aes))))",
2985 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2986 						   "rfc3686-ctr-aes-caam-qi2",
2987 				.cra_blocksize = 1,
2988 			},
2989 			.setkey = aead_setkey,
2990 			.setauthsize = aead_setauthsize,
2991 			.encrypt = aead_encrypt,
2992 			.decrypt = aead_decrypt,
2993 			.ivsize = CTR_RFC3686_IV_SIZE,
2994 			.maxauthsize = SHA512_DIGEST_SIZE,
2995 		},
2996 		.caam = {
2997 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2998 					   OP_ALG_AAI_CTR_MOD128,
2999 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3000 					   OP_ALG_AAI_HMAC_PRECOMP,
3001 			.rfc3686 = true,
3002 			.geniv = true,
3003 		},
3004 	},
3005 };
3006 
3007 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3008 {
3009 	struct skcipher_alg *alg = &t_alg->skcipher;
3010 
3011 	alg->base.cra_module = THIS_MODULE;
3012 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3013 	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3014 	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3015 			      CRYPTO_ALG_KERN_DRIVER_ONLY);
3016 
3017 	alg->init = caam_cra_init_skcipher;
3018 	alg->exit = caam_cra_exit;
3019 }
3020 
3021 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3022 {
3023 	struct aead_alg *alg = &t_alg->aead;
3024 
3025 	alg->base.cra_module = THIS_MODULE;
3026 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3027 	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3028 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3029 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
3030 
3031 	alg->init = caam_cra_init_aead;
3032 	alg->exit = caam_cra_exit_aead;
3033 }
3034 
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

/* largest block size among the supported hashes (SHA-512 family) */
#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/*
 * Per-operation shared descriptor slots: ahash_set_sh_desc() builds one
 * Flow Context for each of these. HASH_NUM_OP sizes the flc/flc_dma
 * arrays in struct caam_hash_ctx.
 */
enum hash_optype {
	UPDATE = 0,	/* intermediate update: context in and out */
	UPDATE_FIRST,	/* first update: no incoming context */
	FINALIZE,	/* finalize: consume context, emit digest */
	DIGEST,		/* one-shot init+finalize digest */
	HASH_NUM_OP
};
3051 
3052 /**
3053  * struct caam_hash_ctx - ahash per-session context
3054  * @flc: Flow Contexts array
3055  * @key: authentication key
3056  * @flc_dma: I/O virtual addresses of the Flow Contexts
3057  * @dev: dpseci device
3058  * @ctx_len: size of Context Register
3059  * @adata: hashing algorithm details
3060  */
3061 struct caam_hash_ctx {
3062 	struct caam_flc flc[HASH_NUM_OP];
3063 	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3064 	dma_addr_t flc_dma[HASH_NUM_OP];
3065 	struct device *dev;
3066 	int ctx_len;
3067 	struct alginfo adata;
3068 };
3069 
/**
 * struct caam_hash_state - per-request ahash state
 * @caam_req: request structure handed to the QI backend on enqueue
 * @buf_dma: I/O virtual address of @buf (0 when not mapped)
 * @ctx_dma: I/O virtual address of @caam_ctx (0 when not mapped)
 * @ctx_dma_len: number of bytes mapped at @ctx_dma
 * @buf: holds data smaller than one block until the next operation
 * @buflen: number of valid bytes currently in @buf
 * @next_buflen: bytes to be left over (and buffered) after the
 *               in-flight operation completes
 * @caam_ctx: hardware running context / final digest area
 * @update: .update implementation selected for this request chain
 * @final: .final implementation selected for this request chain
 * @finup: .finup implementation selected for this request chain
 */
struct caam_hash_state {
	struct caam_request caam_req;
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
3084 
/**
 * struct caam_export_state - exportable snapshot of the ahash state
 * @buf: buffered partial-block data
 * @caam_ctx: hardware running context snapshot
 * @buflen: number of valid bytes in @buf
 * @update: .update implementation to restore on import
 * @final: .final implementation to restore on import
 * @finup: .finup implementation to restore on import
 */
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
3093 
3094 /* Map current buffer in state (if length > 0) and put it in link table */
3095 static inline int buf_map_to_qm_sg(struct device *dev,
3096 				   struct dpaa2_sg_entry *qm_sg,
3097 				   struct caam_hash_state *state)
3098 {
3099 	int buflen = state->buflen;
3100 
3101 	if (!buflen)
3102 		return 0;
3103 
3104 	state->buf_dma = dma_map_single(dev, state->buf, buflen,
3105 					DMA_TO_DEVICE);
3106 	if (dma_mapping_error(dev, state->buf_dma)) {
3107 		dev_err(dev, "unable to map buf\n");
3108 		state->buf_dma = 0;
3109 		return -ENOMEM;
3110 	}
3111 
3112 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3113 
3114 	return 0;
3115 }
3116 
3117 /* Map state->caam_ctx, and add it to link table */
3118 static inline int ctx_map_to_qm_sg(struct device *dev,
3119 				   struct caam_hash_state *state, int ctx_len,
3120 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3121 {
3122 	state->ctx_dma_len = ctx_len;
3123 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3124 	if (dma_mapping_error(dev, state->ctx_dma)) {
3125 		dev_err(dev, "unable to map ctx\n");
3126 		state->ctx_dma = 0;
3127 		return -ENOMEM;
3128 	}
3129 
3130 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3131 
3132 	return 0;
3133 }
3134 
/*
 * (Re)build the four hash shared descriptors for this session, one per
 * enum hash_optype slot. UPDATE and FINALIZE import the running context
 * (import flag true); UPDATE_FIRST and DIGEST start fresh. After each
 * descriptor is written its Flow Context is synced to the device - the
 * FLCs are presumably DMA-mapped at tfm init time (not visible here;
 * confirm against the cra_init path). Always returns 0.
 */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;

	/* ahash_update shared descriptor: ctx in/out, ctx_len bytes out */
	flc = &ctx->flc[UPDATE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor: no incoming context */
	flc = &ctx->flc[UPDATE_FIRST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_final shared descriptor: import ctx, emit the digest */
	flc = &ctx->flc[FINALIZE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_digest shared descriptor: one-shot, no context import */
	flc = &ctx->flc[DIGEST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
3193 
/* Completion bookkeeping for the synchronous key-digest job below */
struct split_key_sh_result {
	struct completion completion;	/* signalled by split_key_sh_done() */
	int err;			/* 0 or decoded CAAM status */
	struct device *dev;
};
3199 
3200 static void split_key_sh_done(void *cbk_ctx, u32 err)
3201 {
3202 	struct split_key_sh_result *res = cbk_ctx;
3203 
3204 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3205 
3206 	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3207 	complete(&res->completion);
3208 }
3209 
/*
 * Digest the key when it is longer than the algorithm's block size: run
 * an unkeyed hash over @key in place, shrinking it to @digestsize bytes
 * (*keylen is updated accordingly). Executes synchronously by building
 * a one-off shared descriptor, enqueueing it and waiting for the
 * completion callback. Returns 0 or a negative errno.
 */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct caam_request *req_ctx;
	u32 *desc;
	struct split_key_sh_result result;
	dma_addr_t key_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	int ret = -ENOMEM;
	struct dpaa2_fl_entry *in_fle, *out_fle;

	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
	if (!req_ctx)
		return -ENOMEM;

	in_fle = &req_ctx->fd_flt[1];
	out_fle = &req_ctx->fd_flt[0];

	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
	if (!flc)
		goto err_flc;

	/* bidirectional: the digest overwrites the key buffer in place */
	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
		goto err_key_dma;
	}

	desc = flc->sh_desc;

	init_sh_desc(desc, 0);

	/* descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
				 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
		goto err_flc_dma;
	}

	/* in and out frame list entries both point at the key buffer */
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(in_fle, key_dma);
	dpaa2_fl_set_len(in_fle, *keylen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, key_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);
	result.dev = ctx->dev;

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = split_key_sh_done;
	req_ctx->ctx = &result;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}

	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
			 DMA_TO_DEVICE);
err_flc_dma:
	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
	kfree(flc);
err_flc:
	kfree(req_ctx);

	/* *keylen is shrunk even on failure; callers bail out on ret != 0 */
	*keylen = digestsize;

	return ret;
}
3306 
/*
 * .setkey handler for the keyed (hmac) hashes: keys longer than the
 * block size are first digested down to digestsize bytes, then the
 * padded split-key length is validated and the shared descriptors are
 * rebuilt. Returns 0, -ENOMEM, -EINVAL (oversized derived key or
 * hash_digest_key() failure), or the ahash_set_sh_desc() result.
 */
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

	if (keylen > blocksize) {
		/* work on a DMA-able copy; hash_digest_key() shrinks it */
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
	ctx->adata.key_inline = true;

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	if (keylen > ctx->adata.keylen_pad) {
		/*
		 * ctx->key / adata.key_dma are presumably mapped at tfm
		 * init time (not visible here - confirm); only a device
		 * sync is needed after updating the CPU copy.
		 */
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
					   ctx->adata.keylen_pad,
					   DMA_TO_DEVICE);
	}

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
3357 
3358 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3359 			       struct ahash_request *req)
3360 {
3361 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3362 
3363 	if (edesc->src_nents)
3364 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3365 
3366 	if (edesc->qm_sg_bytes)
3367 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3368 				 DMA_TO_DEVICE);
3369 
3370 	if (state->buf_dma) {
3371 		dma_unmap_single(dev, state->buf_dma, state->buflen,
3372 				 DMA_TO_DEVICE);
3373 		state->buf_dma = 0;
3374 	}
3375 }
3376 
3377 static inline void ahash_unmap_ctx(struct device *dev,
3378 				   struct ahash_edesc *edesc,
3379 				   struct ahash_request *req, u32 flag)
3380 {
3381 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3382 
3383 	if (state->ctx_dma) {
3384 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3385 		state->ctx_dma = 0;
3386 	}
3387 	ahash_unmap(dev, edesc, req);
3388 }
3389 
/*
 * Completion callback for one-shot digest jobs (used by ahash_digest()
 * in this file): the hardware wrote the digest into state->caam_ctx,
 * which was mapped DMA_FROM_DEVICE.
 */
static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	/* unmap before reading caam_ctx so the CPU sees coherent data */
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}
3416 
/*
 * Completion callback for context-updating jobs (used by
 * ahash_update_ctx()): the running context was mapped
 * DMA_BIDIRECTIONAL. After unmapping, the unhashed tail of the request
 * (state->next_buflen bytes) is copied into state->buf so the next
 * operation can prepend it.
 */
static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);

	/* carry over the bytes that were not part of this hash pass */
	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
3454 
/*
 * Completion callback for finalization jobs that consumed the running
 * context (used by ahash_final_ctx() and ahash_finup_ctx()): the
 * context was mapped DMA_BIDIRECTIONAL and now holds the digest, which
 * is copied to req->result.
 */
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	/* unmap before reading caam_ctx so the CPU sees coherent data */
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, ecode);
}
3481 
/*
 * Completion callback for jobs whose context was mapped DMA_FROM_DEVICE
 * (write-only from the device's point of view). Like ahash_done_bi() it
 * buffers the unhashed tail bytes for the next operation. Its user is
 * below the visible region (presumably a no-ctx update path - confirm).
 */
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);

	/* carry over the bytes that were not part of this hash pass */
	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
3519 
/*
 * .update backend when a running context exists: hash all whole blocks
 * currently available (previously buffered bytes + req->src) and carry
 * the remainder over into state->buf. If less than one block is
 * available, no job is issued and the data is only buffered.
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, 0 when the data
 * was merely buffered, or a negative errno.
 */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	/* bytes that do not make up a whole block stay buffered */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		/* HW S/G layout: [ctx][optional buf][src entries] */
		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			/* no src data: terminate at the buf/ctx entry */
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		/* sub-block input: just append it to the state buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
3638 
/*
 * .final backend when a running context exists: feed [ctx | buffered
 * bytes] to the FINALIZE descriptor; the digest lands back in
 * state->caam_ctx and is copied out in ahash_done_ctx_src().
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, else an errno.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return -ENOMEM;

	/* HW S/G layout: [ctx][optional buf], padded for the hardware */
	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	/* mark the last populated entry as final */
	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
3709 
/*
 * .finup backend when a running context exists: hash [ctx | buffered
 * bytes | all of req->src] with the FINALIZE descriptor; the digest is
 * delivered through ahash_done_ctx_src().
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, else an errno.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	/* HW S/G layout: [ctx][optional buf][src entries] */
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
3803 
/*
 * .digest backend: one-shot hash of req->src using the DIGEST
 * descriptor (no running context is consumed); the result is written
 * into state->caam_ctx and copied to req->result in ahash_done().
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, else an errno.
 */
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

	if (mapped_nents > 1) {
		/* multiple segments: feed the job through a HW S/G table */
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		/*
		 * zero or one segment: point the frame list entry at the
		 * source directly (len set below is 0 for empty requests)
		 */
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
3898 
/*
 * ahash_final_no_ctx - .final handler when no running CAAM context exists
 *
 * All data seen so far (0 to blocksize-1 bytes) still sits in the software
 * buffer state->buf.  Issue a one-shot DIGEST operation on that buffer
 * (possibly empty) and have the engine write the final digest straight into
 * state->caam_ctx, from where the completion callback copies it to the user.
 *
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, negative errno
 * otherwise (with all DMA mappings undone).
 */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}

	/* the digest is written by the engine directly into caam_ctx */
	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * crypto engine requires the input entry to be present when
	 * "frame list" FD is used.
	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
	 * in_fle zeroized (except for "Final" flag) is the best option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
3971 
/*
 * ahash_update_no_ctx - .update handler before any data was sent to CAAM
 *
 * Data is accumulated in the software buffer until at least one full block
 * is available.  Once it is, the buffered bytes plus the whole-block head of
 * req->src are hashed with UPDATE_FIRST, which creates a running context in
 * state->caam_ctx; the sub-blocksize tail of req->src is copied into the
 * buffer for the next call, and subsequent operations are redirected to the
 * *_ctx handlers.  If less than a block is available in total, the new data
 * is only appended to the buffer and no job is submitted.
 *
 * Returns 0 when everything was buffered, -EINPROGRESS/-EBUSY on successful
 * enqueue, negative errno otherwise.
 */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	/* hash only whole blocks; the remainder is carried in the buffer */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		/* +1 S/G entry for the software buffer prepended to src */
		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		/* engine writes the new running context into caam_ctx */
		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		/* a running context now exists: switch to the *_ctx handlers */
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* not enough for a block yet: just append to the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
4092 
/*
 * ahash_finup_no_ctx - .finup handler when no running CAAM context exists
 *
 * Hashes the software buffer (previously accumulated data) concatenated
 * with all of req->src in a single DIGEST operation.  A QMan S/G table is
 * built with the buffer as the first entry followed by the source
 * scatterlist; the digest is written by the engine into state->caam_ctx.
 *
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, negative errno
 * otherwise (with all DMA mappings undone).
 */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret = -ENOMEM;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	/* first S/G entry: the software buffer; then the source data */
	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	/* the digest is written by the engine directly into caam_ctx */
	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		ret = -ENOMEM;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
4188 
/*
 * ahash_update_first - very first .update call on a fresh hash state
 *
 * Hashes the whole-block head of req->src with UPDATE_FIRST, creating the
 * initial running context in state->caam_ctx; the sub-blocksize tail is
 * copied into the software buffer.  If req->src maps to a single DMA
 * segment the frame list entry points at it directly, otherwise a QMan S/G
 * table is built.  On successful submission, subsequent operations are
 * redirected to the *_ctx handlers; if less than one block was supplied,
 * the data is only buffered and the no-ctx handlers are installed instead.
 *
 * Returns 0 when everything was buffered, -EINPROGRESS/-EBUSY on successful
 * enqueue, negative errno otherwise.
 */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	/* hash only whole blocks; the remainder is carried in the buffer */
	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		sg_table = &edesc->sgt[0];

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_len(in_fle, to_hash);

		if (mapped_nents > 1) {
			/* multiple segments: input goes through a S/G table */
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
			qm_sg_bytes = pad_sg_nents(mapped_nents) *
				      sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			/* single contiguous segment: point at it directly */
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}

		/* engine writes the new running context into caam_ctx */
		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		/* a running context now exists: switch to the *_ctx handlers */
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* less than a block so far: buffer it, stay context-less */
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
4317 
/*
 * ahash_finup_first - .finup before any data was ever submitted
 *
 * With no buffered data and no running context, finup degenerates to a
 * plain one-shot digest of req->src.
 */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
4322 
4323 static int ahash_init(struct ahash_request *req)
4324 {
4325 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4326 
4327 	state->update = ahash_update_first;
4328 	state->finup = ahash_finup_first;
4329 	state->final = ahash_final_no_ctx;
4330 
4331 	state->ctx_dma = 0;
4332 	state->ctx_dma_len = 0;
4333 	state->buf_dma = 0;
4334 	state->buflen = 0;
4335 	state->next_buflen = 0;
4336 
4337 	return 0;
4338 }
4339 
4340 static int ahash_update(struct ahash_request *req)
4341 {
4342 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4343 
4344 	return state->update(req);
4345 }
4346 
4347 static int ahash_finup(struct ahash_request *req)
4348 {
4349 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4350 
4351 	return state->finup(req);
4352 }
4353 
4354 static int ahash_final(struct ahash_request *req)
4355 {
4356 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4357 
4358 	return state->final(req);
4359 }
4360 
4361 static int ahash_export(struct ahash_request *req, void *out)
4362 {
4363 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4364 	struct caam_export_state *export = out;
4365 	u8 *buf = state->buf;
4366 	int len = state->buflen;
4367 
4368 	memcpy(export->buf, buf, len);
4369 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4370 	export->buflen = len;
4371 	export->update = state->update;
4372 	export->final = state->final;
4373 	export->finup = state->finup;
4374 
4375 	return 0;
4376 }
4377 
4378 static int ahash_import(struct ahash_request *req, const void *in)
4379 {
4380 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4381 	const struct caam_export_state *export = in;
4382 
4383 	memset(state, 0, sizeof(*state));
4384 	memcpy(state->buf, export->buf, export->buflen);
4385 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4386 	state->buflen = export->buflen;
4387 	state->update = export->update;
4388 	state->final = export->final;
4389 	state->finup = export->finup;
4390 
4391 	return 0;
4392 }
4393 
/**
 * struct caam_hash_template - registration template for one hash algorithm
 * @name: generic name of the unkeyed variant (e.g. "sha256")
 * @driver_name: driver-specific name of the unkeyed variant
 * @hmac_name: generic name of the keyed (HMAC) variant
 * @hmac_driver_name: driver-specific name of the keyed variant
 * @blocksize: algorithm block size, in bytes
 * @template_ahash: ahash callbacks shared by both variants
 * @alg_type: CAAM algorithm selector (OP_ALG_ALGSEL_*)
 */
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
4403 
/*
 * Hash algorithms supported by the accelerator; each template is
 * registered twice, as the plain hash and as its HMAC variant.
 */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}
};
4534 
/**
 * struct caam_hash_alg - one hash algorithm instance built from a template
 * @entry: node in the driver's list of registered hash algorithms
 * @dev: DPSECI device this algorithm is bound to
 * @alg_type: CAAM algorithm selector (OP_ALG_ALGSEL_*)
 * @ahash_alg: the ahash algorithm as registered with the crypto API
 */
struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};
4541 
/*
 * caam_hash_cra_init - per-tfm setup for a hash transform
 *
 * DMA-maps the key buffer (keyed algorithms only) and the flow context
 * array (one contiguous mapping, sliced per operation into flc_dma[]),
 * selects the running-digest length for the algorithm and, for unkeyed
 * algorithms, builds the shared descriptors immediately; keyed ones defer
 * that to setkey().
 */
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	/* only keyed algorithms carry a key that the descriptors reference */
	if (alg->setkey) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	/* one mapping covers all flow contexts; flc_dma[] indexes into it */
	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	/* ALGSEL subfield doubles as an index into runninglen[] */
	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
4607 
/*
 * caam_hash_cra_exit - per-tfm teardown; undo the mappings made in
 * caam_hash_cra_init().  flc_dma[0] is the base address of the single
 * mapping that covers the whole flow context array.
 */
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	/* key was mapped only for keyed algorithms (see cra_init) */
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}
4619 
4620 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4621 	struct caam_hash_template *template, bool keyed)
4622 {
4623 	struct caam_hash_alg *t_alg;
4624 	struct ahash_alg *halg;
4625 	struct crypto_alg *alg;
4626 
4627 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4628 	if (!t_alg)
4629 		return ERR_PTR(-ENOMEM);
4630 
4631 	t_alg->ahash_alg = template->template_ahash;
4632 	halg = &t_alg->ahash_alg;
4633 	alg = &halg->halg.base;
4634 
4635 	if (keyed) {
4636 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4637 			 template->hmac_name);
4638 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4639 			 template->hmac_driver_name);
4640 	} else {
4641 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4642 			 template->name);
4643 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4644 			 template->driver_name);
4645 		t_alg->ahash_alg.setkey = NULL;
4646 	}
4647 	alg->cra_module = THIS_MODULE;
4648 	alg->cra_init = caam_hash_cra_init;
4649 	alg->cra_exit = caam_hash_cra_exit;
4650 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
4651 	alg->cra_priority = CAAM_CRA_PRIORITY;
4652 	alg->cra_blocksize = template->blocksize;
4653 	alg->cra_alignmask = 0;
4654 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4655 
4656 	t_alg->alg_type = template->alg_type;
4657 	t_alg->dev = dev;
4658 
4659 	return t_alg;
4660 }
4661 
4662 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4663 {
4664 	struct dpaa2_caam_priv_per_cpu *ppriv;
4665 
4666 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4667 	napi_schedule_irqoff(&ppriv->napi);
4668 }
4669 
/*
 * dpaa2_dpseci_dpio_setup - hook each used response FQ to an affine DPIO
 *
 * For up to priv->num_pairs online CPUs: register an FQDAN notification
 * callback on the CPU-affine DPIO and allocate a dequeue store for pulled
 * frames.  On failure, everything set up so far is unwound; the unwind
 * loops stop at the first CPU whose nctx.cb/store is still unset, which
 * marks how far setup progressed.
 */
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			/* clear cb so the unwind loop stops at this cpu */
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		/* only as many CPUs as there are Rx/Tx queue pairs */
		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}
4733 
4734 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4735 {
4736 	struct dpaa2_caam_priv_per_cpu *ppriv;
4737 	int i = 0, cpu;
4738 
4739 	for_each_online_cpu(cpu) {
4740 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4741 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4742 					    priv->dev);
4743 		dpaa2_io_store_destroy(ppriv->store);
4744 
4745 		if (++i == priv->num_pairs)
4746 			return;
4747 	}
4748 }
4749 
/*
 * dpaa2_dpseci_bind - point each DPSECI Rx queue at its per-CPU DPIO
 *
 * For queue index i (0 .. num_pairs-1), sets the destination DPIO and the
 * user context (the 64-bit notification token) so that frame arrivals on
 * that Rx queue trigger the FQDAN callback registered for the matching CPU.
 *
 * NOTE(review): rx_queue_cfg is stack-allocated and only the fields covered
 * by the DPSECI_QUEUE_OPT_* bits in .options are set each iteration —
 * presumably the MC firmware ignores the rest; confirm against the dpseci
 * command definition.
 */
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		/* only as many queues as there are Rx/Tx pairs */
		if (++i == priv->num_pairs)
			break;
	}

	return err;
}
4788 
4789 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4790 {
4791 	struct device *dev = priv->dev;
4792 
4793 	if (!priv->cscn_mem)
4794 		return;
4795 
4796 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4797 	kfree(priv->cscn_mem);
4798 }
4799 
/*
 * dpaa2_dpseci_free - tear down the DPSECI object
 *
 * Resets the object (supported by MC firmware with DPSECI API > 5.3),
 * frees the congestion notification area and closes the MC handle.
 */
static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int err;

	/* dpseci_reset() was introduced after DPSECI API version 5.3 */
	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
4815 
/*
 * dpaa2_caam_process_fd - complete one response frame descriptor
 *
 * Recovers the originating caam_request from FD[ADDR], unmaps its frame
 * list and invokes the request's completion callback with the engine
 * status taken from FD[FRC].  FD errors are logged but do not prevent
 * completion.
 */
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	/* the driver only ever enqueues frame list FDs */
	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}
4840 
4841 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4842 {
4843 	int err;
4844 
4845 	/* Retry while portal is busy */
4846 	do {
4847 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4848 					       ppriv->store);
4849 	} while (err == -EBUSY);
4850 
4851 	if (unlikely(err))
4852 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4853 
4854 	return err;
4855 }
4856 
/*
 * dpaa2_caam_store_consume - drain one volatile dequeue store
 *
 * Walks the dequeue entries produced by the previous pull command,
 * completing one FD per valid entry, until the hardware marks the last
 * response of the command.  Returns the number of FDs processed.
 */
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			/* NULL with !is_last means "no token yet", not "done" */
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}
4886 
/*
 * NAPI poll callback: repeatedly pull responses from the Rx FQ into the
 * per-channel store and process them, stopping once the FQ is drained or
 * the NAPI budget would be exceeded.
 *
 * Return: number of frames processed (NAPI contract).
 */
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	/* Initial pull failed - report zero work; NAPI will poll us again */
	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		/*
		 * Stop when the FQ was drained (nothing consumed this pass)
		 * or when processing another full store (up to
		 * DPAA2_CAAM_STORE_SIZE frames) could overshoot the budget.
		 */
		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		/* Work is done: leave polling mode and re-arm notifications */
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}
4923 
4924 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4925 					 u16 token)
4926 {
4927 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4928 	struct device *dev = priv->dev;
4929 	int err;
4930 
4931 	/*
4932 	 * Congestion group feature supported starting with DPSECI API v5.1
4933 	 * and only when object has been created with this capability.
4934 	 */
4935 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4936 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4937 		return 0;
4938 
4939 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4940 				 GFP_KERNEL | GFP_DMA);
4941 	if (!priv->cscn_mem)
4942 		return -ENOMEM;
4943 
4944 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4945 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4946 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4947 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4948 		dev_err(dev, "Error mapping CSCN memory area\n");
4949 		err = -ENOMEM;
4950 		goto err_dma_map;
4951 	}
4952 
4953 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4954 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4955 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4956 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4957 	cong_notif_cfg.message_iova = priv->cscn_dma;
4958 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4959 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4960 					DPSECI_CGN_MODE_COHERENT_WRITE;
4961 
4962 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4963 						 &cong_notif_cfg);
4964 	if (err) {
4965 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4966 		goto err_set_cong;
4967 	}
4968 
4969 	return 0;
4970 
4971 err_set_cong:
4972 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4973 err_dma_map:
4974 	kfree(priv->cscn_mem);
4975 
4976 	return err;
4977 }
4978 
/*
 * One-time DPSECI object setup: open the MC object, query its version and
 * attributes, configure congestion notifications, fetch the Rx/Tx queue
 * attributes and distribute the queue pairs across the online CPUs
 * (including a per-core NAPI instance for each Rx queue actually used).
 */
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	/* dpseci_reset() is only available for DPSECI API versions > 5.3 */
	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err) {
			dev_err(dev, "dpseci_reset() failed\n");
			goto err_get_vers;
		}
	}

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	/* Use at most one Rx/Tx queue pair per online CPU */
	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	/* Assign queue pairs to online CPUs round-robin */
	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		/* Only the first num_pairs cores get an Rx queue + NAPI */
		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		/* Dummy netdev so the standard NAPI machinery can be used */
		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
					 dpaa2_dpseci_poll,
					 DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}
5101 
5102 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5103 {
5104 	struct device *dev = priv->dev;
5105 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5106 	struct dpaa2_caam_priv_per_cpu *ppriv;
5107 	int i;
5108 
5109 	for (i = 0; i < priv->num_pairs; i++) {
5110 		ppriv = per_cpu_ptr(priv->ppriv, i);
5111 		napi_enable(&ppriv->napi);
5112 	}
5113 
5114 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5115 }
5116 
5117 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5118 {
5119 	struct device *dev = priv->dev;
5120 	struct dpaa2_caam_priv_per_cpu *ppriv;
5121 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5122 	int i, err = 0, enabled;
5123 
5124 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5125 	if (err) {
5126 		dev_err(dev, "dpseci_disable() failed\n");
5127 		return err;
5128 	}
5129 
5130 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5131 	if (err) {
5132 		dev_err(dev, "dpseci_is_enabled() failed\n");
5133 		return err;
5134 	}
5135 
5136 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5137 
5138 	for (i = 0; i < priv->num_pairs; i++) {
5139 		ppriv = per_cpu_ptr(priv->ppriv, i);
5140 		napi_disable(&ppriv->napi);
5141 		netif_napi_del(&ppriv->napi);
5142 	}
5143 
5144 	return 0;
5145 }
5146 
5147 static struct list_head hash_list;
5148 
/*
 * Bind to a DPSECI object: allocate driver state, an MC portal and the
 * per-CPU channel state, set up the DPSECI/DPIO plumbing, then register
 * every skcipher/aead/ahash algorithm the SEC hardware actually supports.
 * Individual algorithm registration failures are tolerated (warn + skip).
 */
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	/* Cache for the short hotpath buffers (see the qi_cache comment) */
	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	/*
	 * NOTE(review): err may still hold the (tolerated) failure of the
	 * last hash registration attempt, making probe fail without undoing
	 * the registrations above - confirm whether "return 0" was intended.
	 */
	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}
5394 
5395 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5396 {
5397 	struct device *dev;
5398 	struct dpaa2_caam_priv *priv;
5399 	int i;
5400 
5401 	dev = &ls_dev->dev;
5402 	priv = dev_get_drvdata(dev);
5403 
5404 	dpaa2_dpseci_debugfs_exit(priv);
5405 
5406 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5407 		struct caam_aead_alg *t_alg = driver_aeads + i;
5408 
5409 		if (t_alg->registered)
5410 			crypto_unregister_aead(&t_alg->aead);
5411 	}
5412 
5413 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5414 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5415 
5416 		if (t_alg->registered)
5417 			crypto_unregister_skcipher(&t_alg->skcipher);
5418 	}
5419 
5420 	if (hash_list.next) {
5421 		struct caam_hash_alg *t_hash_alg, *p;
5422 
5423 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5424 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5425 			list_del(&t_hash_alg->entry);
5426 			kfree(t_hash_alg);
5427 		}
5428 	}
5429 
5430 	dpaa2_dpseci_disable(priv);
5431 	dpaa2_dpseci_dpio_free(priv);
5432 	dpaa2_dpseci_free(priv);
5433 	free_percpu(priv->ppriv);
5434 	fsl_mc_portal_free(priv->mc_io);
5435 	kmem_cache_destroy(qi_cache);
5436 
5437 	return 0;
5438 }
5439 
5440 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5441 {
5442 	struct dpaa2_fd fd;
5443 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5444 	struct dpaa2_caam_priv_per_cpu *ppriv;
5445 	int err = 0, i;
5446 
5447 	if (IS_ERR(req))
5448 		return PTR_ERR(req);
5449 
5450 	if (priv->cscn_mem) {
5451 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5452 					DPAA2_CSCN_SIZE,
5453 					DMA_FROM_DEVICE);
5454 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5455 			dev_dbg_ratelimited(dev, "Dropping request\n");
5456 			return -EBUSY;
5457 		}
5458 	}
5459 
5460 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5461 
5462 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5463 					 DMA_BIDIRECTIONAL);
5464 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5465 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5466 		goto err_out;
5467 	}
5468 
5469 	memset(&fd, 0, sizeof(fd));
5470 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5471 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5472 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5473 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5474 
5475 	ppriv = raw_cpu_ptr(priv->ppriv);
5476 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5477 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5478 						  &fd);
5479 		if (err != -EBUSY)
5480 			break;
5481 
5482 		cpu_relax();
5483 	}
5484 
5485 	if (unlikely(err)) {
5486 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5487 		goto err_out;
5488 	}
5489 
5490 	return -EINPROGRESS;
5491 
5492 err_out:
5493 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5494 			 DMA_BIDIRECTIONAL);
5495 	return -EIO;
5496 }
5497 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5498 
/* Bind to any Freescale/NXP MC object of type "dpseci" */
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }	/* sentinel */
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5507 
/* fsl-mc bus driver glue */
static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);
5523