1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/fsl/mc.h>
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
22 #include <crypto/xts.h>
23 #include <asm/unaligned.h>
24 
25 #define CAAM_CRA_PRIORITY	2000
26 
27 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
28 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
29 				 SHA512_DIGEST_SIZE * 2)
30 
31 /*
32  * This is a cache of buffers, from which users of the CAAM QI driver
33  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
34  * NOTE: A more elegant solution would be to have some headroom in the frames
35  *       being processed. This can be added by the dpaa2-eth driver. This would
36  *       pose a problem for userspace application processing which cannot
37  *       know of this limitation. So for now, this will work.
38  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here
39  */
40 static struct kmem_cache *qi_cache;
41 
42 struct caam_alg_entry {
43 	struct device *dev;
44 	int class1_alg_type;
45 	int class2_alg_type;
46 	bool rfc3686;
47 	bool geniv;
48 	bool nodkp;
49 };
50 
51 struct caam_aead_alg {
52 	struct aead_alg aead;
53 	struct caam_alg_entry caam;
54 	bool registered;
55 };
56 
57 struct caam_skcipher_alg {
58 	struct skcipher_alg skcipher;
59 	struct caam_alg_entry caam;
60 	bool registered;
61 };
62 
63 /**
64  * struct caam_ctx - per-session context
65  * @flc: Flow Contexts array
66  * @key:  [authentication key], encryption key
67  * @flc_dma: I/O virtual addresses of the Flow Contexts
68  * @key_dma: I/O virtual address of the key
69  * @dir: DMA direction for mapping key and Flow Contexts
70  * @dev: dpseci device
71  * @adata: authentication algorithm details
72  * @cdata: encryption algorithm details
73  * @authsize: authentication tag (a.k.a. ICV / MAC) size
74  * @xts_key_fallback: true if fallback tfm needs to be used due
75  *		      to unsupported xts key lengths
76  * @fallback: xts fallback tfm
77  */
78 struct caam_ctx {
79 	struct caam_flc flc[NUM_OP];
80 	u8 key[CAAM_MAX_KEY_SIZE];
81 	dma_addr_t flc_dma[NUM_OP];
82 	dma_addr_t key_dma;
83 	enum dma_data_direction dir;
84 	struct device *dev;
85 	struct alginfo adata;
86 	struct alginfo cdata;
87 	unsigned int authsize;
88 	bool xts_key_fallback;
89 	struct crypto_skcipher *fallback;
90 };
91 
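/*
 * Translate an I/O virtual address returned by the DPSECI object back into
 * a kernel virtual address, going through the IOMMU domain when one is
 * attached to the device.
 */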
92 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
93 				     dma_addr_t iova_addr)
94 {
95 	phys_addr_t phys_addr;
96 
97 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
98 				   iova_addr;
99 
100 	return phys_to_virt(phys_addr);
101 }
102 
103 /*
104  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
105  *
106  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
107  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
108  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
109  * hosting 16 SG entries.
110  *
111  * @flags - flags that would be used for the equivalent kmalloc(..) call
112  *
113  * Returns a pointer to a retrieved buffer on success or NULL on failure.
114  */
115 static inline void *qi_cache_zalloc(gfp_t flags)
116 {
117 	return kmem_cache_zalloc(qi_cache, flags);
118 }
119 
120 /*
121  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
122  *
123  * @obj - buffer previously allocated by qi_cache_zalloc
124  *
125  * No checking is being done, the call is a passthrough call to
126  * kmem_cache_free(...)
127  */
128 static inline void qi_cache_free(void *obj)
129 {
130 	kmem_cache_free(qi_cache, obj);
131 }
132 
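/*
 * Every request type reserves room for a struct caam_request in its request
 * context (see the *_set_reqsize() calls at tfm init time), so the driver
 * context can be recovered from the generic async request on completion.
 */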
133 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
134 {
135 	switch (crypto_tfm_alg_type(areq->tfm)) {
136 	case CRYPTO_ALG_TYPE_SKCIPHER:
137 		return skcipher_request_ctx(skcipher_request_cast(areq));
138 	case CRYPTO_ALG_TYPE_AEAD:
139 		return aead_request_ctx(container_of(areq, struct aead_request,
140 						     base));
141 	case CRYPTO_ALG_TYPE_AHASH:
142 		return ahash_request_ctx(ahash_request_cast(areq));
143 	default:
144 		return ERR_PTR(-EINVAL);
145 	}
146 }
147 
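/*
 * Undo the DMA mappings created for a request: source/destination
 * scatterlists, the IV buffer and the hardware S/G table, whichever of
 * these were actually mapped.
 */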
148 static void caam_unmap(struct device *dev, struct scatterlist *src,
149 		       struct scatterlist *dst, int src_nents,
150 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
151 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
152 		       int qm_sg_bytes)
153 {
154 	if (dst != src) {
155 		if (src_nents)
156 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
157 		if (dst_nents)
158 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
159 	} else {
160 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
161 	}
162 
163 	if (iv_dma)
164 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
165 
166 	if (qm_sg_bytes)
167 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
168 }
169 
170 static int aead_set_sh_desc(struct crypto_aead *aead)
171 {
172 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
173 						 typeof(*alg), aead);
174 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
175 	unsigned int ivsize = crypto_aead_ivsize(aead);
176 	struct device *dev = ctx->dev;
177 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
178 	struct caam_flc *flc;
179 	u32 *desc;
180 	u32 ctx1_iv_off = 0;
181 	u32 *nonce = NULL;
182 	unsigned int data_len[2];
183 	u32 inl_mask;
184 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
185 			       OP_ALG_AAI_CTR_MOD128);
186 	const bool is_rfc3686 = alg->caam.rfc3686;
187 
188 	if (!ctx->cdata.keylen || !ctx->authsize)
189 		return 0;
190 
191 	/*
192 	 * AES-CTR needs to load IV in CONTEXT1 reg
193 	 * at an offset of 128bits (16bytes)
194 	 * CONTEXT1[255:128] = IV
195 	 */
196 	if (ctr_mode)
197 		ctx1_iv_off = 16;
198 
199 	/*
200 	 * RFC3686 specific:
201 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
202 	 */
203 	if (is_rfc3686) {
204 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
205 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
206 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
207 	}
208 
209 	/*
210 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
211 	 * in invalid opcodes (last bytes of user key) in the resulting
212 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
213 	 * addresses are needed.
214 	 */
215 	ctx->adata.key_virt = ctx->key;
216 	ctx->adata.key_dma = ctx->key_dma;
217 
218 	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
219 	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
220 
221 	data_len[0] = ctx->adata.keylen_pad;
222 	data_len[1] = ctx->cdata.keylen;
223 
224 	/* aead_encrypt shared descriptor */
225 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
226 						 DESC_QI_AEAD_ENC_LEN) +
227 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
228 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
229 			      ARRAY_SIZE(data_len)) < 0)
230 		return -EINVAL;
231 
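	/*
	 * desc_inline_query() sets bit i of inl_mask when data_len[i] fits
	 * inline in the shared descriptor: bit 0 - split authentication key,
	 * bit 1 - encryption key.
	 */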
232 	ctx->adata.key_inline = !!(inl_mask & 1);
233 	ctx->cdata.key_inline = !!(inl_mask & 2);
234 
235 	flc = &ctx->flc[ENCRYPT];
236 	desc = flc->sh_desc;
237 
238 	if (alg->caam.geniv)
239 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
240 					  ivsize, ctx->authsize, is_rfc3686,
241 					  nonce, ctx1_iv_off, true,
242 					  priv->sec_attr.era);
243 	else
244 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
245 				       ivsize, ctx->authsize, is_rfc3686, nonce,
246 				       ctx1_iv_off, true, priv->sec_attr.era);
247 
248 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
249 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
250 				   sizeof(flc->flc) + desc_bytes(desc),
251 				   ctx->dir);
252 
253 	/* aead_decrypt shared descriptor */
254 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
255 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
256 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
257 			      ARRAY_SIZE(data_len)) < 0)
258 		return -EINVAL;
259 
260 	ctx->adata.key_inline = !!(inl_mask & 1);
261 	ctx->cdata.key_inline = !!(inl_mask & 2);
262 
263 	flc = &ctx->flc[DECRYPT];
264 	desc = flc->sh_desc;
265 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
266 			       ivsize, ctx->authsize, alg->caam.geniv,
267 			       is_rfc3686, nonce, ctx1_iv_off, true,
268 			       priv->sec_attr.era);
269 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
270 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
271 				   sizeof(flc->flc) + desc_bytes(desc),
272 				   ctx->dir);
273 
274 	return 0;
275 }
276 
277 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
278 {
279 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
280 
281 	ctx->authsize = authsize;
282 	aead_set_sh_desc(authenc);
283 
284 	return 0;
285 }
286 
287 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
288 		       unsigned int keylen)
289 {
290 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
291 	struct device *dev = ctx->dev;
292 	struct crypto_authenc_keys keys;
293 
294 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
295 		goto badkey;
296 
297 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
298 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
299 		keys.authkeylen);
300 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
301 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
302 
303 	ctx->adata.keylen = keys.authkeylen;
304 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
305 					      OP_ALG_ALGSEL_MASK);
306 
307 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
308 		goto badkey;
309 
310 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
311 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
312 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
313 				   keys.enckeylen, ctx->dir);
314 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
315 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
316 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
317 
318 	ctx->cdata.keylen = keys.enckeylen;
319 
320 	memzero_explicit(&keys, sizeof(keys));
321 	return aead_set_sh_desc(aead);
322 badkey:
323 	memzero_explicit(&keys, sizeof(keys));
324 	return -EINVAL;
325 }
326 
327 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
328 			    unsigned int keylen)
329 {
330 	struct crypto_authenc_keys keys;
331 	int err;
332 
333 	err = crypto_authenc_extractkeys(&keys, key, keylen);
334 	if (unlikely(err))
335 		goto out;
336 
337 	err = -EINVAL;
338 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
339 		goto out;
340 
341 	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
342 	      aead_setkey(aead, key, keylen);
343 
344 out:
345 	memzero_explicit(&keys, sizeof(keys));
346 	return err;
347 }
348 
349 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
350 					   bool encrypt)
351 {
352 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
353 	struct caam_request *req_ctx = aead_request_ctx(req);
354 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
355 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
356 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
357 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
358 						 typeof(*alg), aead);
359 	struct device *dev = ctx->dev;
360 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
361 		      GFP_KERNEL : GFP_ATOMIC;
362 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
363 	int src_len, dst_len = 0;
364 	struct aead_edesc *edesc;
365 	dma_addr_t qm_sg_dma, iv_dma = 0;
366 	int ivsize = 0;
367 	unsigned int authsize = ctx->authsize;
368 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
369 	int in_len, out_len;
370 	struct dpaa2_sg_entry *sg_table;
371 
372 	/* allocate space for base edesc, link tables and IV */
373 	edesc = qi_cache_zalloc(GFP_DMA | flags);
374 	if (unlikely(!edesc)) {
375 		dev_err(dev, "could not allocate extended descriptor\n");
376 		return ERR_PTR(-ENOMEM);
377 	}
378 
379 	if (unlikely(req->dst != req->src)) {
380 		src_len = req->assoclen + req->cryptlen;
381 		dst_len = src_len + (encrypt ? authsize : (-authsize));
382 
383 		src_nents = sg_nents_for_len(req->src, src_len);
384 		if (unlikely(src_nents < 0)) {
385 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
386 				src_len);
387 			qi_cache_free(edesc);
388 			return ERR_PTR(src_nents);
389 		}
390 
391 		dst_nents = sg_nents_for_len(req->dst, dst_len);
392 		if (unlikely(dst_nents < 0)) {
393 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
394 				dst_len);
395 			qi_cache_free(edesc);
396 			return ERR_PTR(dst_nents);
397 		}
398 
399 		if (src_nents) {
400 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
401 						      DMA_TO_DEVICE);
402 			if (unlikely(!mapped_src_nents)) {
403 				dev_err(dev, "unable to map source\n");
404 				qi_cache_free(edesc);
405 				return ERR_PTR(-ENOMEM);
406 			}
407 		} else {
408 			mapped_src_nents = 0;
409 		}
410 
411 		if (dst_nents) {
412 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
413 						      DMA_FROM_DEVICE);
414 			if (unlikely(!mapped_dst_nents)) {
415 				dev_err(dev, "unable to map destination\n");
416 				dma_unmap_sg(dev, req->src, src_nents,
417 					     DMA_TO_DEVICE);
418 				qi_cache_free(edesc);
419 				return ERR_PTR(-ENOMEM);
420 			}
421 		} else {
422 			mapped_dst_nents = 0;
423 		}
424 	} else {
425 		src_len = req->assoclen + req->cryptlen +
426 			  (encrypt ? authsize : 0);
427 
428 		src_nents = sg_nents_for_len(req->src, src_len);
429 		if (unlikely(src_nents < 0)) {
430 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
431 				src_len);
432 			qi_cache_free(edesc);
433 			return ERR_PTR(src_nents);
434 		}
435 
436 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
437 					      DMA_BIDIRECTIONAL);
438 		if (unlikely(!mapped_src_nents)) {
439 			dev_err(dev, "unable to map source\n");
440 			qi_cache_free(edesc);
441 			return ERR_PTR(-ENOMEM);
442 		}
443 	}
444 
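	/*
	 * An IV entry is added to the input S/G for all non-geniv algorithms;
	 * for IV-generating (geniv) ones it is only needed when encrypting in
	 * RFC3686 (CTR) mode.
	 */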
445 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
446 		ivsize = crypto_aead_ivsize(aead);
447 
448 	/*
449 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
450 	 * Input is not contiguous.
451 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
452 	 * the end of the table by allocating more S/G entries. Logic:
453 	 * if (src != dst && output S/G)
454 	 *      pad output S/G, if needed
455 	 * else if (src == dst && S/G)
456 	 *      overlapping S/Gs; pad one of them
457 	 * else if (input S/G) ...
458 	 *      pad input S/G, if needed
459 	 */
460 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
461 	if (mapped_dst_nents > 1)
462 		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
463 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
464 		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
465 				  1 + !!ivsize +
466 				  pad_sg_nents(mapped_src_nents));
467 	else
468 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
469 
470 	sg_table = &edesc->sgt[0];
471 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
472 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
473 		     CAAM_QI_MEMCACHE_SIZE)) {
474 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
475 			qm_sg_nents, ivsize);
476 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
477 			   0, DMA_NONE, 0, 0);
478 		qi_cache_free(edesc);
479 		return ERR_PTR(-ENOMEM);
480 	}
481 
482 	if (ivsize) {
483 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
484 
485 		/* Make sure IV is located in a DMAable area */
486 		memcpy(iv, req->iv, ivsize);
487 
488 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
489 		if (dma_mapping_error(dev, iv_dma)) {
490 			dev_err(dev, "unable to map IV\n");
491 			caam_unmap(dev, req->src, req->dst, src_nents,
492 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
493 			qi_cache_free(edesc);
494 			return ERR_PTR(-ENOMEM);
495 		}
496 	}
497 
498 	edesc->src_nents = src_nents;
499 	edesc->dst_nents = dst_nents;
500 	edesc->iv_dma = iv_dma;
501 
502 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
503 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
504 		/*
505 		 * The associated data comes already with the IV but we need
506 		 * to skip it when we authenticate or encrypt...
507 		 */
508 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
509 	else
510 		edesc->assoclen = cpu_to_caam32(req->assoclen);
511 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
512 					     DMA_TO_DEVICE);
513 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
514 		dev_err(dev, "unable to map assoclen\n");
515 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
516 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
517 		qi_cache_free(edesc);
518 		return ERR_PTR(-ENOMEM);
519 	}
520 
521 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
522 	qm_sg_index++;
523 	if (ivsize) {
524 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
525 		qm_sg_index++;
526 	}
527 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
528 	qm_sg_index += mapped_src_nents;
529 
530 	if (mapped_dst_nents > 1)
531 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
532 
533 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
534 	if (dma_mapping_error(dev, qm_sg_dma)) {
535 		dev_err(dev, "unable to map S/G table\n");
536 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
537 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
538 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
539 		qi_cache_free(edesc);
540 		return ERR_PTR(-ENOMEM);
541 	}
542 
543 	edesc->qm_sg_dma = qm_sg_dma;
544 	edesc->qm_sg_bytes = qm_sg_bytes;
545 
546 	out_len = req->assoclen + req->cryptlen +
547 		  (encrypt ? ctx->authsize : (-ctx->authsize));
548 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
549 
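	/*
	 * Build the frame list: fd_flt[1] is the input entry (the S/G table
	 * constructed above) and fd_flt[0] is the output entry.
	 */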
550 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
551 	dpaa2_fl_set_final(in_fle, true);
552 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
553 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
554 	dpaa2_fl_set_len(in_fle, in_len);
555 
556 	if (req->dst == req->src) {
557 		if (mapped_src_nents == 1) {
558 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
559 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
560 		} else {
561 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
562 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
563 					  (1 + !!ivsize) * sizeof(*sg_table));
564 		}
565 	} else if (!mapped_dst_nents) {
566 		/*
567 		 * crypto engine requires the output entry to be present when
568 		 * "frame list" FD is used.
569 		 * Since engine does not support FMT=2'b11 (unused entry type),
570 		 * leaving out_fle zeroized is the best option.
571 		 */
572 		goto skip_out_fle;
573 	} else if (mapped_dst_nents == 1) {
574 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
575 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
576 	} else {
577 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
578 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
579 				  sizeof(*sg_table));
580 	}
581 
582 	dpaa2_fl_set_len(out_fle, out_len);
583 
584 skip_out_fle:
585 	return edesc;
586 }
587 
588 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
589 {
590 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
591 	unsigned int ivsize = crypto_aead_ivsize(aead);
592 	struct device *dev = ctx->dev;
593 	struct caam_flc *flc;
594 	u32 *desc;
595 
596 	if (!ctx->cdata.keylen || !ctx->authsize)
597 		return 0;
598 
599 	flc = &ctx->flc[ENCRYPT];
600 	desc = flc->sh_desc;
601 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
602 			       ctx->authsize, true, true);
603 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
604 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
605 				   sizeof(flc->flc) + desc_bytes(desc),
606 				   ctx->dir);
607 
608 	flc = &ctx->flc[DECRYPT];
609 	desc = flc->sh_desc;
610 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
611 			       ctx->authsize, false, true);
612 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
613 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
614 				   sizeof(flc->flc) + desc_bytes(desc),
615 				   ctx->dir);
616 
617 	return 0;
618 }
619 
620 static int chachapoly_setauthsize(struct crypto_aead *aead,
621 				  unsigned int authsize)
622 {
623 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
624 
625 	if (authsize != POLY1305_DIGEST_SIZE)
626 		return -EINVAL;
627 
628 	ctx->authsize = authsize;
629 	return chachapoly_set_sh_desc(aead);
630 }
631 
632 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
633 			     unsigned int keylen)
634 {
635 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
636 	unsigned int ivsize = crypto_aead_ivsize(aead);
637 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
638 
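	/*
	 * rfc7539 uses the full 12-byte IV, so there is no salt; rfc7539esp
	 * uses an 8-byte IV and carries the remaining 4 bytes as a salt
	 * appended to the key.
	 */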
639 	if (keylen != CHACHA_KEY_SIZE + saltlen)
640 		return -EINVAL;
641 
642 	ctx->cdata.key_virt = key;
643 	ctx->cdata.keylen = keylen - saltlen;
644 
645 	return chachapoly_set_sh_desc(aead);
646 }
647 
648 static int gcm_set_sh_desc(struct crypto_aead *aead)
649 {
650 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
651 	struct device *dev = ctx->dev;
652 	unsigned int ivsize = crypto_aead_ivsize(aead);
653 	struct caam_flc *flc;
654 	u32 *desc;
655 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
656 			ctx->cdata.keylen;
657 
658 	if (!ctx->cdata.keylen || !ctx->authsize)
659 		return 0;
660 
661 	/*
662 	 * AES GCM encrypt shared descriptor
663 	 * Job Descriptor and Shared Descriptor
664 	 * must fit into the 64-word Descriptor h/w Buffer
665 	 */
666 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
667 		ctx->cdata.key_inline = true;
668 		ctx->cdata.key_virt = ctx->key;
669 	} else {
670 		ctx->cdata.key_inline = false;
671 		ctx->cdata.key_dma = ctx->key_dma;
672 	}
673 
674 	flc = &ctx->flc[ENCRYPT];
675 	desc = flc->sh_desc;
676 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
677 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
678 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
679 				   sizeof(flc->flc) + desc_bytes(desc),
680 				   ctx->dir);
681 
682 	/*
683 	 * Job Descriptor and Shared Descriptors
684 	 * must all fit into the 64-word Descriptor h/w Buffer
685 	 */
686 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
687 		ctx->cdata.key_inline = true;
688 		ctx->cdata.key_virt = ctx->key;
689 	} else {
690 		ctx->cdata.key_inline = false;
691 		ctx->cdata.key_dma = ctx->key_dma;
692 	}
693 
694 	flc = &ctx->flc[DECRYPT];
695 	desc = flc->sh_desc;
696 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
697 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
698 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
699 				   sizeof(flc->flc) + desc_bytes(desc),
700 				   ctx->dir);
701 
702 	return 0;
703 }
704 
705 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
706 {
707 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
708 	int err;
709 
710 	err = crypto_gcm_check_authsize(authsize);
711 	if (err)
712 		return err;
713 
714 	ctx->authsize = authsize;
715 	gcm_set_sh_desc(authenc);
716 
717 	return 0;
718 }
719 
720 static int gcm_setkey(struct crypto_aead *aead,
721 		      const u8 *key, unsigned int keylen)
722 {
723 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
724 	struct device *dev = ctx->dev;
725 	int ret;
726 
727 	ret = aes_check_keylen(keylen);
728 	if (ret)
729 		return ret;
730 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
731 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
732 
733 	memcpy(ctx->key, key, keylen);
734 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
735 	ctx->cdata.keylen = keylen;
736 
737 	return gcm_set_sh_desc(aead);
738 }
739 
740 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
741 {
742 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
743 	struct device *dev = ctx->dev;
744 	unsigned int ivsize = crypto_aead_ivsize(aead);
745 	struct caam_flc *flc;
746 	u32 *desc;
747 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
748 			ctx->cdata.keylen;
749 
750 	if (!ctx->cdata.keylen || !ctx->authsize)
751 		return 0;
752 
753 	ctx->cdata.key_virt = ctx->key;
754 
755 	/*
756 	 * RFC4106 encrypt shared descriptor
757 	 * Job Descriptor and Shared Descriptor
758 	 * must fit into the 64-word Descriptor h/w Buffer
759 	 */
760 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
761 		ctx->cdata.key_inline = true;
762 	} else {
763 		ctx->cdata.key_inline = false;
764 		ctx->cdata.key_dma = ctx->key_dma;
765 	}
766 
767 	flc = &ctx->flc[ENCRYPT];
768 	desc = flc->sh_desc;
769 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
770 				  true);
771 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
772 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
773 				   sizeof(flc->flc) + desc_bytes(desc),
774 				   ctx->dir);
775 
776 	/*
777 	 * Job Descriptor and Shared Descriptors
778 	 * must all fit into the 64-word Descriptor h/w Buffer
779 	 */
780 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
781 		ctx->cdata.key_inline = true;
782 	} else {
783 		ctx->cdata.key_inline = false;
784 		ctx->cdata.key_dma = ctx->key_dma;
785 	}
786 
787 	flc = &ctx->flc[DECRYPT];
788 	desc = flc->sh_desc;
789 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
790 				  true);
791 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
792 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
793 				   sizeof(flc->flc) + desc_bytes(desc),
794 				   ctx->dir);
795 
796 	return 0;
797 }
798 
799 static int rfc4106_setauthsize(struct crypto_aead *authenc,
800 			       unsigned int authsize)
801 {
802 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
803 	int err;
804 
805 	err = crypto_rfc4106_check_authsize(authsize);
806 	if (err)
807 		return err;
808 
809 	ctx->authsize = authsize;
810 	rfc4106_set_sh_desc(authenc);
811 
812 	return 0;
813 }
814 
815 static int rfc4106_setkey(struct crypto_aead *aead,
816 			  const u8 *key, unsigned int keylen)
817 {
818 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
819 	struct device *dev = ctx->dev;
820 	int ret;
821 
822 	ret = aes_check_keylen(keylen - 4);
823 	if (ret)
824 		return ret;
825 
826 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
827 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
828 
829 	memcpy(ctx->key, key, keylen);
830 	/*
831 	 * The last four bytes of the key material are used as the salt value
832 	 * in the nonce. Update the AES key length.
833 	 */
834 	ctx->cdata.keylen = keylen - 4;
835 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
836 				   ctx->dir);
837 
838 	return rfc4106_set_sh_desc(aead);
839 }
840 
841 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
842 {
843 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
844 	struct device *dev = ctx->dev;
845 	unsigned int ivsize = crypto_aead_ivsize(aead);
846 	struct caam_flc *flc;
847 	u32 *desc;
848 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
849 			ctx->cdata.keylen;
850 
851 	if (!ctx->cdata.keylen || !ctx->authsize)
852 		return 0;
853 
854 	ctx->cdata.key_virt = ctx->key;
855 
856 	/*
857 	 * RFC4543 encrypt shared descriptor
858 	 * Job Descriptor and Shared Descriptor
859 	 * must fit into the 64-word Descriptor h/w Buffer
860 	 */
861 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
862 		ctx->cdata.key_inline = true;
863 	} else {
864 		ctx->cdata.key_inline = false;
865 		ctx->cdata.key_dma = ctx->key_dma;
866 	}
867 
868 	flc = &ctx->flc[ENCRYPT];
869 	desc = flc->sh_desc;
870 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
871 				  true);
872 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
873 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
874 				   sizeof(flc->flc) + desc_bytes(desc),
875 				   ctx->dir);
876 
877 	/*
878 	 * Job Descriptor and Shared Descriptors
879 	 * must all fit into the 64-word Descriptor h/w Buffer
880 	 */
881 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
882 		ctx->cdata.key_inline = true;
883 	} else {
884 		ctx->cdata.key_inline = false;
885 		ctx->cdata.key_dma = ctx->key_dma;
886 	}
887 
888 	flc = &ctx->flc[DECRYPT];
889 	desc = flc->sh_desc;
890 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
891 				  true);
892 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
893 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
894 				   sizeof(flc->flc) + desc_bytes(desc),
895 				   ctx->dir);
896 
897 	return 0;
898 }
899 
900 static int rfc4543_setauthsize(struct crypto_aead *authenc,
901 			       unsigned int authsize)
902 {
903 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
904 
905 	if (authsize != 16)
906 		return -EINVAL;
907 
908 	ctx->authsize = authsize;
909 	rfc4543_set_sh_desc(authenc);
910 
911 	return 0;
912 }
913 
914 static int rfc4543_setkey(struct crypto_aead *aead,
915 			  const u8 *key, unsigned int keylen)
916 {
917 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
918 	struct device *dev = ctx->dev;
919 	int ret;
920 
921 	ret = aes_check_keylen(keylen - 4);
922 	if (ret)
923 		return ret;
924 
925 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
926 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
927 
928 	memcpy(ctx->key, key, keylen);
929 	/*
930 	 * The last four bytes of the key material are used as the salt value
931 	 * in the nonce. Update the AES key length.
932 	 */
933 	ctx->cdata.keylen = keylen - 4;
934 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
935 				   ctx->dir);
936 
937 	return rfc4543_set_sh_desc(aead);
938 }
939 
940 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
941 			   unsigned int keylen, const u32 ctx1_iv_off)
942 {
943 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
944 	struct caam_skcipher_alg *alg =
945 		container_of(crypto_skcipher_alg(skcipher),
946 			     struct caam_skcipher_alg, skcipher);
947 	struct device *dev = ctx->dev;
948 	struct caam_flc *flc;
949 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
950 	u32 *desc;
951 	const bool is_rfc3686 = alg->caam.rfc3686;
952 
953 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
954 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
955 
956 	ctx->cdata.keylen = keylen;
957 	ctx->cdata.key_virt = key;
958 	ctx->cdata.key_inline = true;
959 
960 	/* skcipher_encrypt shared descriptor */
961 	flc = &ctx->flc[ENCRYPT];
962 	desc = flc->sh_desc;
963 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
964 				   ctx1_iv_off);
965 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
966 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
967 				   sizeof(flc->flc) + desc_bytes(desc),
968 				   ctx->dir);
969 
970 	/* skcipher_decrypt shared descriptor */
971 	flc = &ctx->flc[DECRYPT];
972 	desc = flc->sh_desc;
973 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
974 				   ctx1_iv_off);
975 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
976 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
977 				   sizeof(flc->flc) + desc_bytes(desc),
978 				   ctx->dir);
979 
980 	return 0;
981 }
982 
983 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
984 			       const u8 *key, unsigned int keylen)
985 {
986 	int err;
987 
988 	err = aes_check_keylen(keylen);
989 	if (err)
990 		return err;
991 
992 	return skcipher_setkey(skcipher, key, keylen, 0);
993 }
994 
995 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
996 				   const u8 *key, unsigned int keylen)
997 {
998 	u32 ctx1_iv_off;
999 	int err;
1000 
1001 	/*
1002 	 * RFC3686 specific:
1003 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1004 	 *	| *key = {KEY, NONCE}
1005 	 */
1006 	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1007 	keylen -= CTR_RFC3686_NONCE_SIZE;
1008 
1009 	err = aes_check_keylen(keylen);
1010 	if (err)
1011 		return err;
1012 
1013 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1014 }
1015 
1016 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1017 			       const u8 *key, unsigned int keylen)
1018 {
1019 	u32 ctx1_iv_off;
1020 	int err;
1021 
1022 	/*
1023 	 * AES-CTR needs to load IV in CONTEXT1 reg
1024 	 * at an offset of 128bits (16bytes)
1025 	 * CONTEXT1[255:128] = IV
1026 	 */
1027 	ctx1_iv_off = 16;
1028 
1029 	err = aes_check_keylen(keylen);
1030 	if (err)
1031 		return err;
1032 
1033 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1034 }
1035 
1036 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1037 				    const u8 *key, unsigned int keylen)
1038 {
1039 	if (keylen != CHACHA_KEY_SIZE)
1040 		return -EINVAL;
1041 
1042 	return skcipher_setkey(skcipher, key, keylen, 0);
1043 }
1044 
1045 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1046 			       const u8 *key, unsigned int keylen)
1047 {
1048 	return verify_skcipher_des_key(skcipher, key) ?:
1049 	       skcipher_setkey(skcipher, key, keylen, 0);
1050 }
1051 
1052 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1053 			        const u8 *key, unsigned int keylen)
1054 {
1055 	return verify_skcipher_des3_key(skcipher, key) ?:
1056 	       skcipher_setkey(skcipher, key, keylen, 0);
1057 }
1058 
1059 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1060 			       unsigned int keylen)
1061 {
1062 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1063 	struct device *dev = ctx->dev;
1064 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1065 	struct caam_flc *flc;
1066 	u32 *desc;
1067 	int err;
1068 
1069 	err = xts_verify_key(skcipher, key, keylen);
1070 	if (err) {
1071 		dev_dbg(dev, "key size mismatch\n");
1072 		return err;
1073 	}
1074 
1075 	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
1076 		ctx->xts_key_fallback = true;
1077 
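	/*
	 * Set up the fallback key whenever the fallback might be used: for
	 * key lengths the accelerator does not handle, or on SEC era <= 8,
	 * where requests with a non-zero upper IV half are handed to the
	 * fallback (see xts_skcipher_ivsize()).
	 */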
1078 	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
1079 		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
1080 		if (err)
1081 			return err;
1082 	}
1083 
1084 	ctx->cdata.keylen = keylen;
1085 	ctx->cdata.key_virt = key;
1086 	ctx->cdata.key_inline = true;
1087 
1088 	/* xts_skcipher_encrypt shared descriptor */
1089 	flc = &ctx->flc[ENCRYPT];
1090 	desc = flc->sh_desc;
1091 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1092 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1093 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1094 				   sizeof(flc->flc) + desc_bytes(desc),
1095 				   ctx->dir);
1096 
1097 	/* xts_skcipher_decrypt shared descriptor */
1098 	flc = &ctx->flc[DECRYPT];
1099 	desc = flc->sh_desc;
1100 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1101 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1102 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1103 				   sizeof(flc->flc) + desc_bytes(desc),
1104 				   ctx->dir);
1105 
1106 	return 0;
1107 }
1108 
1109 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1110 {
1111 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1112 	struct caam_request *req_ctx = skcipher_request_ctx(req);
1113 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1114 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1115 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1116 	struct device *dev = ctx->dev;
1117 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1118 		       GFP_KERNEL : GFP_ATOMIC;
1119 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1120 	struct skcipher_edesc *edesc;
1121 	dma_addr_t iv_dma;
1122 	u8 *iv;
1123 	int ivsize = crypto_skcipher_ivsize(skcipher);
1124 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1125 	struct dpaa2_sg_entry *sg_table;
1126 
1127 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1128 	if (unlikely(src_nents < 0)) {
1129 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1130 			req->cryptlen);
1131 		return ERR_PTR(src_nents);
1132 	}
1133 
1134 	if (unlikely(req->dst != req->src)) {
1135 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1136 		if (unlikely(dst_nents < 0)) {
1137 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1138 				req->cryptlen);
1139 			return ERR_PTR(dst_nents);
1140 		}
1141 
1142 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1143 					      DMA_TO_DEVICE);
1144 		if (unlikely(!mapped_src_nents)) {
1145 			dev_err(dev, "unable to map source\n");
1146 			return ERR_PTR(-ENOMEM);
1147 		}
1148 
1149 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1150 					      DMA_FROM_DEVICE);
1151 		if (unlikely(!mapped_dst_nents)) {
1152 			dev_err(dev, "unable to map destination\n");
1153 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1154 			return ERR_PTR(-ENOMEM);
1155 		}
1156 	} else {
1157 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1158 					      DMA_BIDIRECTIONAL);
1159 		if (unlikely(!mapped_src_nents)) {
1160 			dev_err(dev, "unable to map source\n");
1161 			return ERR_PTR(-ENOMEM);
1162 		}
1163 	}
1164 
1165 	qm_sg_ents = 1 + mapped_src_nents;
1166 	dst_sg_idx = qm_sg_ents;
1167 
1168 	/*
1169 	 * Input, output HW S/G tables: [IV, src][dst, IV]
1170 	 * IV entries point to the same buffer
1171 	 * If src == dst, S/G entries are reused (S/G tables overlap)
1172 	 *
1173 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1174 	 * the end of the table by allocating more S/G entries.
1175 	 */
1176 	if (req->src != req->dst)
1177 		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1178 	else
1179 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1180 
1181 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1182 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1183 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1184 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1185 			qm_sg_ents, ivsize);
1186 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1187 			   0, DMA_NONE, 0, 0);
1188 		return ERR_PTR(-ENOMEM);
1189 	}
1190 
1191 	/* allocate space for base edesc, link tables and IV */
1192 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1193 	if (unlikely(!edesc)) {
1194 		dev_err(dev, "could not allocate extended descriptor\n");
1195 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1196 			   0, DMA_NONE, 0, 0);
1197 		return ERR_PTR(-ENOMEM);
1198 	}
1199 
1200 	/* Make sure IV is located in a DMAable area */
1201 	sg_table = &edesc->sgt[0];
1202 	iv = (u8 *)(sg_table + qm_sg_ents);
1203 	memcpy(iv, req->iv, ivsize);
1204 
1205 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1206 	if (dma_mapping_error(dev, iv_dma)) {
1207 		dev_err(dev, "unable to map IV\n");
1208 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1209 			   0, DMA_NONE, 0, 0);
1210 		qi_cache_free(edesc);
1211 		return ERR_PTR(-ENOMEM);
1212 	}
1213 
1214 	edesc->src_nents = src_nents;
1215 	edesc->dst_nents = dst_nents;
1216 	edesc->iv_dma = iv_dma;
1217 	edesc->qm_sg_bytes = qm_sg_bytes;
1218 
1219 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1220 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1221 
1222 	if (req->src != req->dst)
1223 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1224 
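	/*
	 * The final output S/G entry points back at the (bidirectionally
	 * mapped) IV buffer, so the IV updated by the engine can be copied
	 * to req->iv in the completion callback.
	 */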
1225 	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1226 			 ivsize, 0);
1227 
1228 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1229 					  DMA_TO_DEVICE);
1230 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1231 		dev_err(dev, "unable to map S/G table\n");
1232 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1233 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1234 		qi_cache_free(edesc);
1235 		return ERR_PTR(-ENOMEM);
1236 	}
1237 
1238 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1239 	dpaa2_fl_set_final(in_fle, true);
1240 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1241 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1242 
1243 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1244 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1245 
1246 	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1247 
1248 	if (req->src == req->dst)
1249 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1250 				  sizeof(*sg_table));
1251 	else
1252 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1253 				  sizeof(*sg_table));
1254 
1255 	return edesc;
1256 }
1257 
1258 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1259 		       struct aead_request *req)
1260 {
1261 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1262 	int ivsize = crypto_aead_ivsize(aead);
1263 
1264 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1265 		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1266 		   edesc->qm_sg_bytes);
1267 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1268 }
1269 
1270 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1271 			   struct skcipher_request *req)
1272 {
1273 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1274 	int ivsize = crypto_skcipher_ivsize(skcipher);
1275 
1276 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1277 		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1278 		   edesc->qm_sg_bytes);
1279 }
1280 
1281 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1282 {
1283 	struct crypto_async_request *areq = cbk_ctx;
1284 	struct aead_request *req = container_of(areq, struct aead_request,
1285 						base);
1286 	struct caam_request *req_ctx = to_caam_req(areq);
1287 	struct aead_edesc *edesc = req_ctx->edesc;
1288 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1289 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1290 	int ecode = 0;
1291 
1292 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1293 
1294 	if (unlikely(status))
1295 		ecode = caam_qi2_strstatus(ctx->dev, status);
1296 
1297 	aead_unmap(ctx->dev, edesc, req);
1298 	qi_cache_free(edesc);
1299 	aead_request_complete(req, ecode);
1300 }
1301 
1302 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1303 {
1304 	struct crypto_async_request *areq = cbk_ctx;
1305 	struct aead_request *req = container_of(areq, struct aead_request,
1306 						base);
1307 	struct caam_request *req_ctx = to_caam_req(areq);
1308 	struct aead_edesc *edesc = req_ctx->edesc;
1309 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1310 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1311 	int ecode = 0;
1312 
1313 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1314 
1315 	if (unlikely(status))
1316 		ecode = caam_qi2_strstatus(ctx->dev, status);
1317 
1318 	aead_unmap(ctx->dev, edesc, req);
1319 	qi_cache_free(edesc);
1320 	aead_request_complete(req, ecode);
1321 }
1322 
1323 static int aead_encrypt(struct aead_request *req)
1324 {
1325 	struct aead_edesc *edesc;
1326 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1327 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1328 	struct caam_request *caam_req = aead_request_ctx(req);
1329 	int ret;
1330 
1331 	/* allocate extended descriptor */
1332 	edesc = aead_edesc_alloc(req, true);
1333 	if (IS_ERR(edesc))
1334 		return PTR_ERR(edesc);
1335 
1336 	caam_req->flc = &ctx->flc[ENCRYPT];
1337 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1338 	caam_req->cbk = aead_encrypt_done;
1339 	caam_req->ctx = &req->base;
1340 	caam_req->edesc = edesc;
1341 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1342 	if (ret != -EINPROGRESS &&
1343 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1344 		aead_unmap(ctx->dev, edesc, req);
1345 		qi_cache_free(edesc);
1346 	}
1347 
1348 	return ret;
1349 }
1350 
1351 static int aead_decrypt(struct aead_request *req)
1352 {
1353 	struct aead_edesc *edesc;
1354 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1355 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1356 	struct caam_request *caam_req = aead_request_ctx(req);
1357 	int ret;
1358 
1359 	/* allocate extended descriptor */
1360 	edesc = aead_edesc_alloc(req, false);
1361 	if (IS_ERR(edesc))
1362 		return PTR_ERR(edesc);
1363 
1364 	caam_req->flc = &ctx->flc[DECRYPT];
1365 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1366 	caam_req->cbk = aead_decrypt_done;
1367 	caam_req->ctx = &req->base;
1368 	caam_req->edesc = edesc;
1369 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1370 	if (ret != -EINPROGRESS &&
1371 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1372 		aead_unmap(ctx->dev, edesc, req);
1373 		qi_cache_free(edesc);
1374 	}
1375 
1376 	return ret;
1377 }
1378 
1379 static int ipsec_gcm_encrypt(struct aead_request *req)
1380 {
1381 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1382 }
1383 
1384 static int ipsec_gcm_decrypt(struct aead_request *req)
1385 {
1386 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1387 }
1388 
1389 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1390 {
1391 	struct crypto_async_request *areq = cbk_ctx;
1392 	struct skcipher_request *req = skcipher_request_cast(areq);
1393 	struct caam_request *req_ctx = to_caam_req(areq);
1394 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1395 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1396 	struct skcipher_edesc *edesc = req_ctx->edesc;
1397 	int ecode = 0;
1398 	int ivsize = crypto_skcipher_ivsize(skcipher);
1399 
1400 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1401 
1402 	if (unlikely(status))
1403 		ecode = caam_qi2_strstatus(ctx->dev, status);
1404 
1405 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1406 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1407 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1408 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1409 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1410 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1411 
1412 	skcipher_unmap(ctx->dev, edesc, req);
1413 
1414 	/*
1415 	 * The crypto API expects us to set the IV (req->iv) to the last
1416 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1417 	 * This is used e.g. by the CTS mode.
1418 	 */
1419 	if (!ecode)
1420 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1421 		       ivsize);
1422 
1423 	qi_cache_free(edesc);
1424 	skcipher_request_complete(req, ecode);
1425 }
1426 
1427 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1428 {
1429 	struct crypto_async_request *areq = cbk_ctx;
1430 	struct skcipher_request *req = skcipher_request_cast(areq);
1431 	struct caam_request *req_ctx = to_caam_req(areq);
1432 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1433 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1434 	struct skcipher_edesc *edesc = req_ctx->edesc;
1435 	int ecode = 0;
1436 	int ivsize = crypto_skcipher_ivsize(skcipher);
1437 
1438 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1439 
1440 	if (unlikely(status))
1441 		ecode = caam_qi2_strstatus(ctx->dev, status);
1442 
1443 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1444 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1445 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1446 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1447 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1448 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1449 
1450 	skcipher_unmap(ctx->dev, edesc, req);
1451 
1452 	/*
1453 	 * The crypto API expects us to set the IV (req->iv) to the last
1454 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1455 	 * This is used e.g. by the CTS mode.
1456 	 */
1457 	if (!ecode)
1458 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1459 		       ivsize);
1460 
1461 	qi_cache_free(edesc);
1462 	skcipher_request_complete(req, ecode);
1463 }
1464 
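/*
 * Return true when the upper half of the XTS IV (sector index bits 127:64)
 * is non-zero; such requests are routed to the software fallback on
 * SEC era <= 8.
 */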
1465 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1466 {
1467 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1468 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1469 
1470 	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1471 }
1472 
1473 static int skcipher_encrypt(struct skcipher_request *req)
1474 {
1475 	struct skcipher_edesc *edesc;
1476 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1477 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1478 	struct caam_request *caam_req = skcipher_request_ctx(req);
1479 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1480 	int ret;
1481 
1482 	/*
1483 	 * XTS is expected to return an error even for input length = 0
1484 	 * Note that the case input length < block size will be caught during
1485 	 * HW offloading and return an error.
1486 	 */
1487 	if (!req->cryptlen && !ctx->fallback)
1488 		return 0;
1489 
1490 	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1491 			      ctx->xts_key_fallback)) {
1492 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1493 		skcipher_request_set_callback(&caam_req->fallback_req,
1494 					      req->base.flags,
1495 					      req->base.complete,
1496 					      req->base.data);
1497 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1498 					   req->dst, req->cryptlen, req->iv);
1499 
1500 		return crypto_skcipher_encrypt(&caam_req->fallback_req);
1501 	}
1502 
1503 	/* allocate extended descriptor */
1504 	edesc = skcipher_edesc_alloc(req);
1505 	if (IS_ERR(edesc))
1506 		return PTR_ERR(edesc);
1507 
1508 	caam_req->flc = &ctx->flc[ENCRYPT];
1509 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1510 	caam_req->cbk = skcipher_encrypt_done;
1511 	caam_req->ctx = &req->base;
1512 	caam_req->edesc = edesc;
1513 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1514 	if (ret != -EINPROGRESS &&
1515 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1516 		skcipher_unmap(ctx->dev, edesc, req);
1517 		qi_cache_free(edesc);
1518 	}
1519 
1520 	return ret;
1521 }
1522 
1523 static int skcipher_decrypt(struct skcipher_request *req)
1524 {
1525 	struct skcipher_edesc *edesc;
1526 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1527 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1528 	struct caam_request *caam_req = skcipher_request_ctx(req);
1529 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1530 	int ret;
1531 
1532 	/*
1533 	 * XTS is expected to return an error even for input length = 0
1534 	 * Note that the case input length < block size will be caught during
1535 	 * HW offloading and return an error.
1536 	 */
1537 	if (!req->cryptlen && !ctx->fallback)
1538 		return 0;
1539 
1540 	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1541 			      ctx->xts_key_fallback)) {
1542 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1543 		skcipher_request_set_callback(&caam_req->fallback_req,
1544 					      req->base.flags,
1545 					      req->base.complete,
1546 					      req->base.data);
1547 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1548 					   req->dst, req->cryptlen, req->iv);
1549 
1550 		return crypto_skcipher_decrypt(&caam_req->fallback_req);
1551 	}
1552 
1553 	/* allocate extended descriptor */
1554 	edesc = skcipher_edesc_alloc(req);
1555 	if (IS_ERR(edesc))
1556 		return PTR_ERR(edesc);
1557 
1558 	caam_req->flc = &ctx->flc[DECRYPT];
1559 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1560 	caam_req->cbk = skcipher_decrypt_done;
1561 	caam_req->ctx = &req->base;
1562 	caam_req->edesc = edesc;
1563 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1564 	if (ret != -EINPROGRESS &&
1565 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1566 		skcipher_unmap(ctx->dev, edesc, req);
1567 		qi_cache_free(edesc);
1568 	}
1569 
1570 	return ret;
1571 }
1572 
1573 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1574 			 bool uses_dkp)
1575 {
1576 	dma_addr_t dma_addr;
1577 	int i;
1578 
1579 	/* copy descriptor header template value */
1580 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1581 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1582 
1583 	ctx->dev = caam->dev;
1584 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1585 
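	/*
	 * flc[] and key[] sit back to back at the start of struct caam_ctx,
	 * so a single DMA mapping of offsetof(struct caam_ctx, flc_dma) bytes
	 * covers both the flow contexts and the key buffer.
	 */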
1586 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1587 					offsetof(struct caam_ctx, flc_dma),
1588 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1589 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1590 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1591 		return -ENOMEM;
1592 	}
1593 
1594 	for (i = 0; i < NUM_OP; i++)
1595 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1596 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1597 
1598 	return 0;
1599 }
1600 
1601 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1602 {
1603 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1604 	struct caam_skcipher_alg *caam_alg =
1605 		container_of(alg, typeof(*caam_alg), skcipher);
1606 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1607 	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1608 	int ret = 0;
1609 
1610 	if (alg_aai == OP_ALG_AAI_XTS) {
1611 		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1612 		struct crypto_skcipher *fallback;
1613 
1614 		fallback = crypto_alloc_skcipher(tfm_name, 0,
1615 						 CRYPTO_ALG_NEED_FALLBACK);
1616 		if (IS_ERR(fallback)) {
1617 			dev_err(caam_alg->caam.dev,
1618 				"Failed to allocate %s fallback: %ld\n",
1619 				tfm_name, PTR_ERR(fallback));
1620 			return PTR_ERR(fallback);
1621 		}
1622 
1623 		ctx->fallback = fallback;
1624 		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
1625 					    crypto_skcipher_reqsize(fallback));
1626 	} else {
1627 		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1628 	}
1629 
1630 	ret = caam_cra_init(ctx, &caam_alg->caam, false);
1631 	if (ret && ctx->fallback)
1632 		crypto_free_skcipher(ctx->fallback);
1633 
1634 	return ret;
1635 }
1636 
1637 static int caam_cra_init_aead(struct crypto_aead *tfm)
1638 {
1639 	struct aead_alg *alg = crypto_aead_alg(tfm);
1640 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1641 						      aead);
1642 
1643 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1644 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1645 			     !caam_alg->caam.nodkp);
1646 }
1647 
1648 static void caam_exit_common(struct caam_ctx *ctx)
1649 {
1650 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1651 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1652 			       DMA_ATTR_SKIP_CPU_SYNC);
1653 }
1654 
1655 static void caam_cra_exit(struct crypto_skcipher *tfm)
1656 {
1657 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1658 
1659 	if (ctx->fallback)
1660 		crypto_free_skcipher(ctx->fallback);
1661 	caam_exit_common(ctx);
1662 }
1663 
1664 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1665 {
1666 	caam_exit_common(crypto_aead_ctx(tfm));
1667 }
1668 
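/*
 * Algorithm templates: driver_algs[] holds the skcipher entries and
 * driver_aeads[] the AEAD entries. caam_skcipher_alg_init() and
 * caam_aead_alg_init() below fill in the common cra_* fields before the
 * entries are registered with the crypto API.
 */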
1669 static struct caam_skcipher_alg driver_algs[] = {
1670 	{
1671 		.skcipher = {
1672 			.base = {
1673 				.cra_name = "cbc(aes)",
1674 				.cra_driver_name = "cbc-aes-caam-qi2",
1675 				.cra_blocksize = AES_BLOCK_SIZE,
1676 			},
1677 			.setkey = aes_skcipher_setkey,
1678 			.encrypt = skcipher_encrypt,
1679 			.decrypt = skcipher_decrypt,
1680 			.min_keysize = AES_MIN_KEY_SIZE,
1681 			.max_keysize = AES_MAX_KEY_SIZE,
1682 			.ivsize = AES_BLOCK_SIZE,
1683 		},
1684 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1685 	},
1686 	{
1687 		.skcipher = {
1688 			.base = {
1689 				.cra_name = "cbc(des3_ede)",
1690 				.cra_driver_name = "cbc-3des-caam-qi2",
1691 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1692 			},
1693 			.setkey = des3_skcipher_setkey,
1694 			.encrypt = skcipher_encrypt,
1695 			.decrypt = skcipher_decrypt,
1696 			.min_keysize = DES3_EDE_KEY_SIZE,
1697 			.max_keysize = DES3_EDE_KEY_SIZE,
1698 			.ivsize = DES3_EDE_BLOCK_SIZE,
1699 		},
1700 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1701 	},
1702 	{
1703 		.skcipher = {
1704 			.base = {
1705 				.cra_name = "cbc(des)",
1706 				.cra_driver_name = "cbc-des-caam-qi2",
1707 				.cra_blocksize = DES_BLOCK_SIZE,
1708 			},
1709 			.setkey = des_skcipher_setkey,
1710 			.encrypt = skcipher_encrypt,
1711 			.decrypt = skcipher_decrypt,
1712 			.min_keysize = DES_KEY_SIZE,
1713 			.max_keysize = DES_KEY_SIZE,
1714 			.ivsize = DES_BLOCK_SIZE,
1715 		},
1716 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1717 	},
1718 	{
1719 		.skcipher = {
1720 			.base = {
1721 				.cra_name = "ctr(aes)",
1722 				.cra_driver_name = "ctr-aes-caam-qi2",
1723 				.cra_blocksize = 1,
1724 			},
1725 			.setkey = ctr_skcipher_setkey,
1726 			.encrypt = skcipher_encrypt,
1727 			.decrypt = skcipher_decrypt,
1728 			.min_keysize = AES_MIN_KEY_SIZE,
1729 			.max_keysize = AES_MAX_KEY_SIZE,
1730 			.ivsize = AES_BLOCK_SIZE,
1731 			.chunksize = AES_BLOCK_SIZE,
1732 		},
1733 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1734 					OP_ALG_AAI_CTR_MOD128,
1735 	},
1736 	{
1737 		.skcipher = {
1738 			.base = {
1739 				.cra_name = "rfc3686(ctr(aes))",
1740 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1741 				.cra_blocksize = 1,
1742 			},
1743 			.setkey = rfc3686_skcipher_setkey,
1744 			.encrypt = skcipher_encrypt,
1745 			.decrypt = skcipher_decrypt,
1746 			.min_keysize = AES_MIN_KEY_SIZE +
1747 				       CTR_RFC3686_NONCE_SIZE,
1748 			.max_keysize = AES_MAX_KEY_SIZE +
1749 				       CTR_RFC3686_NONCE_SIZE,
1750 			.ivsize = CTR_RFC3686_IV_SIZE,
1751 			.chunksize = AES_BLOCK_SIZE,
1752 		},
1753 		.caam = {
1754 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1755 					   OP_ALG_AAI_CTR_MOD128,
1756 			.rfc3686 = true,
1757 		},
1758 	},
1759 	{
1760 		.skcipher = {
1761 			.base = {
1762 				.cra_name = "xts(aes)",
1763 				.cra_driver_name = "xts-aes-caam-qi2",
1764 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1765 				.cra_blocksize = AES_BLOCK_SIZE,
1766 			},
1767 			.setkey = xts_skcipher_setkey,
1768 			.encrypt = skcipher_encrypt,
1769 			.decrypt = skcipher_decrypt,
1770 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1771 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1772 			.ivsize = AES_BLOCK_SIZE,
1773 		},
1774 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1775 	},
1776 	{
1777 		.skcipher = {
1778 			.base = {
1779 				.cra_name = "chacha20",
1780 				.cra_driver_name = "chacha20-caam-qi2",
1781 				.cra_blocksize = 1,
1782 			},
1783 			.setkey = chacha20_skcipher_setkey,
1784 			.encrypt = skcipher_encrypt,
1785 			.decrypt = skcipher_decrypt,
1786 			.min_keysize = CHACHA_KEY_SIZE,
1787 			.max_keysize = CHACHA_KEY_SIZE,
1788 			.ivsize = CHACHA_IV_SIZE,
1789 		},
1790 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1791 	},
1792 };
1793 
1794 static struct caam_aead_alg driver_aeads[] = {
1795 	{
1796 		.aead = {
1797 			.base = {
1798 				.cra_name = "rfc4106(gcm(aes))",
1799 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1800 				.cra_blocksize = 1,
1801 			},
1802 			.setkey = rfc4106_setkey,
1803 			.setauthsize = rfc4106_setauthsize,
1804 			.encrypt = ipsec_gcm_encrypt,
1805 			.decrypt = ipsec_gcm_decrypt,
1806 			.ivsize = 8,
1807 			.maxauthsize = AES_BLOCK_SIZE,
1808 		},
1809 		.caam = {
1810 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1811 			.nodkp = true,
1812 		},
1813 	},
1814 	{
1815 		.aead = {
1816 			.base = {
1817 				.cra_name = "rfc4543(gcm(aes))",
1818 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1819 				.cra_blocksize = 1,
1820 			},
1821 			.setkey = rfc4543_setkey,
1822 			.setauthsize = rfc4543_setauthsize,
1823 			.encrypt = ipsec_gcm_encrypt,
1824 			.decrypt = ipsec_gcm_decrypt,
1825 			.ivsize = 8,
1826 			.maxauthsize = AES_BLOCK_SIZE,
1827 		},
1828 		.caam = {
1829 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1830 			.nodkp = true,
1831 		},
1832 	},
1833 	/* Galois Counter Mode */
1834 	{
1835 		.aead = {
1836 			.base = {
1837 				.cra_name = "gcm(aes)",
1838 				.cra_driver_name = "gcm-aes-caam-qi2",
1839 				.cra_blocksize = 1,
1840 			},
1841 			.setkey = gcm_setkey,
1842 			.setauthsize = gcm_setauthsize,
1843 			.encrypt = aead_encrypt,
1844 			.decrypt = aead_decrypt,
1845 			.ivsize = 12,
1846 			.maxauthsize = AES_BLOCK_SIZE,
1847 		},
1848 		.caam = {
1849 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1850 			.nodkp = true,
1851 		}
1852 	},
1853 	/* single-pass ipsec_esp descriptor */
1854 	{
1855 		.aead = {
1856 			.base = {
1857 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1858 				.cra_driver_name = "authenc-hmac-md5-"
1859 						   "cbc-aes-caam-qi2",
1860 				.cra_blocksize = AES_BLOCK_SIZE,
1861 			},
1862 			.setkey = aead_setkey,
1863 			.setauthsize = aead_setauthsize,
1864 			.encrypt = aead_encrypt,
1865 			.decrypt = aead_decrypt,
1866 			.ivsize = AES_BLOCK_SIZE,
1867 			.maxauthsize = MD5_DIGEST_SIZE,
1868 		},
1869 		.caam = {
1870 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1871 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1872 					   OP_ALG_AAI_HMAC_PRECOMP,
1873 		}
1874 	},
1875 	{
1876 		.aead = {
1877 			.base = {
1878 				.cra_name = "echainiv(authenc(hmac(md5),"
1879 					    "cbc(aes)))",
1880 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1881 						   "cbc-aes-caam-qi2",
1882 				.cra_blocksize = AES_BLOCK_SIZE,
1883 			},
1884 			.setkey = aead_setkey,
1885 			.setauthsize = aead_setauthsize,
1886 			.encrypt = aead_encrypt,
1887 			.decrypt = aead_decrypt,
1888 			.ivsize = AES_BLOCK_SIZE,
1889 			.maxauthsize = MD5_DIGEST_SIZE,
1890 		},
1891 		.caam = {
1892 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1893 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1894 					   OP_ALG_AAI_HMAC_PRECOMP,
1895 			.geniv = true,
1896 		}
1897 	},
1898 	{
1899 		.aead = {
1900 			.base = {
1901 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1902 				.cra_driver_name = "authenc-hmac-sha1-"
1903 						   "cbc-aes-caam-qi2",
1904 				.cra_blocksize = AES_BLOCK_SIZE,
1905 			},
1906 			.setkey = aead_setkey,
1907 			.setauthsize = aead_setauthsize,
1908 			.encrypt = aead_encrypt,
1909 			.decrypt = aead_decrypt,
1910 			.ivsize = AES_BLOCK_SIZE,
1911 			.maxauthsize = SHA1_DIGEST_SIZE,
1912 		},
1913 		.caam = {
1914 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1915 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1916 					   OP_ALG_AAI_HMAC_PRECOMP,
1917 		}
1918 	},
1919 	{
1920 		.aead = {
1921 			.base = {
1922 				.cra_name = "echainiv(authenc(hmac(sha1),"
1923 					    "cbc(aes)))",
1924 				.cra_driver_name = "echainiv-authenc-"
1925 						   "hmac-sha1-cbc-aes-caam-qi2",
1926 				.cra_blocksize = AES_BLOCK_SIZE,
1927 			},
1928 			.setkey = aead_setkey,
1929 			.setauthsize = aead_setauthsize,
1930 			.encrypt = aead_encrypt,
1931 			.decrypt = aead_decrypt,
1932 			.ivsize = AES_BLOCK_SIZE,
1933 			.maxauthsize = SHA1_DIGEST_SIZE,
1934 		},
1935 		.caam = {
1936 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1937 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1938 					   OP_ALG_AAI_HMAC_PRECOMP,
1939 			.geniv = true,
1940 		},
1941 	},
1942 	{
1943 		.aead = {
1944 			.base = {
1945 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1946 				.cra_driver_name = "authenc-hmac-sha224-"
1947 						   "cbc-aes-caam-qi2",
1948 				.cra_blocksize = AES_BLOCK_SIZE,
1949 			},
1950 			.setkey = aead_setkey,
1951 			.setauthsize = aead_setauthsize,
1952 			.encrypt = aead_encrypt,
1953 			.decrypt = aead_decrypt,
1954 			.ivsize = AES_BLOCK_SIZE,
1955 			.maxauthsize = SHA224_DIGEST_SIZE,
1956 		},
1957 		.caam = {
1958 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1959 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1960 					   OP_ALG_AAI_HMAC_PRECOMP,
1961 		}
1962 	},
1963 	{
1964 		.aead = {
1965 			.base = {
1966 				.cra_name = "echainiv(authenc(hmac(sha224),"
1967 					    "cbc(aes)))",
1968 				.cra_driver_name = "echainiv-authenc-"
1969 						   "hmac-sha224-cbc-aes-caam-qi2",
1970 				.cra_blocksize = AES_BLOCK_SIZE,
1971 			},
1972 			.setkey = aead_setkey,
1973 			.setauthsize = aead_setauthsize,
1974 			.encrypt = aead_encrypt,
1975 			.decrypt = aead_decrypt,
1976 			.ivsize = AES_BLOCK_SIZE,
1977 			.maxauthsize = SHA224_DIGEST_SIZE,
1978 		},
1979 		.caam = {
1980 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1981 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1982 					   OP_ALG_AAI_HMAC_PRECOMP,
1983 			.geniv = true,
1984 		}
1985 	},
1986 	{
1987 		.aead = {
1988 			.base = {
1989 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1990 				.cra_driver_name = "authenc-hmac-sha256-"
1991 						   "cbc-aes-caam-qi2",
1992 				.cra_blocksize = AES_BLOCK_SIZE,
1993 			},
1994 			.setkey = aead_setkey,
1995 			.setauthsize = aead_setauthsize,
1996 			.encrypt = aead_encrypt,
1997 			.decrypt = aead_decrypt,
1998 			.ivsize = AES_BLOCK_SIZE,
1999 			.maxauthsize = SHA256_DIGEST_SIZE,
2000 		},
2001 		.caam = {
2002 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2003 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2004 					   OP_ALG_AAI_HMAC_PRECOMP,
2005 		}
2006 	},
2007 	{
2008 		.aead = {
2009 			.base = {
2010 				.cra_name = "echainiv(authenc(hmac(sha256),"
2011 					    "cbc(aes)))",
2012 				.cra_driver_name = "echainiv-authenc-"
2013 						   "hmac-sha256-cbc-aes-"
2014 						   "caam-qi2",
2015 				.cra_blocksize = AES_BLOCK_SIZE,
2016 			},
2017 			.setkey = aead_setkey,
2018 			.setauthsize = aead_setauthsize,
2019 			.encrypt = aead_encrypt,
2020 			.decrypt = aead_decrypt,
2021 			.ivsize = AES_BLOCK_SIZE,
2022 			.maxauthsize = SHA256_DIGEST_SIZE,
2023 		},
2024 		.caam = {
2025 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2026 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2027 					   OP_ALG_AAI_HMAC_PRECOMP,
2028 			.geniv = true,
2029 		}
2030 	},
2031 	{
2032 		.aead = {
2033 			.base = {
2034 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2035 				.cra_driver_name = "authenc-hmac-sha384-"
2036 						   "cbc-aes-caam-qi2",
2037 				.cra_blocksize = AES_BLOCK_SIZE,
2038 			},
2039 			.setkey = aead_setkey,
2040 			.setauthsize = aead_setauthsize,
2041 			.encrypt = aead_encrypt,
2042 			.decrypt = aead_decrypt,
2043 			.ivsize = AES_BLOCK_SIZE,
2044 			.maxauthsize = SHA384_DIGEST_SIZE,
2045 		},
2046 		.caam = {
2047 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2048 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2049 					   OP_ALG_AAI_HMAC_PRECOMP,
2050 		}
2051 	},
2052 	{
2053 		.aead = {
2054 			.base = {
2055 				.cra_name = "echainiv(authenc(hmac(sha384),"
2056 					    "cbc(aes)))",
2057 				.cra_driver_name = "echainiv-authenc-"
2058 						   "hmac-sha384-cbc-aes-"
2059 						   "caam-qi2",
2060 				.cra_blocksize = AES_BLOCK_SIZE,
2061 			},
2062 			.setkey = aead_setkey,
2063 			.setauthsize = aead_setauthsize,
2064 			.encrypt = aead_encrypt,
2065 			.decrypt = aead_decrypt,
2066 			.ivsize = AES_BLOCK_SIZE,
2067 			.maxauthsize = SHA384_DIGEST_SIZE,
2068 		},
2069 		.caam = {
2070 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2071 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2072 					   OP_ALG_AAI_HMAC_PRECOMP,
2073 			.geniv = true,
2074 		}
2075 	},
2076 	{
2077 		.aead = {
2078 			.base = {
2079 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2080 				.cra_driver_name = "authenc-hmac-sha512-"
2081 						   "cbc-aes-caam-qi2",
2082 				.cra_blocksize = AES_BLOCK_SIZE,
2083 			},
2084 			.setkey = aead_setkey,
2085 			.setauthsize = aead_setauthsize,
2086 			.encrypt = aead_encrypt,
2087 			.decrypt = aead_decrypt,
2088 			.ivsize = AES_BLOCK_SIZE,
2089 			.maxauthsize = SHA512_DIGEST_SIZE,
2090 		},
2091 		.caam = {
2092 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2093 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2094 					   OP_ALG_AAI_HMAC_PRECOMP,
2095 		}
2096 	},
2097 	{
2098 		.aead = {
2099 			.base = {
2100 				.cra_name = "echainiv(authenc(hmac(sha512),"
2101 					    "cbc(aes)))",
2102 				.cra_driver_name = "echainiv-authenc-"
2103 						   "hmac-sha512-cbc-aes-"
2104 						   "caam-qi2",
2105 				.cra_blocksize = AES_BLOCK_SIZE,
2106 			},
2107 			.setkey = aead_setkey,
2108 			.setauthsize = aead_setauthsize,
2109 			.encrypt = aead_encrypt,
2110 			.decrypt = aead_decrypt,
2111 			.ivsize = AES_BLOCK_SIZE,
2112 			.maxauthsize = SHA512_DIGEST_SIZE,
2113 		},
2114 		.caam = {
2115 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2116 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2117 					   OP_ALG_AAI_HMAC_PRECOMP,
2118 			.geniv = true,
2119 		}
2120 	},
2121 	{
2122 		.aead = {
2123 			.base = {
2124 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2125 				.cra_driver_name = "authenc-hmac-md5-"
2126 						   "cbc-des3_ede-caam-qi2",
2127 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2128 			},
2129 			.setkey = des3_aead_setkey,
2130 			.setauthsize = aead_setauthsize,
2131 			.encrypt = aead_encrypt,
2132 			.decrypt = aead_decrypt,
2133 			.ivsize = DES3_EDE_BLOCK_SIZE,
2134 			.maxauthsize = MD5_DIGEST_SIZE,
2135 		},
2136 		.caam = {
2137 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2138 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2139 					   OP_ALG_AAI_HMAC_PRECOMP,
2140 		}
2141 	},
2142 	{
2143 		.aead = {
2144 			.base = {
2145 				.cra_name = "echainiv(authenc(hmac(md5),"
2146 					    "cbc(des3_ede)))",
2147 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2148 						   "cbc-des3_ede-caam-qi2",
2149 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2150 			},
2151 			.setkey = des3_aead_setkey,
2152 			.setauthsize = aead_setauthsize,
2153 			.encrypt = aead_encrypt,
2154 			.decrypt = aead_decrypt,
2155 			.ivsize = DES3_EDE_BLOCK_SIZE,
2156 			.maxauthsize = MD5_DIGEST_SIZE,
2157 		},
2158 		.caam = {
2159 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2160 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2161 					   OP_ALG_AAI_HMAC_PRECOMP,
2162 			.geniv = true,
2163 		}
2164 	},
2165 	{
2166 		.aead = {
2167 			.base = {
2168 				.cra_name = "authenc(hmac(sha1),"
2169 					    "cbc(des3_ede))",
2170 				.cra_driver_name = "authenc-hmac-sha1-"
2171 						   "cbc-des3_ede-caam-qi2",
2172 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2173 			},
2174 			.setkey = des3_aead_setkey,
2175 			.setauthsize = aead_setauthsize,
2176 			.encrypt = aead_encrypt,
2177 			.decrypt = aead_decrypt,
2178 			.ivsize = DES3_EDE_BLOCK_SIZE,
2179 			.maxauthsize = SHA1_DIGEST_SIZE,
2180 		},
2181 		.caam = {
2182 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2183 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2184 					   OP_ALG_AAI_HMAC_PRECOMP,
2185 		},
2186 	},
2187 	{
2188 		.aead = {
2189 			.base = {
2190 				.cra_name = "echainiv(authenc(hmac(sha1),"
2191 					    "cbc(des3_ede)))",
2192 				.cra_driver_name = "echainiv-authenc-"
2193 						   "hmac-sha1-"
2194 						   "cbc-des3_ede-caam-qi2",
2195 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2196 			},
2197 			.setkey = des3_aead_setkey,
2198 			.setauthsize = aead_setauthsize,
2199 			.encrypt = aead_encrypt,
2200 			.decrypt = aead_decrypt,
2201 			.ivsize = DES3_EDE_BLOCK_SIZE,
2202 			.maxauthsize = SHA1_DIGEST_SIZE,
2203 		},
2204 		.caam = {
2205 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2206 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2207 					   OP_ALG_AAI_HMAC_PRECOMP,
2208 			.geniv = true,
2209 		}
2210 	},
2211 	{
2212 		.aead = {
2213 			.base = {
2214 				.cra_name = "authenc(hmac(sha224),"
2215 					    "cbc(des3_ede))",
2216 				.cra_driver_name = "authenc-hmac-sha224-"
2217 						   "cbc-des3_ede-caam-qi2",
2218 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2219 			},
2220 			.setkey = des3_aead_setkey,
2221 			.setauthsize = aead_setauthsize,
2222 			.encrypt = aead_encrypt,
2223 			.decrypt = aead_decrypt,
2224 			.ivsize = DES3_EDE_BLOCK_SIZE,
2225 			.maxauthsize = SHA224_DIGEST_SIZE,
2226 		},
2227 		.caam = {
2228 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2229 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2230 					   OP_ALG_AAI_HMAC_PRECOMP,
2231 		},
2232 	},
2233 	{
2234 		.aead = {
2235 			.base = {
2236 				.cra_name = "echainiv(authenc(hmac(sha224),"
2237 					    "cbc(des3_ede)))",
2238 				.cra_driver_name = "echainiv-authenc-"
2239 						   "hmac-sha224-"
2240 						   "cbc-des3_ede-caam-qi2",
2241 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2242 			},
2243 			.setkey = des3_aead_setkey,
2244 			.setauthsize = aead_setauthsize,
2245 			.encrypt = aead_encrypt,
2246 			.decrypt = aead_decrypt,
2247 			.ivsize = DES3_EDE_BLOCK_SIZE,
2248 			.maxauthsize = SHA224_DIGEST_SIZE,
2249 		},
2250 		.caam = {
2251 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2252 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2253 					   OP_ALG_AAI_HMAC_PRECOMP,
2254 			.geniv = true,
2255 		}
2256 	},
2257 	{
2258 		.aead = {
2259 			.base = {
2260 				.cra_name = "authenc(hmac(sha256),"
2261 					    "cbc(des3_ede))",
2262 				.cra_driver_name = "authenc-hmac-sha256-"
2263 						   "cbc-des3_ede-caam-qi2",
2264 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2265 			},
2266 			.setkey = des3_aead_setkey,
2267 			.setauthsize = aead_setauthsize,
2268 			.encrypt = aead_encrypt,
2269 			.decrypt = aead_decrypt,
2270 			.ivsize = DES3_EDE_BLOCK_SIZE,
2271 			.maxauthsize = SHA256_DIGEST_SIZE,
2272 		},
2273 		.caam = {
2274 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2275 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2276 					   OP_ALG_AAI_HMAC_PRECOMP,
2277 		},
2278 	},
2279 	{
2280 		.aead = {
2281 			.base = {
2282 				.cra_name = "echainiv(authenc(hmac(sha256),"
2283 					    "cbc(des3_ede)))",
2284 				.cra_driver_name = "echainiv-authenc-"
2285 						   "hmac-sha256-"
2286 						   "cbc-des3_ede-caam-qi2",
2287 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2288 			},
2289 			.setkey = des3_aead_setkey,
2290 			.setauthsize = aead_setauthsize,
2291 			.encrypt = aead_encrypt,
2292 			.decrypt = aead_decrypt,
2293 			.ivsize = DES3_EDE_BLOCK_SIZE,
2294 			.maxauthsize = SHA256_DIGEST_SIZE,
2295 		},
2296 		.caam = {
2297 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2298 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2299 					   OP_ALG_AAI_HMAC_PRECOMP,
2300 			.geniv = true,
2301 		}
2302 	},
2303 	{
2304 		.aead = {
2305 			.base = {
2306 				.cra_name = "authenc(hmac(sha384),"
2307 					    "cbc(des3_ede))",
2308 				.cra_driver_name = "authenc-hmac-sha384-"
2309 						   "cbc-des3_ede-caam-qi2",
2310 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311 			},
2312 			.setkey = des3_aead_setkey,
2313 			.setauthsize = aead_setauthsize,
2314 			.encrypt = aead_encrypt,
2315 			.decrypt = aead_decrypt,
2316 			.ivsize = DES3_EDE_BLOCK_SIZE,
2317 			.maxauthsize = SHA384_DIGEST_SIZE,
2318 		},
2319 		.caam = {
2320 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2321 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2322 					   OP_ALG_AAI_HMAC_PRECOMP,
2323 		},
2324 	},
2325 	{
2326 		.aead = {
2327 			.base = {
2328 				.cra_name = "echainiv(authenc(hmac(sha384),"
2329 					    "cbc(des3_ede)))",
2330 				.cra_driver_name = "echainiv-authenc-"
2331 						   "hmac-sha384-"
2332 						   "cbc-des3_ede-caam-qi2",
2333 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2334 			},
2335 			.setkey = des3_aead_setkey,
2336 			.setauthsize = aead_setauthsize,
2337 			.encrypt = aead_encrypt,
2338 			.decrypt = aead_decrypt,
2339 			.ivsize = DES3_EDE_BLOCK_SIZE,
2340 			.maxauthsize = SHA384_DIGEST_SIZE,
2341 		},
2342 		.caam = {
2343 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2344 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2345 					   OP_ALG_AAI_HMAC_PRECOMP,
2346 			.geniv = true,
2347 		}
2348 	},
2349 	{
2350 		.aead = {
2351 			.base = {
2352 				.cra_name = "authenc(hmac(sha512),"
2353 					    "cbc(des3_ede))",
2354 				.cra_driver_name = "authenc-hmac-sha512-"
2355 						   "cbc-des3_ede-caam-qi2",
2356 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2357 			},
2358 			.setkey = des3_aead_setkey,
2359 			.setauthsize = aead_setauthsize,
2360 			.encrypt = aead_encrypt,
2361 			.decrypt = aead_decrypt,
2362 			.ivsize = DES3_EDE_BLOCK_SIZE,
2363 			.maxauthsize = SHA512_DIGEST_SIZE,
2364 		},
2365 		.caam = {
2366 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2367 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2368 					   OP_ALG_AAI_HMAC_PRECOMP,
2369 		},
2370 	},
2371 	{
2372 		.aead = {
2373 			.base = {
2374 				.cra_name = "echainiv(authenc(hmac(sha512),"
2375 					    "cbc(des3_ede)))",
2376 				.cra_driver_name = "echainiv-authenc-"
2377 						   "hmac-sha512-"
2378 						   "cbc-des3_ede-caam-qi2",
2379 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2380 			},
2381 			.setkey = des3_aead_setkey,
2382 			.setauthsize = aead_setauthsize,
2383 			.encrypt = aead_encrypt,
2384 			.decrypt = aead_decrypt,
2385 			.ivsize = DES3_EDE_BLOCK_SIZE,
2386 			.maxauthsize = SHA512_DIGEST_SIZE,
2387 		},
2388 		.caam = {
2389 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2390 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2391 					   OP_ALG_AAI_HMAC_PRECOMP,
2392 			.geniv = true,
2393 		}
2394 	},
2395 	{
2396 		.aead = {
2397 			.base = {
2398 				.cra_name = "authenc(hmac(md5),cbc(des))",
2399 				.cra_driver_name = "authenc-hmac-md5-"
2400 						   "cbc-des-caam-qi2",
2401 				.cra_blocksize = DES_BLOCK_SIZE,
2402 			},
2403 			.setkey = aead_setkey,
2404 			.setauthsize = aead_setauthsize,
2405 			.encrypt = aead_encrypt,
2406 			.decrypt = aead_decrypt,
2407 			.ivsize = DES_BLOCK_SIZE,
2408 			.maxauthsize = MD5_DIGEST_SIZE,
2409 		},
2410 		.caam = {
2411 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2412 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2413 					   OP_ALG_AAI_HMAC_PRECOMP,
2414 		},
2415 	},
2416 	{
2417 		.aead = {
2418 			.base = {
2419 				.cra_name = "echainiv(authenc(hmac(md5),"
2420 					    "cbc(des)))",
2421 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2422 						   "cbc-des-caam-qi2",
2423 				.cra_blocksize = DES_BLOCK_SIZE,
2424 			},
2425 			.setkey = aead_setkey,
2426 			.setauthsize = aead_setauthsize,
2427 			.encrypt = aead_encrypt,
2428 			.decrypt = aead_decrypt,
2429 			.ivsize = DES_BLOCK_SIZE,
2430 			.maxauthsize = MD5_DIGEST_SIZE,
2431 		},
2432 		.caam = {
2433 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2434 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2435 					   OP_ALG_AAI_HMAC_PRECOMP,
2436 			.geniv = true,
2437 		}
2438 	},
2439 	{
2440 		.aead = {
2441 			.base = {
2442 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2443 				.cra_driver_name = "authenc-hmac-sha1-"
2444 						   "cbc-des-caam-qi2",
2445 				.cra_blocksize = DES_BLOCK_SIZE,
2446 			},
2447 			.setkey = aead_setkey,
2448 			.setauthsize = aead_setauthsize,
2449 			.encrypt = aead_encrypt,
2450 			.decrypt = aead_decrypt,
2451 			.ivsize = DES_BLOCK_SIZE,
2452 			.maxauthsize = SHA1_DIGEST_SIZE,
2453 		},
2454 		.caam = {
2455 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2456 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2457 					   OP_ALG_AAI_HMAC_PRECOMP,
2458 		},
2459 	},
2460 	{
2461 		.aead = {
2462 			.base = {
2463 				.cra_name = "echainiv(authenc(hmac(sha1),"
2464 					    "cbc(des)))",
2465 				.cra_driver_name = "echainiv-authenc-"
2466 						   "hmac-sha1-cbc-des-caam-qi2",
2467 				.cra_blocksize = DES_BLOCK_SIZE,
2468 			},
2469 			.setkey = aead_setkey,
2470 			.setauthsize = aead_setauthsize,
2471 			.encrypt = aead_encrypt,
2472 			.decrypt = aead_decrypt,
2473 			.ivsize = DES_BLOCK_SIZE,
2474 			.maxauthsize = SHA1_DIGEST_SIZE,
2475 		},
2476 		.caam = {
2477 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2478 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2479 					   OP_ALG_AAI_HMAC_PRECOMP,
2480 			.geniv = true,
2481 		}
2482 	},
2483 	{
2484 		.aead = {
2485 			.base = {
2486 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2487 				.cra_driver_name = "authenc-hmac-sha224-"
2488 						   "cbc-des-caam-qi2",
2489 				.cra_blocksize = DES_BLOCK_SIZE,
2490 			},
2491 			.setkey = aead_setkey,
2492 			.setauthsize = aead_setauthsize,
2493 			.encrypt = aead_encrypt,
2494 			.decrypt = aead_decrypt,
2495 			.ivsize = DES_BLOCK_SIZE,
2496 			.maxauthsize = SHA224_DIGEST_SIZE,
2497 		},
2498 		.caam = {
2499 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2500 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2501 					   OP_ALG_AAI_HMAC_PRECOMP,
2502 		},
2503 	},
2504 	{
2505 		.aead = {
2506 			.base = {
2507 				.cra_name = "echainiv(authenc(hmac(sha224),"
2508 					    "cbc(des)))",
2509 				.cra_driver_name = "echainiv-authenc-"
2510 						   "hmac-sha224-cbc-des-"
2511 						   "caam-qi2",
2512 				.cra_blocksize = DES_BLOCK_SIZE,
2513 			},
2514 			.setkey = aead_setkey,
2515 			.setauthsize = aead_setauthsize,
2516 			.encrypt = aead_encrypt,
2517 			.decrypt = aead_decrypt,
2518 			.ivsize = DES_BLOCK_SIZE,
2519 			.maxauthsize = SHA224_DIGEST_SIZE,
2520 		},
2521 		.caam = {
2522 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2523 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2524 					   OP_ALG_AAI_HMAC_PRECOMP,
2525 			.geniv = true,
2526 		}
2527 	},
2528 	{
2529 		.aead = {
2530 			.base = {
2531 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2532 				.cra_driver_name = "authenc-hmac-sha256-"
2533 						   "cbc-des-caam-qi2",
2534 				.cra_blocksize = DES_BLOCK_SIZE,
2535 			},
2536 			.setkey = aead_setkey,
2537 			.setauthsize = aead_setauthsize,
2538 			.encrypt = aead_encrypt,
2539 			.decrypt = aead_decrypt,
2540 			.ivsize = DES_BLOCK_SIZE,
2541 			.maxauthsize = SHA256_DIGEST_SIZE,
2542 		},
2543 		.caam = {
2544 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2545 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2546 					   OP_ALG_AAI_HMAC_PRECOMP,
2547 		},
2548 	},
2549 	{
2550 		.aead = {
2551 			.base = {
2552 				.cra_name = "echainiv(authenc(hmac(sha256),"
2553 					    "cbc(des)))",
2554 				.cra_driver_name = "echainiv-authenc-"
2555 						   "hmac-sha256-cbc-des-"
2556 						   "caam-qi2",
2557 				.cra_blocksize = DES_BLOCK_SIZE,
2558 			},
2559 			.setkey = aead_setkey,
2560 			.setauthsize = aead_setauthsize,
2561 			.encrypt = aead_encrypt,
2562 			.decrypt = aead_decrypt,
2563 			.ivsize = DES_BLOCK_SIZE,
2564 			.maxauthsize = SHA256_DIGEST_SIZE,
2565 		},
2566 		.caam = {
2567 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2568 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2569 					   OP_ALG_AAI_HMAC_PRECOMP,
2570 			.geniv = true,
2571 		},
2572 	},
2573 	{
2574 		.aead = {
2575 			.base = {
2576 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2577 				.cra_driver_name = "authenc-hmac-sha384-"
2578 						   "cbc-des-caam-qi2",
2579 				.cra_blocksize = DES_BLOCK_SIZE,
2580 			},
2581 			.setkey = aead_setkey,
2582 			.setauthsize = aead_setauthsize,
2583 			.encrypt = aead_encrypt,
2584 			.decrypt = aead_decrypt,
2585 			.ivsize = DES_BLOCK_SIZE,
2586 			.maxauthsize = SHA384_DIGEST_SIZE,
2587 		},
2588 		.caam = {
2589 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2590 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2591 					   OP_ALG_AAI_HMAC_PRECOMP,
2592 		},
2593 	},
2594 	{
2595 		.aead = {
2596 			.base = {
2597 				.cra_name = "echainiv(authenc(hmac(sha384),"
2598 					    "cbc(des)))",
2599 				.cra_driver_name = "echainiv-authenc-"
2600 						   "hmac-sha384-cbc-des-"
2601 						   "caam-qi2",
2602 				.cra_blocksize = DES_BLOCK_SIZE,
2603 			},
2604 			.setkey = aead_setkey,
2605 			.setauthsize = aead_setauthsize,
2606 			.encrypt = aead_encrypt,
2607 			.decrypt = aead_decrypt,
2608 			.ivsize = DES_BLOCK_SIZE,
2609 			.maxauthsize = SHA384_DIGEST_SIZE,
2610 		},
2611 		.caam = {
2612 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2613 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2614 					   OP_ALG_AAI_HMAC_PRECOMP,
2615 			.geniv = true,
2616 		}
2617 	},
2618 	{
2619 		.aead = {
2620 			.base = {
2621 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2622 				.cra_driver_name = "authenc-hmac-sha512-"
2623 						   "cbc-des-caam-qi2",
2624 				.cra_blocksize = DES_BLOCK_SIZE,
2625 			},
2626 			.setkey = aead_setkey,
2627 			.setauthsize = aead_setauthsize,
2628 			.encrypt = aead_encrypt,
2629 			.decrypt = aead_decrypt,
2630 			.ivsize = DES_BLOCK_SIZE,
2631 			.maxauthsize = SHA512_DIGEST_SIZE,
2632 		},
2633 		.caam = {
2634 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2635 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2636 					   OP_ALG_AAI_HMAC_PRECOMP,
2637 		}
2638 	},
2639 	{
2640 		.aead = {
2641 			.base = {
2642 				.cra_name = "echainiv(authenc(hmac(sha512),"
2643 					    "cbc(des)))",
2644 				.cra_driver_name = "echainiv-authenc-"
2645 						   "hmac-sha512-cbc-des-"
2646 						   "caam-qi2",
2647 				.cra_blocksize = DES_BLOCK_SIZE,
2648 			},
2649 			.setkey = aead_setkey,
2650 			.setauthsize = aead_setauthsize,
2651 			.encrypt = aead_encrypt,
2652 			.decrypt = aead_decrypt,
2653 			.ivsize = DES_BLOCK_SIZE,
2654 			.maxauthsize = SHA512_DIGEST_SIZE,
2655 		},
2656 		.caam = {
2657 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2658 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2659 					   OP_ALG_AAI_HMAC_PRECOMP,
2660 			.geniv = true,
2661 		}
2662 	},
2663 	{
2664 		.aead = {
2665 			.base = {
2666 				.cra_name = "authenc(hmac(md5),"
2667 					    "rfc3686(ctr(aes)))",
2668 				.cra_driver_name = "authenc-hmac-md5-"
2669 						   "rfc3686-ctr-aes-caam-qi2",
2670 				.cra_blocksize = 1,
2671 			},
2672 			.setkey = aead_setkey,
2673 			.setauthsize = aead_setauthsize,
2674 			.encrypt = aead_encrypt,
2675 			.decrypt = aead_decrypt,
2676 			.ivsize = CTR_RFC3686_IV_SIZE,
2677 			.maxauthsize = MD5_DIGEST_SIZE,
2678 		},
2679 		.caam = {
2680 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2681 					   OP_ALG_AAI_CTR_MOD128,
2682 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2683 					   OP_ALG_AAI_HMAC_PRECOMP,
2684 			.rfc3686 = true,
2685 		},
2686 	},
2687 	{
2688 		.aead = {
2689 			.base = {
2690 				.cra_name = "seqiv(authenc("
2691 					    "hmac(md5),rfc3686(ctr(aes))))",
2692 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2693 						   "rfc3686-ctr-aes-caam-qi2",
2694 				.cra_blocksize = 1,
2695 			},
2696 			.setkey = aead_setkey,
2697 			.setauthsize = aead_setauthsize,
2698 			.encrypt = aead_encrypt,
2699 			.decrypt = aead_decrypt,
2700 			.ivsize = CTR_RFC3686_IV_SIZE,
2701 			.maxauthsize = MD5_DIGEST_SIZE,
2702 		},
2703 		.caam = {
2704 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2705 					   OP_ALG_AAI_CTR_MOD128,
2706 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2707 					   OP_ALG_AAI_HMAC_PRECOMP,
2708 			.rfc3686 = true,
2709 			.geniv = true,
2710 		},
2711 	},
2712 	{
2713 		.aead = {
2714 			.base = {
2715 				.cra_name = "authenc(hmac(sha1),"
2716 					    "rfc3686(ctr(aes)))",
2717 				.cra_driver_name = "authenc-hmac-sha1-"
2718 						   "rfc3686-ctr-aes-caam-qi2",
2719 				.cra_blocksize = 1,
2720 			},
2721 			.setkey = aead_setkey,
2722 			.setauthsize = aead_setauthsize,
2723 			.encrypt = aead_encrypt,
2724 			.decrypt = aead_decrypt,
2725 			.ivsize = CTR_RFC3686_IV_SIZE,
2726 			.maxauthsize = SHA1_DIGEST_SIZE,
2727 		},
2728 		.caam = {
2729 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2730 					   OP_ALG_AAI_CTR_MOD128,
2731 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2732 					   OP_ALG_AAI_HMAC_PRECOMP,
2733 			.rfc3686 = true,
2734 		},
2735 	},
2736 	{
2737 		.aead = {
2738 			.base = {
2739 				.cra_name = "seqiv(authenc("
2740 					    "hmac(sha1),rfc3686(ctr(aes))))",
2741 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2742 						   "rfc3686-ctr-aes-caam-qi2",
2743 				.cra_blocksize = 1,
2744 			},
2745 			.setkey = aead_setkey,
2746 			.setauthsize = aead_setauthsize,
2747 			.encrypt = aead_encrypt,
2748 			.decrypt = aead_decrypt,
2749 			.ivsize = CTR_RFC3686_IV_SIZE,
2750 			.maxauthsize = SHA1_DIGEST_SIZE,
2751 		},
2752 		.caam = {
2753 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2754 					   OP_ALG_AAI_CTR_MOD128,
2755 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2756 					   OP_ALG_AAI_HMAC_PRECOMP,
2757 			.rfc3686 = true,
2758 			.geniv = true,
2759 		},
2760 	},
2761 	{
2762 		.aead = {
2763 			.base = {
2764 				.cra_name = "authenc(hmac(sha224),"
2765 					    "rfc3686(ctr(aes)))",
2766 				.cra_driver_name = "authenc-hmac-sha224-"
2767 						   "rfc3686-ctr-aes-caam-qi2",
2768 				.cra_blocksize = 1,
2769 			},
2770 			.setkey = aead_setkey,
2771 			.setauthsize = aead_setauthsize,
2772 			.encrypt = aead_encrypt,
2773 			.decrypt = aead_decrypt,
2774 			.ivsize = CTR_RFC3686_IV_SIZE,
2775 			.maxauthsize = SHA224_DIGEST_SIZE,
2776 		},
2777 		.caam = {
2778 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2779 					   OP_ALG_AAI_CTR_MOD128,
2780 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2781 					   OP_ALG_AAI_HMAC_PRECOMP,
2782 			.rfc3686 = true,
2783 		},
2784 	},
2785 	{
2786 		.aead = {
2787 			.base = {
2788 				.cra_name = "seqiv(authenc("
2789 					    "hmac(sha224),rfc3686(ctr(aes))))",
2790 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2791 						   "rfc3686-ctr-aes-caam-qi2",
2792 				.cra_blocksize = 1,
2793 			},
2794 			.setkey = aead_setkey,
2795 			.setauthsize = aead_setauthsize,
2796 			.encrypt = aead_encrypt,
2797 			.decrypt = aead_decrypt,
2798 			.ivsize = CTR_RFC3686_IV_SIZE,
2799 			.maxauthsize = SHA224_DIGEST_SIZE,
2800 		},
2801 		.caam = {
2802 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2803 					   OP_ALG_AAI_CTR_MOD128,
2804 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2805 					   OP_ALG_AAI_HMAC_PRECOMP,
2806 			.rfc3686 = true,
2807 			.geniv = true,
2808 		},
2809 	},
2810 	{
2811 		.aead = {
2812 			.base = {
2813 				.cra_name = "authenc(hmac(sha256),"
2814 					    "rfc3686(ctr(aes)))",
2815 				.cra_driver_name = "authenc-hmac-sha256-"
2816 						   "rfc3686-ctr-aes-caam-qi2",
2817 				.cra_blocksize = 1,
2818 			},
2819 			.setkey = aead_setkey,
2820 			.setauthsize = aead_setauthsize,
2821 			.encrypt = aead_encrypt,
2822 			.decrypt = aead_decrypt,
2823 			.ivsize = CTR_RFC3686_IV_SIZE,
2824 			.maxauthsize = SHA256_DIGEST_SIZE,
2825 		},
2826 		.caam = {
2827 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2828 					   OP_ALG_AAI_CTR_MOD128,
2829 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2830 					   OP_ALG_AAI_HMAC_PRECOMP,
2831 			.rfc3686 = true,
2832 		},
2833 	},
2834 	{
2835 		.aead = {
2836 			.base = {
2837 				.cra_name = "seqiv(authenc(hmac(sha256),"
2838 					    "rfc3686(ctr(aes))))",
2839 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2840 						   "rfc3686-ctr-aes-caam-qi2",
2841 				.cra_blocksize = 1,
2842 			},
2843 			.setkey = aead_setkey,
2844 			.setauthsize = aead_setauthsize,
2845 			.encrypt = aead_encrypt,
2846 			.decrypt = aead_decrypt,
2847 			.ivsize = CTR_RFC3686_IV_SIZE,
2848 			.maxauthsize = SHA256_DIGEST_SIZE,
2849 		},
2850 		.caam = {
2851 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2852 					   OP_ALG_AAI_CTR_MOD128,
2853 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2854 					   OP_ALG_AAI_HMAC_PRECOMP,
2855 			.rfc3686 = true,
2856 			.geniv = true,
2857 		},
2858 	},
2859 	{
2860 		.aead = {
2861 			.base = {
2862 				.cra_name = "authenc(hmac(sha384),"
2863 					    "rfc3686(ctr(aes)))",
2864 				.cra_driver_name = "authenc-hmac-sha384-"
2865 						   "rfc3686-ctr-aes-caam-qi2",
2866 				.cra_blocksize = 1,
2867 			},
2868 			.setkey = aead_setkey,
2869 			.setauthsize = aead_setauthsize,
2870 			.encrypt = aead_encrypt,
2871 			.decrypt = aead_decrypt,
2872 			.ivsize = CTR_RFC3686_IV_SIZE,
2873 			.maxauthsize = SHA384_DIGEST_SIZE,
2874 		},
2875 		.caam = {
2876 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2877 					   OP_ALG_AAI_CTR_MOD128,
2878 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2879 					   OP_ALG_AAI_HMAC_PRECOMP,
2880 			.rfc3686 = true,
2881 		},
2882 	},
2883 	{
2884 		.aead = {
2885 			.base = {
2886 				.cra_name = "seqiv(authenc(hmac(sha384),"
2887 					    "rfc3686(ctr(aes))))",
2888 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2889 						   "rfc3686-ctr-aes-caam-qi2",
2890 				.cra_blocksize = 1,
2891 			},
2892 			.setkey = aead_setkey,
2893 			.setauthsize = aead_setauthsize,
2894 			.encrypt = aead_encrypt,
2895 			.decrypt = aead_decrypt,
2896 			.ivsize = CTR_RFC3686_IV_SIZE,
2897 			.maxauthsize = SHA384_DIGEST_SIZE,
2898 		},
2899 		.caam = {
2900 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2901 					   OP_ALG_AAI_CTR_MOD128,
2902 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2903 					   OP_ALG_AAI_HMAC_PRECOMP,
2904 			.rfc3686 = true,
2905 			.geniv = true,
2906 		},
2907 	},
2908 	{
2909 		.aead = {
2910 			.base = {
2911 				.cra_name = "rfc7539(chacha20,poly1305)",
2912 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2913 						   "caam-qi2",
2914 				.cra_blocksize = 1,
2915 			},
2916 			.setkey = chachapoly_setkey,
2917 			.setauthsize = chachapoly_setauthsize,
2918 			.encrypt = aead_encrypt,
2919 			.decrypt = aead_decrypt,
2920 			.ivsize = CHACHAPOLY_IV_SIZE,
2921 			.maxauthsize = POLY1305_DIGEST_SIZE,
2922 		},
2923 		.caam = {
2924 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2925 					   OP_ALG_AAI_AEAD,
2926 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2927 					   OP_ALG_AAI_AEAD,
2928 			.nodkp = true,
2929 		},
2930 	},
2931 	{
2932 		.aead = {
2933 			.base = {
2934 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2935 				.cra_driver_name = "rfc7539esp-chacha20-"
2936 						   "poly1305-caam-qi2",
2937 				.cra_blocksize = 1,
2938 			},
2939 			.setkey = chachapoly_setkey,
2940 			.setauthsize = chachapoly_setauthsize,
2941 			.encrypt = aead_encrypt,
2942 			.decrypt = aead_decrypt,
2943 			.ivsize = 8,
2944 			.maxauthsize = POLY1305_DIGEST_SIZE,
2945 		},
2946 		.caam = {
2947 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2948 					   OP_ALG_AAI_AEAD,
2949 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2950 					   OP_ALG_AAI_AEAD,
2951 			.nodkp = true,
2952 		},
2953 	},
2954 	{
2955 		.aead = {
2956 			.base = {
2957 				.cra_name = "authenc(hmac(sha512),"
2958 					    "rfc3686(ctr(aes)))",
2959 				.cra_driver_name = "authenc-hmac-sha512-"
2960 						   "rfc3686-ctr-aes-caam-qi2",
2961 				.cra_blocksize = 1,
2962 			},
2963 			.setkey = aead_setkey,
2964 			.setauthsize = aead_setauthsize,
2965 			.encrypt = aead_encrypt,
2966 			.decrypt = aead_decrypt,
2967 			.ivsize = CTR_RFC3686_IV_SIZE,
2968 			.maxauthsize = SHA512_DIGEST_SIZE,
2969 		},
2970 		.caam = {
2971 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2972 					   OP_ALG_AAI_CTR_MOD128,
2973 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2974 					   OP_ALG_AAI_HMAC_PRECOMP,
2975 			.rfc3686 = true,
2976 		},
2977 	},
2978 	{
2979 		.aead = {
2980 			.base = {
2981 				.cra_name = "seqiv(authenc(hmac(sha512),"
2982 					    "rfc3686(ctr(aes))))",
2983 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2984 						   "rfc3686-ctr-aes-caam-qi2",
2985 				.cra_blocksize = 1,
2986 			},
2987 			.setkey = aead_setkey,
2988 			.setauthsize = aead_setauthsize,
2989 			.encrypt = aead_encrypt,
2990 			.decrypt = aead_decrypt,
2991 			.ivsize = CTR_RFC3686_IV_SIZE,
2992 			.maxauthsize = SHA512_DIGEST_SIZE,
2993 		},
2994 		.caam = {
2995 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2996 					   OP_ALG_AAI_CTR_MOD128,
2997 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2998 					   OP_ALG_AAI_HMAC_PRECOMP,
2999 			.rfc3686 = true,
3000 			.geniv = true,
3001 		},
3002 	},
3003 };
3004 
3005 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3006 {
3007 	struct skcipher_alg *alg = &t_alg->skcipher;
3008 
3009 	alg->base.cra_module = THIS_MODULE;
3010 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3011 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3012 	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3013 			      CRYPTO_ALG_KERN_DRIVER_ONLY);
3014 
3015 	alg->init = caam_cra_init_skcipher;
3016 	alg->exit = caam_cra_exit;
3017 }
3018 
3019 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3020 {
3021 	struct aead_alg *alg = &t_alg->aead;
3022 
3023 	alg->base.cra_module = THIS_MODULE;
3024 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3025 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3026 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3027 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
3028 
3029 	alg->init = caam_cra_init_aead;
3030 	alg->exit = caam_cra_exit_aead;
3031 }
3032 
3033 /* max hash key is max split key size */
3034 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
3035 
3036 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
3037 
3038 /* caam context sizes for hashes: running digest + 8 */
3039 #define HASH_MSG_LEN			8
3040 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3041 
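/*
 * Each ahash session keeps one flow context (shared descriptor) per
 * operation type below; ahash_set_sh_desc() constructs the UPDATE,
 * UPDATE_FIRST, FINALIZE and DIGEST descriptors into ctx->flc[].
 */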
3042 enum hash_optype {
3043 	UPDATE = 0,
3044 	UPDATE_FIRST,
3045 	FINALIZE,
3046 	DIGEST,
3047 	HASH_NUM_OP
3048 };
3049 
3050 /**
3051  * struct caam_hash_ctx - ahash per-session context
3052  * @flc: Flow Contexts array
3053  * @key: authentication key
3054  * @flc_dma: I/O virtual addresses of the Flow Contexts
3055  * @dev: dpseci device
3056  * @ctx_len: size of Context Register
3057  * @adata: hashing algorithm details
3058  */
3059 struct caam_hash_ctx {
3060 	struct caam_flc flc[HASH_NUM_OP];
3061 	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3062 	dma_addr_t flc_dma[HASH_NUM_OP];
3063 	struct device *dev;
3064 	int ctx_len;
3065 	struct alginfo adata;
3066 };
3067 
3068 /* ahash state */
3069 struct caam_hash_state {
3070 	struct caam_request caam_req;
3071 	dma_addr_t buf_dma;
3072 	dma_addr_t ctx_dma;
3073 	int ctx_dma_len;
3074 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3075 	int buflen;
3076 	int next_buflen;
3077 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3078 	int (*update)(struct ahash_request *req);
3079 	int (*final)(struct ahash_request *req);
3080 	int (*finup)(struct ahash_request *req);
3081 };
3082 
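/*
 * caam_export_state is the serialized form used by the ahash export/import
 * callbacks: only the software-visible pieces (buffered data, running
 * context and the current update/final/finup handlers) are preserved.
 */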
3083 struct caam_export_state {
3084 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3085 	u8 caam_ctx[MAX_CTX_LEN];
3086 	int buflen;
3087 	int (*update)(struct ahash_request *req);
3088 	int (*final)(struct ahash_request *req);
3089 	int (*finup)(struct ahash_request *req);
3090 };
3091 
3092 /* Map current buffer in state (if length > 0) and put it in link table */
3093 static inline int buf_map_to_qm_sg(struct device *dev,
3094 				   struct dpaa2_sg_entry *qm_sg,
3095 				   struct caam_hash_state *state)
3096 {
3097 	int buflen = state->buflen;
3098 
3099 	if (!buflen)
3100 		return 0;
3101 
3102 	state->buf_dma = dma_map_single(dev, state->buf, buflen,
3103 					DMA_TO_DEVICE);
3104 	if (dma_mapping_error(dev, state->buf_dma)) {
3105 		dev_err(dev, "unable to map buf\n");
3106 		state->buf_dma = 0;
3107 		return -ENOMEM;
3108 	}
3109 
3110 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3111 
3112 	return 0;
3113 }
3114 
3115 /* Map state->caam_ctx, and add it to link table */
3116 static inline int ctx_map_to_qm_sg(struct device *dev,
3117 				   struct caam_hash_state *state, int ctx_len,
3118 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3119 {
3120 	state->ctx_dma_len = ctx_len;
3121 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3122 	if (dma_mapping_error(dev, state->ctx_dma)) {
3123 		dev_err(dev, "unable to map ctx\n");
3124 		state->ctx_dma = 0;
3125 		return -ENOMEM;
3126 	}
3127 
3128 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3129 
3130 	return 0;
3131 }
3132 
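/*
 * ahash_set_sh_desc() (re)builds the four shared descriptors for the current
 * key and digest size via cnstr_shdsc_ahash() and pushes each one to the
 * device with dma_sync_single_for_device(); flc->flc[1] carries the shared
 * descriptor length (SDL) expected by the DPSECI flow context.
 */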
3133 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3134 {
3135 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3136 	int digestsize = crypto_ahash_digestsize(ahash);
3137 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3138 	struct caam_flc *flc;
3139 	u32 *desc;
3140 
3141 	/* ahash_update shared descriptor */
3142 	flc = &ctx->flc[UPDATE];
3143 	desc = flc->sh_desc;
3144 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3145 			  ctx->ctx_len, true, priv->sec_attr.era);
3146 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3147 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3148 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3149 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3150 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3151 			     1);
3152 
3153 	/* ahash_update_first shared descriptor */
3154 	flc = &ctx->flc[UPDATE_FIRST];
3155 	desc = flc->sh_desc;
3156 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3157 			  ctx->ctx_len, false, priv->sec_attr.era);
3158 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3159 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3160 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3161 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3162 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3163 			     1);
3164 
3165 	/* ahash_final shared descriptor */
3166 	flc = &ctx->flc[FINALIZE];
3167 	desc = flc->sh_desc;
3168 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3169 			  ctx->ctx_len, true, priv->sec_attr.era);
3170 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3171 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3172 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3173 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3174 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3175 			     1);
3176 
3177 	/* ahash_digest shared descriptor */
3178 	flc = &ctx->flc[DIGEST];
3179 	desc = flc->sh_desc;
3180 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3181 			  ctx->ctx_len, false, priv->sec_attr.era);
3182 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3183 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3184 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3185 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3186 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3187 			     1);
3188 
3189 	return 0;
3190 }
3191 
3192 struct split_key_sh_result {
3193 	struct completion completion;
3194 	int err;
3195 	struct device *dev;
3196 };
3197 
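/*
 * split_key_sh_done() is the completion callback for the synchronous
 * one-shot job issued by hash_digest_key(): it records the (translated)
 * status and wakes the waiter.
 */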
3198 static void split_key_sh_done(void *cbk_ctx, u32 err)
3199 {
3200 	struct split_key_sh_result *res = cbk_ctx;
3201 
3202 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3203 
3204 	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3205 	complete(&res->completion);
3206 }
3207 
3208 /* Digest the key in place when it is too long, shrinking *keylen to digestsize */
3209 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3210 			   u32 digestsize)
3211 {
3212 	struct caam_request *req_ctx;
3213 	u32 *desc;
3214 	struct split_key_sh_result result;
3215 	dma_addr_t key_dma;
3216 	struct caam_flc *flc;
3217 	dma_addr_t flc_dma;
3218 	int ret = -ENOMEM;
3219 	struct dpaa2_fl_entry *in_fle, *out_fle;
3220 
3221 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3222 	if (!req_ctx)
3223 		return -ENOMEM;
3224 
3225 	in_fle = &req_ctx->fd_flt[1];
3226 	out_fle = &req_ctx->fd_flt[0];
3227 
3228 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3229 	if (!flc)
3230 		goto err_flc;
3231 
3232 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3233 	if (dma_mapping_error(ctx->dev, key_dma)) {
3234 		dev_err(ctx->dev, "unable to map key memory\n");
3235 		goto err_key_dma;
3236 	}
3237 
3238 	desc = flc->sh_desc;
3239 
3240 	init_sh_desc(desc, 0);
3241 
3242 	/* descriptor to perform unkeyed hash on key_in */
3243 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3244 			 OP_ALG_AS_INITFINAL);
3245 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3246 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3247 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3248 			 LDST_SRCDST_BYTE_CONTEXT);
3249 
3250 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3251 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3252 				 desc_bytes(desc), DMA_TO_DEVICE);
3253 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3254 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3255 		goto err_flc_dma;
3256 	}
3257 
3258 	dpaa2_fl_set_final(in_fle, true);
3259 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3260 	dpaa2_fl_set_addr(in_fle, key_dma);
3261 	dpaa2_fl_set_len(in_fle, *keylen);
3262 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3263 	dpaa2_fl_set_addr(out_fle, key_dma);
3264 	dpaa2_fl_set_len(out_fle, digestsize);
3265 
3266 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3267 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3268 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3269 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3270 			     1);
3271 
3272 	result.err = 0;
3273 	init_completion(&result.completion);
3274 	result.dev = ctx->dev;
3275 
3276 	req_ctx->flc = flc;
3277 	req_ctx->flc_dma = flc_dma;
3278 	req_ctx->cbk = split_key_sh_done;
3279 	req_ctx->ctx = &result;
3280 
3281 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3282 	if (ret == -EINPROGRESS) {
3283 		/* in progress */
3284 		wait_for_completion(&result.completion);
3285 		ret = result.err;
3286 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3287 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3288 				     digestsize, 1);
3289 	}
3290 
3291 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3292 			 DMA_TO_DEVICE);
3293 err_flc_dma:
3294 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3295 err_key_dma:
3296 	kfree(flc);
3297 err_flc:
3298 	kfree(req_ctx);
3299 
3300 	*keylen = digestsize;
3301 
3302 	return ret;
3303 }
3304 
3305 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3306 			unsigned int keylen)
3307 {
3308 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3309 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3310 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3311 	int ret;
3312 	u8 *hashed_key = NULL;
3313 
3314 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3315 
3316 	if (keylen > blocksize) {
3317 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3318 		if (!hashed_key)
3319 			return -ENOMEM;
3320 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3321 		if (ret)
3322 			goto bad_free_key;
3323 		key = hashed_key;
3324 	}
3325 
3326 	ctx->adata.keylen = keylen;
3327 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3328 					      OP_ALG_ALGSEL_MASK);
3329 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3330 		goto bad_free_key;
3331 
3332 	ctx->adata.key_virt = key;
3333 	ctx->adata.key_inline = true;
3334 
3335 	/*
3336 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3337 	 * in invalid opcodes (last bytes of user key) in the resulting
3338 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3339 	 * addresses are needed.
3340 	 */
3341 	if (keylen > ctx->adata.keylen_pad) {
3342 		memcpy(ctx->key, key, keylen);
3343 		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3344 					   ctx->adata.keylen_pad,
3345 					   DMA_TO_DEVICE);
3346 	}
3347 
3348 	ret = ahash_set_sh_desc(ahash);
3349 	kfree(hashed_key);
3350 	return ret;
3351 bad_free_key:
3352 	kfree(hashed_key);
3353 	return -EINVAL;
3354 }
3355 
3356 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3357 			       struct ahash_request *req)
3358 {
3359 	struct caam_hash_state *state = ahash_request_ctx(req);
3360 
3361 	if (edesc->src_nents)
3362 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3363 
3364 	if (edesc->qm_sg_bytes)
3365 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3366 				 DMA_TO_DEVICE);
3367 
3368 	if (state->buf_dma) {
3369 		dma_unmap_single(dev, state->buf_dma, state->buflen,
3370 				 DMA_TO_DEVICE);
3371 		state->buf_dma = 0;
3372 	}
3373 }
3374 
3375 static inline void ahash_unmap_ctx(struct device *dev,
3376 				   struct ahash_edesc *edesc,
3377 				   struct ahash_request *req, u32 flag)
3378 {
3379 	struct caam_hash_state *state = ahash_request_ctx(req);
3380 
3381 	if (state->ctx_dma) {
3382 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3383 		state->ctx_dma = 0;
3384 	}
3385 	ahash_unmap(dev, edesc, req);
3386 }
3387 
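/*
 * Completion callbacks: ahash_done() and ahash_done_ctx_src() copy the final
 * digest out of state->caam_ctx, while ahash_done_bi() keeps the running
 * context and stashes the unprocessed tail of the request in state->buf for
 * the next update. All of them translate the CAAM status word before
 * completing the crypto request.
 */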
3388 static void ahash_done(void *cbk_ctx, u32 status)
3389 {
3390 	struct crypto_async_request *areq = cbk_ctx;
3391 	struct ahash_request *req = ahash_request_cast(areq);
3392 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3393 	struct caam_hash_state *state = ahash_request_ctx(req);
3394 	struct ahash_edesc *edesc = state->caam_req.edesc;
3395 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3396 	int digestsize = crypto_ahash_digestsize(ahash);
3397 	int ecode = 0;
3398 
3399 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3400 
3401 	if (unlikely(status))
3402 		ecode = caam_qi2_strstatus(ctx->dev, status);
3403 
3404 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3405 	memcpy(req->result, state->caam_ctx, digestsize);
3406 	qi_cache_free(edesc);
3407 
3408 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3409 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3410 			     ctx->ctx_len, 1);
3411 
3412 	req->base.complete(&req->base, ecode);
3413 }
3414 
3415 static void ahash_done_bi(void *cbk_ctx, u32 status)
3416 {
3417 	struct crypto_async_request *areq = cbk_ctx;
3418 	struct ahash_request *req = ahash_request_cast(areq);
3419 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3420 	struct caam_hash_state *state = ahash_request_ctx(req);
3421 	struct ahash_edesc *edesc = state->caam_req.edesc;
3422 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3423 	int ecode = 0;
3424 
3425 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3426 
3427 	if (unlikely(status))
3428 		ecode = caam_qi2_strstatus(ctx->dev, status);
3429 
3430 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3431 	qi_cache_free(edesc);
3432 
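	/*
	 * Stash the trailing bytes that were not hashed (the last
	 * next_buflen bytes of req->src) so the next request can prepend
	 * them to its data.
	 */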
3433 	scatterwalk_map_and_copy(state->buf, req->src,
3434 				 req->nbytes - state->next_buflen,
3435 				 state->next_buflen, 0);
3436 	state->buflen = state->next_buflen;
3437 
3438 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3439 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3440 			     state->buflen, 1);
3441 
3442 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3443 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3444 			     ctx->ctx_len, 1);
3445 	if (req->result)
3446 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3447 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3448 				     crypto_ahash_digestsize(ahash), 1);
3449 
3450 	req->base.complete(&req->base, ecode);
3451 }
3452 
3453 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3454 {
3455 	struct crypto_async_request *areq = cbk_ctx;
3456 	struct ahash_request *req = ahash_request_cast(areq);
3457 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3458 	struct caam_hash_state *state = ahash_request_ctx(req);
3459 	struct ahash_edesc *edesc = state->caam_req.edesc;
3460 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3461 	int digestsize = crypto_ahash_digestsize(ahash);
3462 	int ecode = 0;
3463 
3464 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3465 
3466 	if (unlikely(status))
3467 		ecode = caam_qi2_strstatus(ctx->dev, status);
3468 
3469 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3470 	memcpy(req->result, state->caam_ctx, digestsize);
3471 	qi_cache_free(edesc);
3472 
3473 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3474 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3475 			     ctx->ctx_len, 1);
3476 
3477 	req->base.complete(&req->base, ecode);
3478 }
3479 
3480 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3481 {
3482 	struct crypto_async_request *areq = cbk_ctx;
3483 	struct ahash_request *req = ahash_request_cast(areq);
3484 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3485 	struct caam_hash_state *state = ahash_request_ctx(req);
3486 	struct ahash_edesc *edesc = state->caam_req.edesc;
3487 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3488 	int ecode = 0;
3489 
3490 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3491 
3492 	if (unlikely(status))
3493 		ecode = caam_qi2_strstatus(ctx->dev, status);
3494 
3495 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3496 	qi_cache_free(edesc);
3497 
3498 	scatterwalk_map_and_copy(state->buf, req->src,
3499 				 req->nbytes - state->next_buflen,
3500 				 state->next_buflen, 0);
3501 	state->buflen = state->next_buflen;
3502 
3503 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3504 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3505 			     state->buflen, 1);
3506 
3507 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3508 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3509 			     ctx->ctx_len, 1);
3510 	if (req->result)
3511 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3512 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3513 				     crypto_ahash_digestsize(ahash), 1);
3514 
3515 	req->base.complete(&req->base, ecode);
3516 }
3517 
3518 static int ahash_update_ctx(struct ahash_request *req)
3519 {
3520 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3521 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3522 	struct caam_hash_state *state = ahash_request_ctx(req);
3523 	struct caam_request *req_ctx = &state->caam_req;
3524 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3525 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3526 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3527 		      GFP_KERNEL : GFP_ATOMIC;
3528 	u8 *buf = state->buf;
3529 	int *buflen = &state->buflen;
3530 	int *next_buflen = &state->next_buflen;
3531 	int in_len = *buflen + req->nbytes, to_hash;
3532 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3533 	struct ahash_edesc *edesc;
3534 	int ret = 0;
3535 
3536 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3537 	to_hash = in_len - *next_buflen;
3538 
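	/*
	 * Only whole blocks (to_hash bytes) are sent to the engine; the
	 * remainder (next_buflen bytes) is kept in the request buffer for
	 * the next update.
	 */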
3539 	if (to_hash) {
3540 		struct dpaa2_sg_entry *sg_table;
3541 		int src_len = req->nbytes - *next_buflen;
3542 
3543 		src_nents = sg_nents_for_len(req->src, src_len);
3544 		if (src_nents < 0) {
3545 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3546 			return src_nents;
3547 		}
3548 
3549 		if (src_nents) {
3550 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3551 						  DMA_TO_DEVICE);
3552 			if (!mapped_nents) {
3553 				dev_err(ctx->dev, "unable to DMA map source\n");
3554 				return -ENOMEM;
3555 			}
3556 		} else {
3557 			mapped_nents = 0;
3558 		}
3559 
3560 		/* allocate space for base edesc and link tables */
3561 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3562 		if (!edesc) {
3563 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3564 				     DMA_TO_DEVICE);
3565 			return -ENOMEM;
3566 		}
3567 
3568 		edesc->src_nents = src_nents;
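		/*
		 * QM S/G table layout: [0] running context, [1] data buffered
		 * by the previous update (if any), followed by the mapped
		 * source entries.
		 */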
3569 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3570 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3571 			      sizeof(*sg_table);
3572 		sg_table = &edesc->sgt[0];
3573 
3574 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3575 				       DMA_BIDIRECTIONAL);
3576 		if (ret)
3577 			goto unmap_ctx;
3578 
3579 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3580 		if (ret)
3581 			goto unmap_ctx;
3582 
3583 		if (mapped_nents) {
3584 			sg_to_qm_sg_last(req->src, src_len,
3585 					 sg_table + qm_sg_src_index, 0);
3586 		} else {
3587 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3588 					   true);
3589 		}
3590 
3591 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3592 						  qm_sg_bytes, DMA_TO_DEVICE);
3593 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3594 			dev_err(ctx->dev, "unable to map S/G table\n");
3595 			ret = -ENOMEM;
3596 			goto unmap_ctx;
3597 		}
3598 		edesc->qm_sg_bytes = qm_sg_bytes;
3599 
3600 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3601 		dpaa2_fl_set_final(in_fle, true);
3602 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3603 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3604 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3605 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3606 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3607 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3608 
3609 		req_ctx->flc = &ctx->flc[UPDATE];
3610 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3611 		req_ctx->cbk = ahash_done_bi;
3612 		req_ctx->ctx = &req->base;
3613 		req_ctx->edesc = edesc;
3614 
3615 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3616 		if (ret != -EINPROGRESS &&
3617 		    !(ret == -EBUSY &&
3618 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3619 			goto unmap_ctx;
3620 	} else if (*next_buflen) {
3621 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3622 					 req->nbytes, 0);
3623 		*buflen = *next_buflen;
3624 
3625 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3626 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
3627 				     *buflen, 1);
3628 	}
3629 
3630 	return ret;
3631 unmap_ctx:
3632 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3633 	qi_cache_free(edesc);
3634 	return ret;
3635 }
3636 
3637 static int ahash_final_ctx(struct ahash_request *req)
3638 {
3639 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3640 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3641 	struct caam_hash_state *state = ahash_request_ctx(req);
3642 	struct caam_request *req_ctx = &state->caam_req;
3643 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3644 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3645 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3646 		      GFP_KERNEL : GFP_ATOMIC;
3647 	int buflen = state->buflen;
3648 	int qm_sg_bytes;
3649 	int digestsize = crypto_ahash_digestsize(ahash);
3650 	struct ahash_edesc *edesc;
3651 	struct dpaa2_sg_entry *sg_table;
3652 	int ret;
3653 
3654 	/* allocate space for base edesc and link tables */
3655 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3656 	if (!edesc)
3657 		return -ENOMEM;
3658 
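	/*
	 * No new source data at final time: the input S/G table holds only
	 * the running context and, if present, the buffered tail.
	 */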
3659 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3660 	sg_table = &edesc->sgt[0];
3661 
3662 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3663 			       DMA_BIDIRECTIONAL);
3664 	if (ret)
3665 		goto unmap_ctx;
3666 
3667 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3668 	if (ret)
3669 		goto unmap_ctx;
3670 
3671 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3672 
3673 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3674 					  DMA_TO_DEVICE);
3675 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3676 		dev_err(ctx->dev, "unable to map S/G table\n");
3677 		ret = -ENOMEM;
3678 		goto unmap_ctx;
3679 	}
3680 	edesc->qm_sg_bytes = qm_sg_bytes;
3681 
3682 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3683 	dpaa2_fl_set_final(in_fle, true);
3684 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3685 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3686 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3687 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3688 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3689 	dpaa2_fl_set_len(out_fle, digestsize);
3690 
3691 	req_ctx->flc = &ctx->flc[FINALIZE];
3692 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3693 	req_ctx->cbk = ahash_done_ctx_src;
3694 	req_ctx->ctx = &req->base;
3695 	req_ctx->edesc = edesc;
3696 
3697 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3698 	if (ret == -EINPROGRESS ||
3699 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3700 		return ret;
3701 
3702 unmap_ctx:
3703 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3704 	qi_cache_free(edesc);
3705 	return ret;
3706 }
3707 
3708 static int ahash_finup_ctx(struct ahash_request *req)
3709 {
3710 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3711 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3712 	struct caam_hash_state *state = ahash_request_ctx(req);
3713 	struct caam_request *req_ctx = &state->caam_req;
3714 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3715 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3716 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3717 		      GFP_KERNEL : GFP_ATOMIC;
3718 	int buflen = state->buflen;
3719 	int qm_sg_bytes, qm_sg_src_index;
3720 	int src_nents, mapped_nents;
3721 	int digestsize = crypto_ahash_digestsize(ahash);
3722 	struct ahash_edesc *edesc;
3723 	struct dpaa2_sg_entry *sg_table;
3724 	int ret;
3725 
3726 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3727 	if (src_nents < 0) {
3728 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3729 		return src_nents;
3730 	}
3731 
3732 	if (src_nents) {
3733 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3734 					  DMA_TO_DEVICE);
3735 		if (!mapped_nents) {
3736 			dev_err(ctx->dev, "unable to DMA map source\n");
3737 			return -ENOMEM;
3738 		}
3739 	} else {
3740 		mapped_nents = 0;
3741 	}
3742 
3743 	/* allocate space for base edesc and link tables */
3744 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3745 	if (!edesc) {
3746 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3747 		return -ENOMEM;
3748 	}
3749 
3750 	edesc->src_nents = src_nents;
3751 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3752 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3753 		      sizeof(*sg_table);
3754 	sg_table = &edesc->sgt[0];
3755 
3756 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3757 			       DMA_BIDIRECTIONAL);
3758 	if (ret)
3759 		goto unmap_ctx;
3760 
3761 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3762 	if (ret)
3763 		goto unmap_ctx;
3764 
3765 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3766 
3767 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3768 					  DMA_TO_DEVICE);
3769 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3770 		dev_err(ctx->dev, "unable to map S/G table\n");
3771 		ret = -ENOMEM;
3772 		goto unmap_ctx;
3773 	}
3774 	edesc->qm_sg_bytes = qm_sg_bytes;
3775 
3776 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3777 	dpaa2_fl_set_final(in_fle, true);
3778 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3779 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3780 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3781 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3782 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3783 	dpaa2_fl_set_len(out_fle, digestsize);
3784 
3785 	req_ctx->flc = &ctx->flc[FINALIZE];
3786 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3787 	req_ctx->cbk = ahash_done_ctx_src;
3788 	req_ctx->ctx = &req->base;
3789 	req_ctx->edesc = edesc;
3790 
3791 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3792 	if (ret == -EINPROGRESS ||
3793 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3794 		return ret;
3795 
3796 unmap_ctx:
3797 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3798 	qi_cache_free(edesc);
3799 	return ret;
3800 }
3801 
3802 static int ahash_digest(struct ahash_request *req)
3803 {
3804 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3805 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3806 	struct caam_hash_state *state = ahash_request_ctx(req);
3807 	struct caam_request *req_ctx = &state->caam_req;
3808 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3809 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3810 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3811 		      GFP_KERNEL : GFP_ATOMIC;
3812 	int digestsize = crypto_ahash_digestsize(ahash);
3813 	int src_nents, mapped_nents;
3814 	struct ahash_edesc *edesc;
3815 	int ret = -ENOMEM;
3816 
3817 	state->buf_dma = 0;
3818 
3819 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3820 	if (src_nents < 0) {
3821 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3822 		return src_nents;
3823 	}
3824 
3825 	if (src_nents) {
3826 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3827 					  DMA_TO_DEVICE);
3828 		if (!mapped_nents) {
3829 			dev_err(ctx->dev, "unable to map source for DMA\n");
3830 			return ret;
3831 		}
3832 	} else {
3833 		mapped_nents = 0;
3834 	}
3835 
3836 	/* allocate space for base edesc and link tables */
3837 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3838 	if (!edesc) {
3839 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3840 		return ret;
3841 	}
3842 
3843 	edesc->src_nents = src_nents;
3844 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3845 
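	/*
	 * When the source maps to more than one S/G entry, build a QM S/G
	 * table; a single mapped segment can be referenced directly by the
	 * input frame list entry.
	 */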
3846 	if (mapped_nents > 1) {
3847 		int qm_sg_bytes;
3848 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3849 
3850 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3851 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3852 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3853 						  qm_sg_bytes, DMA_TO_DEVICE);
3854 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3855 			dev_err(ctx->dev, "unable to map S/G table\n");
3856 			goto unmap;
3857 		}
3858 		edesc->qm_sg_bytes = qm_sg_bytes;
3859 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3860 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3861 	} else {
3862 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3863 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3864 	}
3865 
3866 	state->ctx_dma_len = digestsize;
3867 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3868 					DMA_FROM_DEVICE);
3869 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3870 		dev_err(ctx->dev, "unable to map ctx\n");
3871 		state->ctx_dma = 0;
3872 		goto unmap;
3873 	}
3874 
3875 	dpaa2_fl_set_final(in_fle, true);
3876 	dpaa2_fl_set_len(in_fle, req->nbytes);
3877 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3878 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3879 	dpaa2_fl_set_len(out_fle, digestsize);
3880 
3881 	req_ctx->flc = &ctx->flc[DIGEST];
3882 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3883 	req_ctx->cbk = ahash_done;
3884 	req_ctx->ctx = &req->base;
3885 	req_ctx->edesc = edesc;
3886 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3887 	if (ret == -EINPROGRESS ||
3888 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3889 		return ret;
3890 
3891 unmap:
3892 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3893 	qi_cache_free(edesc);
3894 	return ret;
3895 }
3896 
3897 static int ahash_final_no_ctx(struct ahash_request *req)
3898 {
3899 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3900 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3901 	struct caam_hash_state *state = ahash_request_ctx(req);
3902 	struct caam_request *req_ctx = &state->caam_req;
3903 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3904 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3905 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3906 		      GFP_KERNEL : GFP_ATOMIC;
3907 	u8 *buf = state->buf;
3908 	int buflen = state->buflen;
3909 	int digestsize = crypto_ahash_digestsize(ahash);
3910 	struct ahash_edesc *edesc;
3911 	int ret = -ENOMEM;
3912 
3913 	/* allocate space for base edesc and link tables */
3914 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3915 	if (!edesc)
3916 		return ret;
3917 
3918 	if (buflen) {
3919 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3920 						DMA_TO_DEVICE);
3921 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3922 			dev_err(ctx->dev, "unable to map src\n");
3923 			goto unmap;
3924 		}
3925 	}
3926 
3927 	state->ctx_dma_len = digestsize;
3928 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3929 					DMA_FROM_DEVICE);
3930 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3931 		dev_err(ctx->dev, "unable to map ctx\n");
3932 		state->ctx_dma = 0;
3933 		goto unmap;
3934 	}
3935 
3936 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3937 	dpaa2_fl_set_final(in_fle, true);
3938 	/*
3939 	 * The crypto engine requires the input entry to be present when the
3940 	 * "frame list" FD format is used.
3941 	 * Since the engine does not support FMT=2'b11 (unused entry type), the
3942 	 * best option is to leave in_fle zeroized except for the "Final" flag.
3943 	 */
3944 	if (buflen) {
3945 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3946 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3947 		dpaa2_fl_set_len(in_fle, buflen);
3948 	}
3949 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3950 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3951 	dpaa2_fl_set_len(out_fle, digestsize);
3952 
3953 	req_ctx->flc = &ctx->flc[DIGEST];
3954 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3955 	req_ctx->cbk = ahash_done;
3956 	req_ctx->ctx = &req->base;
3957 	req_ctx->edesc = edesc;
3958 
3959 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3960 	if (ret == -EINPROGRESS ||
3961 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3962 		return ret;
3963 
3964 unmap:
3965 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3966 	qi_cache_free(edesc);
3967 	return ret;
3968 }
3969 
3970 static int ahash_update_no_ctx(struct ahash_request *req)
3971 {
3972 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3973 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3974 	struct caam_hash_state *state = ahash_request_ctx(req);
3975 	struct caam_request *req_ctx = &state->caam_req;
3976 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3977 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3978 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3979 		      GFP_KERNEL : GFP_ATOMIC;
3980 	u8 *buf = state->buf;
3981 	int *buflen = &state->buflen;
3982 	int *next_buflen = &state->next_buflen;
3983 	int in_len = *buflen + req->nbytes, to_hash;
3984 	int qm_sg_bytes, src_nents, mapped_nents;
3985 	struct ahash_edesc *edesc;
3986 	int ret = 0;
3987 
3988 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3989 	to_hash = in_len - *next_buflen;
3990 
3991 	if (to_hash) {
3992 		struct dpaa2_sg_entry *sg_table;
3993 		int src_len = req->nbytes - *next_buflen;
3994 
3995 		src_nents = sg_nents_for_len(req->src, src_len);
3996 		if (src_nents < 0) {
3997 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3998 			return src_nents;
3999 		}
4000 
4001 		if (src_nents) {
4002 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4003 						  DMA_TO_DEVICE);
4004 			if (!mapped_nents) {
4005 				dev_err(ctx->dev, "unable to DMA map source\n");
4006 				return -ENOMEM;
4007 			}
4008 		} else {
4009 			mapped_nents = 0;
4010 		}
4011 
4012 		/* allocate space for base edesc and link tables */
4013 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4014 		if (!edesc) {
4015 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4016 				     DMA_TO_DEVICE);
4017 			return -ENOMEM;
4018 		}
4019 
4020 		edesc->src_nents = src_nents;
4021 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4022 			      sizeof(*sg_table);
4023 		sg_table = &edesc->sgt[0];
4024 
4025 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4026 		if (ret)
4027 			goto unmap_ctx;
4028 
4029 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4030 
4031 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4032 						  qm_sg_bytes, DMA_TO_DEVICE);
4033 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4034 			dev_err(ctx->dev, "unable to map S/G table\n");
4035 			ret = -ENOMEM;
4036 			goto unmap_ctx;
4037 		}
4038 		edesc->qm_sg_bytes = qm_sg_bytes;
4039 
4040 		state->ctx_dma_len = ctx->ctx_len;
4041 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4042 						ctx->ctx_len, DMA_FROM_DEVICE);
4043 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4044 			dev_err(ctx->dev, "unable to map ctx\n");
4045 			state->ctx_dma = 0;
4046 			ret = -ENOMEM;
4047 			goto unmap_ctx;
4048 		}
4049 
4050 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4051 		dpaa2_fl_set_final(in_fle, true);
4052 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4053 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4054 		dpaa2_fl_set_len(in_fle, to_hash);
4055 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4056 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4057 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4058 
4059 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4060 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4061 		req_ctx->cbk = ahash_done_ctx_dst;
4062 		req_ctx->ctx = &req->base;
4063 		req_ctx->edesc = edesc;
4064 
4065 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4066 		if (ret != -EINPROGRESS &&
4067 		    !(ret == -EBUSY &&
4068 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4069 			goto unmap_ctx;
4070 
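		/*
		 * The running context is now written back by the engine into
		 * state->caam_ctx, so switch to the context-based handlers.
		 */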
4071 		state->update = ahash_update_ctx;
4072 		state->finup = ahash_finup_ctx;
4073 		state->final = ahash_final_ctx;
4074 	} else if (*next_buflen) {
4075 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4076 					 req->nbytes, 0);
4077 		*buflen = *next_buflen;
4078 
4079 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4080 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4081 				     *buflen, 1);
4082 	}
4083 
4084 	return ret;
4085 unmap_ctx:
4086 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4087 	qi_cache_free(edesc);
4088 	return ret;
4089 }
4090 
4091 static int ahash_finup_no_ctx(struct ahash_request *req)
4092 {
4093 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4094 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4095 	struct caam_hash_state *state = ahash_request_ctx(req);
4096 	struct caam_request *req_ctx = &state->caam_req;
4097 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4098 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4099 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4100 		      GFP_KERNEL : GFP_ATOMIC;
4101 	int buflen = state->buflen;
4102 	int qm_sg_bytes, src_nents, mapped_nents;
4103 	int digestsize = crypto_ahash_digestsize(ahash);
4104 	struct ahash_edesc *edesc;
4105 	struct dpaa2_sg_entry *sg_table;
4106 	int ret = -ENOMEM;
4107 
4108 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4109 	if (src_nents < 0) {
4110 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4111 		return src_nents;
4112 	}
4113 
4114 	if (src_nents) {
4115 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4116 					  DMA_TO_DEVICE);
4117 		if (!mapped_nents) {
4118 			dev_err(ctx->dev, "unable to DMA map source\n");
4119 			return ret;
4120 		}
4121 	} else {
4122 		mapped_nents = 0;
4123 	}
4124 
4125 	/* allocate space for base edesc and link tables */
4126 	edesc = qi_cache_zalloc(GFP_DMA | flags);
4127 	if (!edesc) {
4128 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4129 		return ret;
4130 	}
4131 
4132 	edesc->src_nents = src_nents;
4133 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4134 	sg_table = &edesc->sgt[0];
4135 
4136 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4137 	if (ret)
4138 		goto unmap;
4139 
4140 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4141 
4142 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4143 					  DMA_TO_DEVICE);
4144 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4145 		dev_err(ctx->dev, "unable to map S/G table\n");
4146 		ret = -ENOMEM;
4147 		goto unmap;
4148 	}
4149 	edesc->qm_sg_bytes = qm_sg_bytes;
4150 
4151 	state->ctx_dma_len = digestsize;
4152 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4153 					DMA_FROM_DEVICE);
4154 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4155 		dev_err(ctx->dev, "unable to map ctx\n");
4156 		state->ctx_dma = 0;
4157 		ret = -ENOMEM;
4158 		goto unmap;
4159 	}
4160 
4161 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4162 	dpaa2_fl_set_final(in_fle, true);
4163 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4164 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4165 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4166 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4167 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4168 	dpaa2_fl_set_len(out_fle, digestsize);
4169 
4170 	req_ctx->flc = &ctx->flc[DIGEST];
4171 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4172 	req_ctx->cbk = ahash_done;
4173 	req_ctx->ctx = &req->base;
4174 	req_ctx->edesc = edesc;
4175 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4176 	if (ret != -EINPROGRESS &&
4177 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4178 		goto unmap;
4179 
4180 	return ret;
4181 unmap:
4182 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4183 	qi_cache_free(edesc);
4184 	return ret;
4185 }
4186 
4187 static int ahash_update_first(struct ahash_request *req)
4188 {
4189 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4190 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4191 	struct caam_hash_state *state = ahash_request_ctx(req);
4192 	struct caam_request *req_ctx = &state->caam_req;
4193 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4194 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4195 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4196 		      GFP_KERNEL : GFP_ATOMIC;
4197 	u8 *buf = state->buf;
4198 	int *buflen = &state->buflen;
4199 	int *next_buflen = &state->next_buflen;
4200 	int to_hash;
4201 	int src_nents, mapped_nents;
4202 	struct ahash_edesc *edesc;
4203 	int ret = 0;
4204 
4205 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4206 				      1);
4207 	to_hash = req->nbytes - *next_buflen;
4208 
4209 	if (to_hash) {
4210 		struct dpaa2_sg_entry *sg_table;
4211 		int src_len = req->nbytes - *next_buflen;
4212 
4213 		src_nents = sg_nents_for_len(req->src, src_len);
4214 		if (src_nents < 0) {
4215 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4216 			return src_nents;
4217 		}
4218 
4219 		if (src_nents) {
4220 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4221 						  DMA_TO_DEVICE);
4222 			if (!mapped_nents) {
4223 				dev_err(ctx->dev, "unable to map source for DMA\n");
4224 				return -ENOMEM;
4225 			}
4226 		} else {
4227 			mapped_nents = 0;
4228 		}
4229 
4230 		/* allocate space for base edesc and link tables */
4231 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4232 		if (!edesc) {
4233 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4234 				     DMA_TO_DEVICE);
4235 			return -ENOMEM;
4236 		}
4237 
4238 		edesc->src_nents = src_nents;
4239 		sg_table = &edesc->sgt[0];
4240 
4241 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4242 		dpaa2_fl_set_final(in_fle, true);
4243 		dpaa2_fl_set_len(in_fle, to_hash);
4244 
4245 		if (mapped_nents > 1) {
4246 			int qm_sg_bytes;
4247 
4248 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4249 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4250 				      sizeof(*sg_table);
4251 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4252 							  qm_sg_bytes,
4253 							  DMA_TO_DEVICE);
4254 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4255 				dev_err(ctx->dev, "unable to map S/G table\n");
4256 				ret = -ENOMEM;
4257 				goto unmap_ctx;
4258 			}
4259 			edesc->qm_sg_bytes = qm_sg_bytes;
4260 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4261 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4262 		} else {
4263 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4264 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4265 		}
4266 
4267 		state->ctx_dma_len = ctx->ctx_len;
4268 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4269 						ctx->ctx_len, DMA_FROM_DEVICE);
4270 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4271 			dev_err(ctx->dev, "unable to map ctx\n");
4272 			state->ctx_dma = 0;
4273 			ret = -ENOMEM;
4274 			goto unmap_ctx;
4275 		}
4276 
4277 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4278 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4279 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4280 
4281 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4282 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4283 		req_ctx->cbk = ahash_done_ctx_dst;
4284 		req_ctx->ctx = &req->base;
4285 		req_ctx->edesc = edesc;
4286 
4287 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4288 		if (ret != -EINPROGRESS &&
4289 		    !(ret == -EBUSY && req->base.flags &
4290 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4291 			goto unmap_ctx;
4292 
4293 		state->update = ahash_update_ctx;
4294 		state->finup = ahash_finup_ctx;
4295 		state->final = ahash_final_ctx;
4296 	} else if (*next_buflen) {
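		/*
		 * Less than one full block so far: just buffer the data and
		 * keep using the no-context handlers.
		 */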
4297 		state->update = ahash_update_no_ctx;
4298 		state->finup = ahash_finup_no_ctx;
4299 		state->final = ahash_final_no_ctx;
4300 		scatterwalk_map_and_copy(buf, req->src, 0,
4301 					 req->nbytes, 0);
4302 		*buflen = *next_buflen;
4303 
4304 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4305 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4306 				     *buflen, 1);
4307 	}
4308 
4309 	return ret;
4310 unmap_ctx:
4311 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4312 	qi_cache_free(edesc);
4313 	return ret;
4314 }
4315 
4316 static int ahash_finup_first(struct ahash_request *req)
4317 {
4318 	return ahash_digest(req);
4319 }
4320 
4321 static int ahash_init(struct ahash_request *req)
4322 {
4323 	struct caam_hash_state *state = ahash_request_ctx(req);
4324 
4325 	state->update = ahash_update_first;
4326 	state->finup = ahash_finup_first;
4327 	state->final = ahash_final_no_ctx;
4328 
4329 	state->ctx_dma = 0;
4330 	state->ctx_dma_len = 0;
4331 	state->buf_dma = 0;
4332 	state->buflen = 0;
4333 	state->next_buflen = 0;
4334 
4335 	return 0;
4336 }
4337 
4338 static int ahash_update(struct ahash_request *req)
4339 {
4340 	struct caam_hash_state *state = ahash_request_ctx(req);
4341 
4342 	return state->update(req);
4343 }
4344 
4345 static int ahash_finup(struct ahash_request *req)
4346 {
4347 	struct caam_hash_state *state = ahash_request_ctx(req);
4348 
4349 	return state->finup(req);
4350 }
4351 
4352 static int ahash_final(struct ahash_request *req)
4353 {
4354 	struct caam_hash_state *state = ahash_request_ctx(req);
4355 
4356 	return state->final(req);
4357 }
4358 
4359 static int ahash_export(struct ahash_request *req, void *out)
4360 {
4361 	struct caam_hash_state *state = ahash_request_ctx(req);
4362 	struct caam_export_state *export = out;
4363 	u8 *buf = state->buf;
4364 	int len = state->buflen;
4365 
4366 	memcpy(export->buf, buf, len);
4367 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4368 	export->buflen = len;
4369 	export->update = state->update;
4370 	export->final = state->final;
4371 	export->finup = state->finup;
4372 
4373 	return 0;
4374 }
4375 
4376 static int ahash_import(struct ahash_request *req, const void *in)
4377 {
4378 	struct caam_hash_state *state = ahash_request_ctx(req);
4379 	const struct caam_export_state *export = in;
4380 
4381 	memset(state, 0, sizeof(*state));
4382 	memcpy(state->buf, export->buf, export->buflen);
4383 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4384 	state->buflen = export->buflen;
4385 	state->update = export->update;
4386 	state->final = export->final;
4387 	state->finup = export->finup;
4388 
4389 	return 0;
4390 }
4391 
4392 struct caam_hash_template {
4393 	char name[CRYPTO_MAX_ALG_NAME];
4394 	char driver_name[CRYPTO_MAX_ALG_NAME];
4395 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4396 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4397 	unsigned int blocksize;
4398 	struct ahash_alg template_ahash;
4399 	u32 alg_type;
4400 };
4401 
4402 /* ahash descriptors */
4403 static struct caam_hash_template driver_hash[] = {
4404 	{
4405 		.name = "sha1",
4406 		.driver_name = "sha1-caam-qi2",
4407 		.hmac_name = "hmac(sha1)",
4408 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4409 		.blocksize = SHA1_BLOCK_SIZE,
4410 		.template_ahash = {
4411 			.init = ahash_init,
4412 			.update = ahash_update,
4413 			.final = ahash_final,
4414 			.finup = ahash_finup,
4415 			.digest = ahash_digest,
4416 			.export = ahash_export,
4417 			.import = ahash_import,
4418 			.setkey = ahash_setkey,
4419 			.halg = {
4420 				.digestsize = SHA1_DIGEST_SIZE,
4421 				.statesize = sizeof(struct caam_export_state),
4422 			},
4423 		},
4424 		.alg_type = OP_ALG_ALGSEL_SHA1,
4425 	}, {
4426 		.name = "sha224",
4427 		.driver_name = "sha224-caam-qi2",
4428 		.hmac_name = "hmac(sha224)",
4429 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4430 		.blocksize = SHA224_BLOCK_SIZE,
4431 		.template_ahash = {
4432 			.init = ahash_init,
4433 			.update = ahash_update,
4434 			.final = ahash_final,
4435 			.finup = ahash_finup,
4436 			.digest = ahash_digest,
4437 			.export = ahash_export,
4438 			.import = ahash_import,
4439 			.setkey = ahash_setkey,
4440 			.halg = {
4441 				.digestsize = SHA224_DIGEST_SIZE,
4442 				.statesize = sizeof(struct caam_export_state),
4443 			},
4444 		},
4445 		.alg_type = OP_ALG_ALGSEL_SHA224,
4446 	}, {
4447 		.name = "sha256",
4448 		.driver_name = "sha256-caam-qi2",
4449 		.hmac_name = "hmac(sha256)",
4450 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4451 		.blocksize = SHA256_BLOCK_SIZE,
4452 		.template_ahash = {
4453 			.init = ahash_init,
4454 			.update = ahash_update,
4455 			.final = ahash_final,
4456 			.finup = ahash_finup,
4457 			.digest = ahash_digest,
4458 			.export = ahash_export,
4459 			.import = ahash_import,
4460 			.setkey = ahash_setkey,
4461 			.halg = {
4462 				.digestsize = SHA256_DIGEST_SIZE,
4463 				.statesize = sizeof(struct caam_export_state),
4464 			},
4465 		},
4466 		.alg_type = OP_ALG_ALGSEL_SHA256,
4467 	}, {
4468 		.name = "sha384",
4469 		.driver_name = "sha384-caam-qi2",
4470 		.hmac_name = "hmac(sha384)",
4471 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4472 		.blocksize = SHA384_BLOCK_SIZE,
4473 		.template_ahash = {
4474 			.init = ahash_init,
4475 			.update = ahash_update,
4476 			.final = ahash_final,
4477 			.finup = ahash_finup,
4478 			.digest = ahash_digest,
4479 			.export = ahash_export,
4480 			.import = ahash_import,
4481 			.setkey = ahash_setkey,
4482 			.halg = {
4483 				.digestsize = SHA384_DIGEST_SIZE,
4484 				.statesize = sizeof(struct caam_export_state),
4485 			},
4486 		},
4487 		.alg_type = OP_ALG_ALGSEL_SHA384,
4488 	}, {
4489 		.name = "sha512",
4490 		.driver_name = "sha512-caam-qi2",
4491 		.hmac_name = "hmac(sha512)",
4492 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4493 		.blocksize = SHA512_BLOCK_SIZE,
4494 		.template_ahash = {
4495 			.init = ahash_init,
4496 			.update = ahash_update,
4497 			.final = ahash_final,
4498 			.finup = ahash_finup,
4499 			.digest = ahash_digest,
4500 			.export = ahash_export,
4501 			.import = ahash_import,
4502 			.setkey = ahash_setkey,
4503 			.halg = {
4504 				.digestsize = SHA512_DIGEST_SIZE,
4505 				.statesize = sizeof(struct caam_export_state),
4506 			},
4507 		},
4508 		.alg_type = OP_ALG_ALGSEL_SHA512,
4509 	}, {
4510 		.name = "md5",
4511 		.driver_name = "md5-caam-qi2",
4512 		.hmac_name = "hmac(md5)",
4513 		.hmac_driver_name = "hmac-md5-caam-qi2",
4514 		.blocksize = MD5_BLOCK_WORDS * 4,
4515 		.template_ahash = {
4516 			.init = ahash_init,
4517 			.update = ahash_update,
4518 			.final = ahash_final,
4519 			.finup = ahash_finup,
4520 			.digest = ahash_digest,
4521 			.export = ahash_export,
4522 			.import = ahash_import,
4523 			.setkey = ahash_setkey,
4524 			.halg = {
4525 				.digestsize = MD5_DIGEST_SIZE,
4526 				.statesize = sizeof(struct caam_export_state),
4527 			},
4528 		},
4529 		.alg_type = OP_ALG_ALGSEL_MD5,
4530 	}
4531 };
4532 
4533 struct caam_hash_alg {
4534 	struct list_head entry;
4535 	struct device *dev;
4536 	int alg_type;
4537 	struct ahash_alg ahash_alg;
4538 };
4539 
4540 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4541 {
4542 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4543 	struct crypto_alg *base = tfm->__crt_alg;
4544 	struct hash_alg_common *halg =
4545 		 container_of(base, struct hash_alg_common, base);
4546 	struct ahash_alg *alg =
4547 		 container_of(halg, struct ahash_alg, halg);
4548 	struct caam_hash_alg *caam_hash =
4549 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4550 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4551 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4552 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4553 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4554 					 HASH_MSG_LEN + 32,
4555 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4556 					 HASH_MSG_LEN + 64,
4557 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4558 	dma_addr_t dma_addr;
4559 	int i;
4560 
4561 	ctx->dev = caam_hash->dev;
4562 
4563 	if (alg->setkey) {
4564 		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4565 							  ARRAY_SIZE(ctx->key),
4566 							  DMA_TO_DEVICE,
4567 							  DMA_ATTR_SKIP_CPU_SYNC);
4568 		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4569 			dev_err(ctx->dev, "unable to map key\n");
4570 			return -ENOMEM;
4571 		}
4572 	}
4573 
4574 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4575 					DMA_BIDIRECTIONAL,
4576 					DMA_ATTR_SKIP_CPU_SYNC);
4577 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4578 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4579 		if (ctx->adata.key_dma)
4580 			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4581 					       ARRAY_SIZE(ctx->key),
4582 					       DMA_TO_DEVICE,
4583 					       DMA_ATTR_SKIP_CPU_SYNC);
4584 		return -ENOMEM;
4585 	}
4586 
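	/*
	 * The flow contexts were mapped as one contiguous region; derive the
	 * DMA address of each per-operation flow context from the base.
	 */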
4587 	for (i = 0; i < HASH_NUM_OP; i++)
4588 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4589 
4590 	/* copy descriptor header template value */
4591 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4592 
4593 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4594 				   OP_ALG_ALGSEL_SUBMASK) >>
4595 				  OP_ALG_ALGSEL_SHIFT];
4596 
4597 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4598 				 sizeof(struct caam_hash_state));
4599 
4600 	/*
4601 	 * For keyed hash algorithms, the shared descriptors
4602 	 * will be created later, in the setkey() callback.
4603 	 */
4604 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4605 }
4606 
4607 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4608 {
4609 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4610 
4611 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4612 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4613 	if (ctx->adata.key_dma)
4614 		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4615 				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4616 				       DMA_ATTR_SKIP_CPU_SYNC);
4617 }
4618 
4619 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4620 	struct caam_hash_template *template, bool keyed)
4621 {
4622 	struct caam_hash_alg *t_alg;
4623 	struct ahash_alg *halg;
4624 	struct crypto_alg *alg;
4625 
4626 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4627 	if (!t_alg)
4628 		return ERR_PTR(-ENOMEM);
4629 
4630 	t_alg->ahash_alg = template->template_ahash;
4631 	halg = &t_alg->ahash_alg;
4632 	alg = &halg->halg.base;
4633 
4634 	if (keyed) {
4635 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4636 			 template->hmac_name);
4637 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4638 			 template->hmac_driver_name);
4639 	} else {
4640 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4641 			 template->name);
4642 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4643 			 template->driver_name);
4644 		t_alg->ahash_alg.setkey = NULL;
4645 	}
4646 	alg->cra_module = THIS_MODULE;
4647 	alg->cra_init = caam_hash_cra_init;
4648 	alg->cra_exit = caam_hash_cra_exit;
4649 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4650 	alg->cra_priority = CAAM_CRA_PRIORITY;
4651 	alg->cra_blocksize = template->blocksize;
4652 	alg->cra_alignmask = 0;
4653 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4654 
4655 	t_alg->alg_type = template->alg_type;
4656 	t_alg->dev = dev;
4657 
4658 	return t_alg;
4659 }
4660 
4661 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4662 {
4663 	struct dpaa2_caam_priv_per_cpu *ppriv;
4664 
4665 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4666 	napi_schedule_irqoff(&ppriv->napi);
4667 }
4668 
4669 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4670 {
4671 	struct device *dev = priv->dev;
4672 	struct dpaa2_io_notification_ctx *nctx;
4673 	struct dpaa2_caam_priv_per_cpu *ppriv;
4674 	int err, i = 0, cpu;
4675 
4676 	for_each_online_cpu(cpu) {
4677 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4678 		ppriv->priv = priv;
4679 		nctx = &ppriv->nctx;
4680 		nctx->is_cdan = 0;
4681 		nctx->id = ppriv->rsp_fqid;
4682 		nctx->desired_cpu = cpu;
4683 		nctx->cb = dpaa2_caam_fqdan_cb;
4684 
4685 		/* Register notification callbacks */
4686 		ppriv->dpio = dpaa2_io_service_select(cpu);
4687 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4688 		if (unlikely(err)) {
4689 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4690 			nctx->cb = NULL;
4691 			/*
4692 			 * If there is no affine DPIO for this core, there is
4693 			 * probably none available for the next cores either.
4694 			 * Signal that we want to retry later, in case the
4695 			 * DPIO devices weren't probed yet.
4696 			 */
4697 			err = -EPROBE_DEFER;
4698 			goto err;
4699 		}
4700 
4701 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4702 						     dev);
4703 		if (unlikely(!ppriv->store)) {
4704 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4705 			err = -ENOMEM;
4706 			goto err;
4707 		}
4708 
4709 		if (++i == priv->num_pairs)
4710 			break;
4711 	}
4712 
4713 	return 0;
4714 
4715 err:
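	/* Unwind only the cpus that were set up before the failure. */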
4716 	for_each_online_cpu(cpu) {
4717 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4718 		if (!ppriv->nctx.cb)
4719 			break;
4720 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4721 	}
4722 
4723 	for_each_online_cpu(cpu) {
4724 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4725 		if (!ppriv->store)
4726 			break;
4727 		dpaa2_io_store_destroy(ppriv->store);
4728 	}
4729 
4730 	return err;
4731 }
4732 
4733 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4734 {
4735 	struct dpaa2_caam_priv_per_cpu *ppriv;
4736 	int i = 0, cpu;
4737 
4738 	for_each_online_cpu(cpu) {
4739 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4740 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4741 					    priv->dev);
4742 		dpaa2_io_store_destroy(ppriv->store);
4743 
4744 		if (++i == priv->num_pairs)
4745 			return;
4746 	}
4747 }
4748 
4749 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4750 {
4751 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4752 	struct device *dev = priv->dev;
4753 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4754 	struct dpaa2_caam_priv_per_cpu *ppriv;
4755 	int err = 0, i = 0, cpu;
4756 
4757 	/* Configure Rx queues */
4758 	for_each_online_cpu(cpu) {
4759 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4760 
4761 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4762 				       DPSECI_QUEUE_OPT_USER_CTX;
4763 		rx_queue_cfg.order_preservation_en = 0;
4764 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4765 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4766 		/*
4767 		 * Rx priority (WQ) doesn't really matter, since we use
4768 		 * pull mode, i.e. volatile dequeues from specific FQs
4769 		 */
4770 		rx_queue_cfg.dest_cfg.priority = 0;
4771 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4772 
4773 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4774 					  &rx_queue_cfg);
4775 		if (err) {
4776 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4777 				err);
4778 			return err;
4779 		}
4780 
4781 		if (++i == priv->num_pairs)
4782 			break;
4783 	}
4784 
4785 	return err;
4786 }
4787 
4788 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4789 {
4790 	struct device *dev = priv->dev;
4791 
4792 	if (!priv->cscn_mem)
4793 		return;
4794 
4795 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4796 	kfree(priv->cscn_mem);
4797 }
4798 
4799 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4800 {
4801 	struct device *dev = priv->dev;
4802 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4803 	int err;
4804 
4805 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4806 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4807 		if (err)
4808 			dev_err(dev, "dpseci_reset() failed\n");
4809 	}
4810 
4811 	dpaa2_dpseci_congestion_free(priv);
4812 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4813 }
4814 
4815 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4816 				  const struct dpaa2_fd *fd)
4817 {
4818 	struct caam_request *req;
4819 	u32 fd_err;
4820 
4821 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4822 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4823 		return;
4824 	}
4825 
4826 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4827 	if (unlikely(fd_err))
4828 		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4829 
4830 	/*
4831 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4832 	 * in FD[ERR] or FD[FRC].
4833 	 */
4834 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4835 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4836 			 DMA_BIDIRECTIONAL);
4837 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4838 }
4839 
4840 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4841 {
4842 	int err;
4843 
4844 	/* Retry while portal is busy */
4845 	do {
4846 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4847 					       ppriv->store);
4848 	} while (err == -EBUSY);
4849 
4850 	if (unlikely(err))
4851 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4852 
4853 	return err;
4854 }
4855 
4856 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4857 {
4858 	struct dpaa2_dq *dq;
4859 	int cleaned = 0, is_last;
4860 
4861 	do {
4862 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4863 		if (unlikely(!dq)) {
4864 			if (unlikely(!is_last)) {
4865 				dev_dbg(ppriv->priv->dev,
4866 					"FQ %d returned no valid frames\n",
4867 					ppriv->rsp_fqid);
4868 				/*
4869 				 * MUST retry until we get some sort of
4870 				 * valid response token (be it "empty dequeue"
4871 				 * or a valid frame).
4872 				 */
4873 				continue;
4874 			}
4875 			break;
4876 		}
4877 
4878 		/* Process FD */
4879 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4880 		cleaned++;
4881 	} while (!is_last);
4882 
4883 	return cleaned;
4884 }
4885 
4886 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4887 {
4888 	struct dpaa2_caam_priv_per_cpu *ppriv;
4889 	struct dpaa2_caam_priv *priv;
4890 	int err, cleaned = 0, store_cleaned;
4891 
4892 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4893 	priv = ppriv->priv;
4894 
4895 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4896 		return 0;
4897 
4898 	do {
4899 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4900 		cleaned += store_cleaned;
4901 
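		/*
		 * Stop when the FQ ran dry or when consuming another full
		 * store could exceed the NAPI budget.
		 */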
4902 		if (store_cleaned == 0 ||
4903 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4904 			break;
4905 
4906 		/* Try to dequeue some more */
4907 		err = dpaa2_caam_pull_fq(ppriv);
4908 		if (unlikely(err))
4909 			break;
4910 	} while (1);
4911 
4912 	if (cleaned < budget) {
4913 		napi_complete_done(napi, cleaned);
4914 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4915 		if (unlikely(err))
4916 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4917 				err);
4918 	}
4919 
4920 	return cleaned;
4921 }
4922 
4923 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4924 					 u16 token)
4925 {
4926 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4927 	struct device *dev = priv->dev;
4928 	int err;
4929 
4930 	/*
4931 	 * The congestion group feature is supported starting with DPSECI API
4932 	 * v5.1, and only when the object has been created with this capability.
4933 	 */
4934 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4935 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4936 		return 0;
4937 
4938 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4939 				 GFP_KERNEL | GFP_DMA);
4940 	if (!priv->cscn_mem)
4941 		return -ENOMEM;
4942 
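	/*
	 * The CSCN area must be DPAA2_CSCN_ALIGN aligned; the buffer was
	 * over-allocated above so that the aligned pointer still covers
	 * DPAA2_CSCN_SIZE bytes.
	 */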
4943 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4944 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4945 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4946 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4947 		dev_err(dev, "Error mapping CSCN memory area\n");
4948 		err = -ENOMEM;
4949 		goto err_dma_map;
4950 	}
4951 
4952 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4953 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4954 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4955 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4956 	cong_notif_cfg.message_iova = priv->cscn_dma;
4957 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4958 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4959 					DPSECI_CGN_MODE_COHERENT_WRITE;
4960 
4961 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4962 						 &cong_notif_cfg);
4963 	if (err) {
4964 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4965 		goto err_set_cong;
4966 	}
4967 
4968 	return 0;
4969 
4970 err_set_cong:
4971 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4972 err_dma_map:
4973 	kfree(priv->cscn_mem);
4974 
4975 	return err;
4976 }
4977 
4978 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4979 {
4980 	struct device *dev = &ls_dev->dev;
4981 	struct dpaa2_caam_priv *priv;
4982 	struct dpaa2_caam_priv_per_cpu *ppriv;
4983 	int err, cpu;
4984 	u8 i;
4985 
4986 	priv = dev_get_drvdata(dev);
4987 
4988 	priv->dev = dev;
4989 	priv->dpsec_id = ls_dev->obj_desc.id;
4990 
4991 	/* Get a handle for the DPSECI this interface is associated with */
4992 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4993 	if (err) {
4994 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4995 		goto err_open;
4996 	}
4997 
4998 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4999 				     &priv->minor_ver);
5000 	if (err) {
5001 		dev_err(dev, "dpseci_get_api_version() failed\n");
5002 		goto err_get_vers;
5003 	}
5004 
5005 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5006 
5007 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5008 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5009 		if (err) {
5010 			dev_err(dev, "dpseci_reset() failed\n");
5011 			goto err_get_vers;
5012 		}
5013 	}
5014 
5015 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5016 				    &priv->dpseci_attr);
5017 	if (err) {
5018 		dev_err(dev, "dpseci_get_attributes() failed\n");
5019 		goto err_get_vers;
5020 	}
5021 
5022 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5023 				  &priv->sec_attr);
5024 	if (err) {
5025 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
5026 		goto err_get_vers;
5027 	}
5028 
5029 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5030 	if (err) {
5031 		dev_err(dev, "setup_congestion() failed\n");
5032 		goto err_get_vers;
5033 	}
5034 
5035 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5036 			      priv->dpseci_attr.num_tx_queues);
5037 	if (priv->num_pairs > num_online_cpus()) {
5038 		dev_warn(dev, "%d queues won't be used\n",
5039 			 priv->num_pairs - num_online_cpus());
5040 		priv->num_pairs = num_online_cpus();
5041 	}
5042 
5043 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5044 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5045 					  &priv->rx_queue_attr[i]);
5046 		if (err) {
5047 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
5048 			goto err_get_rx_queue;
5049 		}
5050 	}
5051 
5052 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5053 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5054 					  &priv->tx_queue_attr[i]);
5055 		if (err) {
5056 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
5057 			goto err_get_rx_queue;
5058 		}
5059 	}
5060 
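	/*
	 * Distribute the queue pairs round-robin over the online CPUs:
	 * every CPU is assigned a request (Tx) FQ so it can enqueue, but
	 * only the first num_pairs CPUs also get a response (Rx) FQ and a
	 * NAPI context for dequeueing.
	 */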
5061 	i = 0;
5062 	for_each_online_cpu(cpu) {
5063 		u8 j;
5064 
5065 		j = i % priv->num_pairs;
5066 
5067 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
5068 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5069 
5070 		/*
5071 		 * Allow all cores to enqueue, while only some of them
5072 		 * will take part in dequeuing.
5073 		 */
5074 		if (++i > priv->num_pairs)
5075 			continue;
5076 
5077 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5078 		ppriv->prio = j;
5079 
5080 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5081 			priv->rx_queue_attr[j].fqid,
5082 			priv->tx_queue_attr[j].fqid);
5083 
5084 		ppriv->net_dev.dev = *dev;
5085 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5086 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
5087 			       DPAA2_CAAM_NAPI_WEIGHT);
5088 	}
5089 
5090 	return 0;
5091 
5092 err_get_rx_queue:
5093 	dpaa2_dpseci_congestion_free(priv);
5094 err_get_vers:
5095 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5096 err_open:
5097 	return err;
5098 }
5099 
5100 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5101 {
5102 	struct device *dev = priv->dev;
5103 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5104 	struct dpaa2_caam_priv_per_cpu *ppriv;
5105 	int i;
5106 
5107 	for (i = 0; i < priv->num_pairs; i++) {
5108 		ppriv = per_cpu_ptr(priv->ppriv, i);
5109 		napi_enable(&ppriv->napi);
5110 	}
5111 
5112 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5113 }
5114 
5115 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5116 {
5117 	struct device *dev = priv->dev;
5118 	struct dpaa2_caam_priv_per_cpu *ppriv;
5119 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5120 	int i, err = 0, enabled;
5121 
5122 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5123 	if (err) {
5124 		dev_err(dev, "dpseci_disable() failed\n");
5125 		return err;
5126 	}
5127 
5128 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5129 	if (err) {
5130 		dev_err(dev, "dpseci_is_enabled() failed\n");
5131 		return err;
5132 	}
5133 
5134 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5135 
5136 	for (i = 0; i < priv->num_pairs; i++) {
5137 		ppriv = per_cpu_ptr(priv->ppriv, i);
5138 		napi_disable(&ppriv->napi);
5139 		netif_napi_del(&ppriv->napi);
5140 	}
5141 
5142 	return 0;
5143 }
5144 
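/* ahash algorithms successfully registered at probe time, unregistered and freed at remove */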
5145 static struct list_head hash_list;
5146 
5147 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5148 {
5149 	struct device *dev;
5150 	struct dpaa2_caam_priv *priv;
5151 	int i, err = 0;
5152 	bool registered = false;
5153 
5154 	/*
5155 	 * There is no way to get CAAM endianness - there is no direct register
5156 	 * space access and MC f/w does not provide this attribute.
5157 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5158 	 * property.
5159 	 */
5160 	caam_little_end = true;
5161 
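	/* DPAA2-based SoCs are not i.MX parts, so i.MX-specific CAAM handling is not needed */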
5162 	caam_imx = false;
5163 
5164 	dev = &dpseci_dev->dev;
5165 
5166 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5167 	if (!priv)
5168 		return -ENOMEM;
5169 
5170 	dev_set_drvdata(dev, priv);
5171 
5172 	priv->domain = iommu_get_domain_for_dev(dev);
5173 
5174 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5175 				     0, SLAB_CACHE_DMA, NULL);
5176 	if (!qi_cache) {
5177 		dev_err(dev, "Can't allocate SEC cache\n");
5178 		return -ENOMEM;
5179 	}
5180 
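	/* DPAA2 devices support at most 49 bits of DMA addressing */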
5181 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5182 	if (err) {
5183 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5184 		goto err_dma_mask;
5185 	}
5186 
5187 	/* Obtain a MC portal */
5188 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5189 	if (err) {
5190 		if (err == -ENXIO)
5191 			err = -EPROBE_DEFER;
5192 		else
5193 			dev_err(dev, "MC portal allocation failed\n");
5194 
5195 		goto err_dma_mask;
5196 	}
5197 
5198 	priv->ppriv = alloc_percpu(*priv->ppriv);
5199 	if (!priv->ppriv) {
5200 		dev_err(dev, "alloc_percpu() failed\n");
5201 		err = -ENOMEM;
5202 		goto err_alloc_ppriv;
5203 	}
5204 
5205 	/* DPSECI initialization */
5206 	err = dpaa2_dpseci_setup(dpseci_dev);
5207 	if (err) {
5208 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5209 		goto err_dpseci_setup;
5210 	}
5211 
5212 	/* DPIO */
5213 	err = dpaa2_dpseci_dpio_setup(priv);
5214 	if (err) {
5215 		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5216 		goto err_dpio_setup;
5217 	}
5218 
5219 	/* DPSECI binding to DPIO */
5220 	err = dpaa2_dpseci_bind(priv);
5221 	if (err) {
5222 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5223 		goto err_bind;
5224 	}
5225 
5226 	/* DPSECI enable */
5227 	err = dpaa2_dpseci_enable(priv);
5228 	if (err) {
5229 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5230 		goto err_bind;
5231 	}
5232 
5233 	dpaa2_dpseci_debugfs_init(priv);
5234 
5235 	/* register crypto algorithms the device supports */
5236 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5237 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5238 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5239 
5240 		/* Skip DES algorithms if not supported by device */
5241 		if (!priv->sec_attr.des_acc_num &&
5242 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5243 		     alg_sel == OP_ALG_ALGSEL_DES))
5244 			continue;
5245 
5246 		/* Skip AES algorithms if not supported by device */
5247 		if (!priv->sec_attr.aes_acc_num &&
5248 		    alg_sel == OP_ALG_ALGSEL_AES)
5249 			continue;
5250 
5251 		/* Skip CHACHA20 algorithms if not supported by device */
5252 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5253 		    !priv->sec_attr.ccha_acc_num)
5254 			continue;
5255 
5256 		t_alg->caam.dev = dev;
5257 		caam_skcipher_alg_init(t_alg);
5258 
5259 		err = crypto_register_skcipher(&t_alg->skcipher);
5260 		if (err) {
5261 			dev_warn(dev, "%s alg registration failed: %d\n",
5262 				 t_alg->skcipher.base.cra_driver_name, err);
5263 			continue;
5264 		}
5265 
5266 		t_alg->registered = true;
5267 		registered = true;
5268 	}
5269 
5270 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5271 		struct caam_aead_alg *t_alg = driver_aeads + i;
5272 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5273 				 OP_ALG_ALGSEL_MASK;
5274 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5275 				 OP_ALG_ALGSEL_MASK;
5276 
5277 		/* Skip DES algorithms if not supported by device */
5278 		if (!priv->sec_attr.des_acc_num &&
5279 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5280 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5281 			continue;
5282 
5283 		/* Skip AES algorithms if not supported by device */
5284 		if (!priv->sec_attr.aes_acc_num &&
5285 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5286 			continue;
5287 
5288 		/* Skip CHACHA20 algorithms if not supported by device */
5289 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5290 		    !priv->sec_attr.ccha_acc_num)
5291 			continue;
5292 
5293 		/* Skip POLY1305 algorithms if not supported by device */
5294 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5295 		    !priv->sec_attr.ptha_acc_num)
5296 			continue;
5297 
5298 		/*
5299 		 * Skip algorithms requiring message digests
5300 		 * if MD not supported by device.
5301 		 */
5302 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5303 		    !priv->sec_attr.md_acc_num)
5304 			continue;
5305 
5306 		t_alg->caam.dev = dev;
5307 		caam_aead_alg_init(t_alg);
5308 
5309 		err = crypto_register_aead(&t_alg->aead);
5310 		if (err) {
5311 			dev_warn(dev, "%s alg registration failed: %d\n",
5312 				 t_alg->aead.base.cra_driver_name, err);
5313 			continue;
5314 		}
5315 
5316 		t_alg->registered = true;
5317 		registered = true;
5318 	}
5319 	if (registered)
5320 		dev_info(dev, "algorithms registered in /proc/crypto\n");
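	/*
	 * The registered implementations are picked up through the generic
	 * crypto API, e.g. crypto_alloc_skcipher("cbc(aes)", 0, 0) selects
	 * this driver whenever CAAM_CRA_PRIORITY outranks the other
	 * available "cbc(aes)" implementations.
	 */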
5321 
5322 	/* register hash algorithms the device supports */
5323 	INIT_LIST_HEAD(&hash_list);
5324 
5325 	/*
5326 	 * Skip registration of any hashing algorithms if MD block
5327 	 * is not present.
5328 	 */
5329 	if (!priv->sec_attr.md_acc_num)
5330 		return 0;
5331 
5332 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5333 		struct caam_hash_alg *t_alg;
5334 		struct caam_hash_template *alg = driver_hash + i;
5335 
5336 		/* register hmac version */
5337 		t_alg = caam_hash_alloc(dev, alg, true);
5338 		if (IS_ERR(t_alg)) {
5339 			err = PTR_ERR(t_alg);
5340 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5341 				 alg->hmac_driver_name, err);
5342 			continue;
5343 		}
5344 
5345 		err = crypto_register_ahash(&t_alg->ahash_alg);
5346 		if (err) {
5347 			dev_warn(dev, "%s alg registration failed: %d\n",
5348 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5349 				 err);
5350 			kfree(t_alg);
5351 		} else {
5352 			list_add_tail(&t_alg->entry, &hash_list);
5353 		}
5354 
5355 		/* register unkeyed version */
5356 		t_alg = caam_hash_alloc(dev, alg, false);
5357 		if (IS_ERR(t_alg)) {
5358 			err = PTR_ERR(t_alg);
5359 			dev_warn(dev, "%s alg allocation failed: %d\n",
5360 				 alg->driver_name, err);
5361 			continue;
5362 		}
5363 
5364 		err = crypto_register_ahash(&t_alg->ahash_alg);
5365 		if (err) {
5366 			dev_warn(dev, "%s alg registration failed: %d\n",
5367 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5368 				 err);
5369 			kfree(t_alg);
5370 		} else {
5371 			list_add_tail(&t_alg->entry, &hash_list);
5372 		}
5373 	}
5374 	if (!list_empty(&hash_list))
5375 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5376 
5377 	return err;
5378 
5379 err_bind:
5380 	dpaa2_dpseci_dpio_free(priv);
5381 err_dpio_setup:
5382 	dpaa2_dpseci_free(priv);
5383 err_dpseci_setup:
5384 	free_percpu(priv->ppriv);
5385 err_alloc_ppriv:
5386 	fsl_mc_portal_free(priv->mc_io);
5387 err_dma_mask:
5388 	kmem_cache_destroy(qi_cache);
5389 
5390 	return err;
5391 }
5392 
5393 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5394 {
5395 	struct device *dev;
5396 	struct dpaa2_caam_priv *priv;
5397 	int i;
5398 
5399 	dev = &ls_dev->dev;
5400 	priv = dev_get_drvdata(dev);
5401 
5402 	dpaa2_dpseci_debugfs_exit(priv);
5403 
5404 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5405 		struct caam_aead_alg *t_alg = driver_aeads + i;
5406 
5407 		if (t_alg->registered)
5408 			crypto_unregister_aead(&t_alg->aead);
5409 	}
5410 
5411 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5412 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5413 
5414 		if (t_alg->registered)
5415 			crypto_unregister_skcipher(&t_alg->skcipher);
5416 	}
5417 
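	/*
	 * The static hash_list is zero-initialized until probe() sets it up;
	 * skip teardown if it was never initialized.
	 */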
5418 	if (hash_list.next) {
5419 		struct caam_hash_alg *t_hash_alg, *p;
5420 
5421 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5422 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5423 			list_del(&t_hash_alg->entry);
5424 			kfree(t_hash_alg);
5425 		}
5426 	}
5427 
5428 	dpaa2_dpseci_disable(priv);
5429 	dpaa2_dpseci_dpio_free(priv);
5430 	dpaa2_dpseci_free(priv);
5431 	free_percpu(priv->ppriv);
5432 	fsl_mc_portal_free(priv->mc_io);
5433 	kmem_cache_destroy(qi_cache);
5434 
5435 	return 0;
5436 }
5437 
5438 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5439 {
5440 	struct dpaa2_fd fd;
5441 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5442 	struct dpaa2_caam_priv_per_cpu *ppriv;
5443 	int err = 0, i;
5444 
5445 	if (IS_ERR(req))
5446 		return PTR_ERR(req);
5447 
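	/*
	 * If congestion notifications are enabled, read back the CSCN record
	 * and drop the request while the DPSECI queues are congested.
	 */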
5448 	if (priv->cscn_mem) {
5449 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5450 					DPAA2_CSCN_SIZE,
5451 					DMA_FROM_DEVICE);
5452 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5453 			dev_dbg_ratelimited(dev, "Dropping request\n");
5454 			return -EBUSY;
5455 		}
5456 	}
5457 
5458 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5459 
5460 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5461 					 DMA_BIDIRECTIONAL);
5462 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5463 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5464 		goto err_out;
5465 	}
5466 
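	/*
	 * Build a frame descriptor in frame list format, pointing at the
	 * caller-prepared frame list (req->fd_flt) that was just DMA-mapped
	 * above.
	 */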
5467 	memset(&fd, 0, sizeof(fd));
5468 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5469 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5470 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5471 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5472 
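	/*
	 * Enqueue via this CPU's DPIO; if the portal is busy, retry a
	 * bounded number of times (twice the number of Tx queues) before
	 * giving up.
	 */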
5473 	ppriv = this_cpu_ptr(priv->ppriv);
5474 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5475 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5476 						  &fd);
5477 		if (err != -EBUSY)
5478 			break;
5479 
5480 		cpu_relax();
5481 	}
5482 
5483 	if (unlikely(err)) {
5484 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5485 		goto err_out;
5486 	}
5487 
5488 	return -EINPROGRESS;
5489 
5490 err_out:
5491 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5492 			 DMA_BIDIRECTIONAL);
5493 	return -EIO;
5494 }
5495 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5496 
5497 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5498 	{
5499 		.vendor = FSL_MC_VENDOR_FREESCALE,
5500 		.obj_type = "dpseci",
5501 	},
5502 	{ .vendor = 0x0 }
5503 };
5504 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5505 
5506 static struct fsl_mc_driver dpaa2_caam_driver = {
5507 	.driver = {
5508 		.name		= KBUILD_MODNAME,
5509 		.owner		= THIS_MODULE,
5510 	},
5511 	.probe		= dpaa2_caam_probe,
5512 	.remove		= dpaa2_caam_remove,
5513 	.match_id_table = dpaa2_caam_match_id_table
5514 };
5515 
5516 MODULE_LICENSE("Dual BSD/GPL");
5517 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5518 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5519 
5520 module_fsl_mc_driver(dpaa2_caam_driver);
5521