xref: /linux/drivers/crypto/caam/caampkc.c (revision eff9771d)
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the required key parameters, input and output pointers.
 */
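
/*
 * Usage sketch (illustrative only, not part of this driver): kernel users
 * reach this implementation through the generic akcipher API, usually via
 * the "pkcs1pad(rsa-caam,...)" template rather than through raw "rsa".
 * The names der_key, src_sg, dst_sg and the lengths are placeholders:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *areq;
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	areq = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	akcipher_request_set_crypt(areq, src_sg, dst_sg, src_len, dst_len);
 *	crypto_akcipher_encrypt(areq);
 *
 * crypto_akcipher_encrypt() returns -EINPROGRESS once the job is queued.
 */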
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
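
/*
 * Each job descriptor is a one-word header plus a one-word PROTOCOL
 * operation command (hence 2 * CAAM_CMD_SZ) wrapped around the
 * form-specific Protocol Data Block, see init_rsa_*_desc().
 */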

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

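/*
 * The completion callbacks below are invoked from the job ring's dequeue
 * tasklet (softirq context), so they must not sleep; each one unmaps the
 * job's DMA resources, frees the extended descriptor and completes the
 * akcipher request with the translated CAAM status.
 */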
/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes of a given
 * scatterlist, so that the caller can strip them.
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of bytes, counted from the start, to examine
 * @flags : operation flags for the sg mapping iterator
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
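	/*
	 * Walk the scatterlist one mapped chunk at a time; buff/len track
	 * the chunk being scanned. len starts out as 0, so the first pass
	 * through the loop below only fetches the first chunk.
	 */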
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the modulus n,
		 * so zero padding will be needed
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

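	/*
	 * An input hw s/g table is needed if the source is scattered or if
	 * a zero-padding entry must be prepended; an output one only if the
	 * destination is scattered. pad_sg_nents() rounds the entry count
	 * up to the multiple the CAAM DMA engine expects.
	 */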
	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

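	/*
	 * For RSA_PUB the hardware reads the input message through the f
	 * pointer and writes the result through the g pointer; a single
	 * mapped segment is pointed at directly, while scattered I/O goes
	 * through the sec4 S/G table built in rsa_edesc_alloc().
	 */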
	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

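	/*
	 * Dispatch on the private key form: form 1 uses (n, d), form 2 adds
	 * the primes (p, q) and form 3 uses the CRT quantities
	 * (p, q, dP, dQ, qInv). The form was chosen at setkey time based on
	 * which key components could be allocated.
	 */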
	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * length, as BER encoding requires the minimum number of bytes to encode an
 * integer. The decoded dP, dQ and qInv values therefore have to be
 * zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

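/*
 * The PKHA accelerator handles RSA moduli of up to 4096 bits, which is
 * what CAAM_RSA_MAX_INPUT_SIZE (512 bytes) corresponds to.
 */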
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key into DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The CAAM descriptor
	 * expects a positive integer for the RSA modulus and uses its length
	 * as the operation's output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

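	/*
	 * Try to upgrade the key from form 1 (n, d only) to form 2 and then
	 * to form 3; any allocation failure below silently leaves the key
	 * at the last form reached, which is still usable, just slower.
	 */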
	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key into DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * the decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

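	/*
	 * Map the shared zero buffer for this tfm; it supplies the leading
	 * zero padding for inputs shorter than the modulus, see
	 * rsa_edesc_alloc().
	 */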
	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};
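
/*
 * cra_priority 3000 is meant to outrank the generic software "rsa"
 * implementation (which registers at priority 100), so this driver is
 * preferred whenever the hardware is present.
 */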

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
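	/*
	 * Era < 10 parts report PKHA instantiation in the CHA ID (least
	 * significant) register, while era 10+ parts expose it through a
	 * dedicated PKHA version register.
	 */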
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);
	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}