// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)
#define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart's public key we compute the
	 * shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 */
	char *xa_p; /* low address: x--->p, please refer to HiSilicon HPRE UM */
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct hpre_asym_request **req_list;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
	};
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
};

static DEFINE_MUTEX(hpre_alg_lock);
static unsigned int hpre_active_devs;

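/*
 * Outstanding requests are tracked in a per-context IDR: the allocated ID
 * indexes ctx->req_list and is carried in the SQE tag field so that the
 * completion handler can map a hardware response back to its request.
 * IDs are bounded by the queue depth (QM_Q_DEPTH).
 */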
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (id < 0)
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	struct hpre *hpre;
	int ret;

	/* find the proper hpre device, which is near the current CPU core */
	hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
	if (!hpre) {
		pr_err("Can not find proper hpre device!\n");
		return ERR_PTR(-ENODEV);
	}

	qp = hisi_qm_create_qp(&hpre->qm, 0);
	if (IS_ERR(qp)) {
		pci_err(hpre->qm.pdev, "Can not create qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_release_qp(qp);
		pci_err(hpre->qm.pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
			      struct scatterlist *data, unsigned int len,
			      int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (dma_mapping_error(dev, *tmp)) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

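/*
 * Fallback path: allocate a coherent buffer of ctx->key_sz bytes and copy
 * the source data into it right-aligned (data ends at the top of the
 * buffer), presumably because the engine expects fixed-width big-number
 * operands. For destination data only the buffer is allocated here; the
 * result is copied back to the caller's scatterlist on completion.
 */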
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
			       struct scatterlist *data, unsigned int len,
			       int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (shift < 0)
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			 struct scatterlist *data, unsigned int len,
			 int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp;
	int ret;

	/*
	 * A single scatterlist entry of exactly key_sz bytes can be mapped
	 * directly; everything else, and in particular the DH source data,
	 * is copied into a formatted key_sz buffer first.
	 */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len,
					   is_src, &tmp);
	if (ret)
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
			     struct hpre_asym_request *req,
			     struct scatterlist *dst, struct scatterlist *src)
{
	struct device *dev = HPRE_DEV(ctx);
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (!tmp)
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp,
					 ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (!tmp)
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

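/*
 * Post-process a hardware completion: the SQE tag identifies the software
 * request, and dw0 carries both a hardware error code (above
 * HPRE_SQE_ALG_BITS) and a done status (at HPRE_SQE_DONE_SHIFT). Anything
 * other than "no error" plus "task done" is reported as -EINVAL.
 */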
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
			    void **kreq)
{
	struct hpre_asym_request *req;
	int err, id, done;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	0x7ff
#define HPRE_SQE_DONE_MASK	0x3
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HPRE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;

	if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
		return 0;

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;

	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_release_qp(ctx->qp);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_sqe *sqe = resp;

	/* the SQE tag is little-endian on the wire, convert before indexing */
	ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}

static int hpre_ctx_init(struct hpre_ctx *ctx)
{
	struct hisi_qp *qp;

	qp = hpre_get_qp_and_start();
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}

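/*
 * Fill in the fields common to RSA and DH submissions: the completion
 * callback, the back-pointer to the crypto API request, and the request
 * tag used to find this request again on completion. task_len1 appears to
 * encode the operand size in 8-byte units minus one, derived from key_sz.
 */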
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
	}

	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

#ifdef CONFIG_CRYPTO_DH
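/*
 * Both KPP operations land here: with no req->src the base is the
 * generator and we produce the public key (using the dedicated g = 2
 * algorithm when possible), otherwise req->src holds the peer's public
 * key and the result is the shared secret. The private key and prime
 * were staged in xa_p by hpre_dh_set_secret().
 */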
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	if (!ctx)
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, false);
	if (ret)
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (ret)
			goto clear_all;
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (ret)
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 |= HPRE_ALG_DH_G2;
	else
		msg->dw0 |= HPRE_ALG_DH;
	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (!ret)
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

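/*
 * Only the well-known MODP group sizes are accepted: 768/1024/1536 bits
 * (RFC 2409 groups 1, 2 and 5) and 2048/3072/4096 bits (RFC 3526 groups
 * 14, 15 and 16). The argument is the prime length in bits.
 */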
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	}

	return -EINVAL;
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
		HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
				&ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		memset(ctx->dh.g, 0, sz);
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memset(ctx->dh.xa_p, 0, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
#endif

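/*
 * Key material arrives as big-endian byte strings that may carry leading
 * zero bytes; strip them before sizing and right-aligning the values into
 * the fixed-width key buffers below.
 */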
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	/* check *len first so an all-zero value never reads past the buffer */
	while (*len && !**ptr) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

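/*
 * The RSA handlers below fall back to the software "rsa-generic" tfm for
 * the 512-bit and 1536-bit key sizes the engine does not handle; all other
 * accepted sizes go to hardware, retrying the enqueue a bounded number of
 * times if the queue is momentarily full.
 */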
static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	if (!ctx)
		return -EINVAL;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (!ctx->rsa.pubkey)
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (ret)
		return ret;

	msg->dw0 |= HPRE_ALG_NC_NCRT;
	msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (ret)
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (ret)
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (!ret)
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	if (!ctx)
		return -EINVAL;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (!ctx->rsa.prikey)
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (ret)
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
		msg->dw0 |= HPRE_ALG_NC_CRT;
	} else {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
		msg->dw0 |= HPRE_ALG_NC_NCRT;
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (ret)
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (ret)
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (!ret)
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	/* on failure the caller frees the key buffers via hpre_rsa_clear_ctx() */
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

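/*
 * Copy one CRT parameter into its fixed-size slot, right-aligned and with
 * leading zeros stripped; a value longer than the slot (half the key size)
 * is rejected.
 */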
static int hpre_crt_para_get(char *para, const char *raw,
			     unsigned int raw_sz, unsigned int para_size)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_size)
		return -EINVAL;

	memcpy(para + para_size - len, ptr, len);

	return 0;
}

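/*
 * Build the CRT private key blob consumed by the engine: five
 * half-key-sized fields laid out from the lowest address as
 * dq | dp | q | p | qinv, matching the HPRE_CRT_{Q,P,INV} offsets above.
 */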
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
					&ctx->rsa.dma_crt_prikey,
					GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
				rsa_key->dq_sz, hlf_ksz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
				rsa_key->dp_sz, hlf_ksz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
				rsa_key->q, rsa_key->q_sz, hlf_ksz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
				rsa_key->p, rsa_key->p_sz, hlf_ksz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
				rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
	if (ret)
		goto free_key;

	/* the crt_g2_mode flag doubles as the RSA CRT marker */
	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memset(ctx->rsa.crt_prikey, 0, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If is_clear_all is set, all resources of the QP are released as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memset(ctx->rsa.prikey, 0, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}


/*
 * Decide whether the parsed key carries CRT parameters:
 * return true for a CRT key, false for a non-CRT (N-CRT) key.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* N-CRT keys carry no more than 5 bytes of CRT parameters in total */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	return hpre_ctx_init(ctx);
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
#endif

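/*
 * Algorithm registration is reference-counted across HPRE devices: the
 * algorithms are registered when the first device comes up and removed
 * again when the last one goes away.
 */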
int hpre_algs_register(void)
{
	int ret = 0;

	mutex_lock(&hpre_alg_lock);
	if (++hpre_active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
#ifdef CONFIG_CRYPTO_DH
		ret = crypto_register_kpp(&dh);
		if (ret) {
			crypto_unregister_akcipher(&rsa);
			goto unlock;
		}
#endif
	}

unlock:
	mutex_unlock(&hpre_alg_lock);
	return ret;
}

void hpre_algs_unregister(void)
{
	mutex_lock(&hpre_alg_lock);
	if (--hpre_active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
		crypto_unregister_kpp(&dh);
#endif
	}
	mutex_unlock(&hpre_alg_lock);
}