// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit-field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
				 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
				 SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Pick an encrypt/decrypt queue cyclically to balance load across the TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

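/*
 * Reserve a per-queue request ID from the IDR and publish the request
 * in req_list, so the completion callback can look the request up again
 * via the tag field of the returned BD.
 */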
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(req->ctx->dev, "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

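/*
 * Per-BD completion callback: validate the BD type, find the owning
 * request by tag, check the done/flag bits and hardware error type,
 * then unmap the request buffers and run the algorithm callback.
 */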
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err_ratelimited(ctx->dev,
				    "err_type[%d],done[%d],flag[%d]\n",
				    req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

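/*
 * Queue one SQE to the hardware. Once qp usage reaches fake_req_limit,
 * requests without CRYPTO_TFM_REQ_MAY_BACKLOG get -EBUSY; backloggable
 * requests sent into a nearly full queue are also linked on
 * qp_ctx->backlog and reported as -EBUSY so completion is deferred.
 */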
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance when an IOMMU is in use, small packets
 * (< 512 bytes) are copied into a pre-mapped buffer (pbuf) instead of
 * being DMA-mapped individually.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, IV and out_mac:
	 * <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every page holds SEC_PBUF_NUM (six, with 4K pages) packages,
	 * and the sec_qp_ctx needs QM_Q_DEPTH of them, so
	 * SEC_PBUF_PAGE_NUM whole pages plus the leftover region make
	 * up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

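/* Allocate per-queue IV, MAC (AEAD only) and pbuf DMA resources */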
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

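/* Set up one queue pair context: SGL pools, DMA resources and the QP itself */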
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is taken as the fake request limit. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

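/*
 * Each GEN_SEC_SETKEY_FUNC() instance below expands to a thin wrapper,
 * e.g. GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) yields:
 *
 *	static int sec_setkey_aes_cbc(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen,
 *					   SEC_CALG_AES, SEC_CMODE_CBC);
 *	}
 */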
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

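/*
 * Small-packet fast path: copy the source scatterlist into the
 * request's pre-mapped pbuf so the hardware sees one contiguous DMA
 * buffer; input and output share the same pbuf address.
 */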
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

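/*
 * Map src/dst for DMA: pbuf-eligible requests take the copy path set up
 * above, everything else is mapped into hardware SGLs. In-place
 * requests (dst == src) reuse the input mapping for the output.
 */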
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

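/* As above, each instance expands to a sec_setkey_<name>() wrapper */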
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

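/*
 * CBC IV chaining: the next IV is the last ciphertext block, read from
 * dst after encryption or from src in the decryption direction.
 */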
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(req->ctx->dev, "copy output iv error!\n");
}

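/*
 * Pop one request off the software backlog once the hardware queue has
 * drained below fake_req_limit, so the caller can signal -EINPROGRESS.
 */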
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Update the output IV after CBC-mode encryption */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

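/*
 * Fill the authentication half of the SQE: key address, MAC and key
 * lengths in SEC_SQE_LEN_RATE-byte units, hash algorithm, and
 * cipher-then-auth (encrypt) vs auth-then-cipher (decrypt) ordering.
 */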
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

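/*
 * Common request path: pick a queue, map buffers and fill the BD, then
 * send it. On a send failure the user's IV is restored and all
 * per-request resources are released.
 */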
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* For CBC decryption, save the output IV before the data is overwritten */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the IV from the user's request */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(ctx->dev, "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

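/*
 * Validate AEAD parameters and derive the cipher length; on decryption
 * the trailing authentication tag is not part of the cipher payload.
 */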
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
	    SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(struct hisi_qm *qm)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}