1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/authenc.h>
6 #include <crypto/cryptd.h>
7 #include <crypto/des.h>
8 #include <crypto/internal/aead.h>
9 #include <crypto/sha1.h>
10 #include <crypto/sha2.h>
11 #include <crypto/xts.h>
12 #include <crypto/gcm.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/sort.h>
16 #include <linux/module.h>
17 #include "otx2_cptvf.h"
18 #include "otx2_cptvf_algs.h"
19 #include "otx2_cpt_reqmgr.h"
20 #include "cn10k_cpt.h"
21 
22 /* Size of salt in AES GCM mode */
23 #define AES_GCM_SALT_SIZE 4
24 /* Size of IV in AES GCM mode */
25 #define AES_GCM_IV_SIZE 8
26 /* Size of ICV (Integrity Check Value) in AES GCM mode */
27 #define AES_GCM_ICV_SIZE 16
28 /* Offset of IV in AES GCM mode */
29 #define AES_GCM_IV_OFFSET 8
30 #define CONTROL_WORD_LEN 8
31 #define KEY2_OFFSET 48
32 #define DMA_MODE_FLAG(dma_mode) \
33 	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
34 
35 /* Truncated SHA digest size */
36 #define SHA1_TRUNC_DIGEST_SIZE 12
37 #define SHA256_TRUNC_DIGEST_SIZE 16
38 #define SHA384_TRUNC_DIGEST_SIZE 24
39 #define SHA512_TRUNC_DIGEST_SIZE 32
40 
41 static DEFINE_MUTEX(mutex);
42 static int is_crypto_registered;
43 
44 struct cpt_device_desc {
45 	struct pci_dev *dev;
46 	int num_queues;
47 };
48 
49 struct cpt_device_table {
50 	atomic_t count;
51 	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
52 };
53 
54 static struct cpt_device_table se_devices = {
55 	.count = ATOMIC_INIT(0)
56 };
57 
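/*
 * Pick the SE device and derive the queue number from the current CPU
 * for submitting a request.
 */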
58 static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
59 {
60 	int count;
61 
62 	count = atomic_read(&se_devices.count);
63 	if (count < 1)
64 		return -ENODEV;
65 
66 	*cpu_num = get_cpu();
67 	/*
68 	 * On the OcteonTX2 platform a CPT instruction queue is bound to
69 	 * each local function (LF); LFs in turn can be attached to a PF
70 	 * or a VF, therefore we always use the first device. We get
71 	 * maximum performance if one CPT queue is available for each CPU,
72 	 * otherwise CPT queues need to be shared between CPUs.
73 	 */
74 	if (*cpu_num >= se_devices.desc[0].num_queues)
75 		*cpu_num %= se_devices.desc[0].num_queues;
76 	*pdev = se_devices.desc[0].dev;
77 
78 	put_cpu();
79 
80 	return 0;
81 }
82 
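/*
 * For the NULL-cipher AEAD path, compare the HMAC calculated by the
 * engine with the HMAC received in the request.
 */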
83 static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
84 {
85 	struct otx2_cpt_req_ctx *rctx;
86 	struct aead_request *req;
87 	struct crypto_aead *tfm;
88 
89 	req = container_of(cpt_req->areq, struct aead_request, base);
90 	tfm = crypto_aead_reqtfm(req);
91 	rctx = aead_request_ctx_dma(req);
92 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
93 		   rctx->fctx.hmac.s.hmac_recv,
94 		   crypto_aead_authsize(tfm)) != 0)
95 		return -EBADMSG;
96 
97 	return 0;
98 }
99 
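/* Completion callback for AEAD requests submitted to the CPT engine. */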
100 static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
101 {
102 	struct otx2_cpt_inst_info *inst_info = arg2;
103 	struct crypto_async_request *areq = arg1;
104 	struct otx2_cpt_req_info *cpt_req;
105 	struct pci_dev *pdev;
106 
107 	if (inst_info) {
108 		cpt_req = inst_info->req;
109 		if (!status) {
110 			/*
111 			 * When the selected cipher is NULL we need to
112 			 * manually verify that the calculated hmac value
113 			 * matches the received hmac value.
114 			 */
115 			if (cpt_req->req_type ==
116 			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
117 			    !cpt_req->is_enc)
118 				status = validate_hmac_cipher_null(cpt_req);
119 		}
120 		pdev = inst_info->pdev;
121 		otx2_cpt_info_destroy(pdev, inst_info);
122 	}
123 	if (areq)
124 		crypto_request_complete(areq, status);
125 }
126 
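/*
 * For CBC ciphers, copy the output IV (last ciphertext block) back into
 * req->iv so that chained requests continue with the correct IV.
 */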
127 static void output_iv_copyback(struct crypto_async_request *areq)
128 {
129 	struct otx2_cpt_req_info *req_info;
130 	struct otx2_cpt_req_ctx *rctx;
131 	struct skcipher_request *sreq;
132 	struct crypto_skcipher *stfm;
133 	struct otx2_cpt_enc_ctx *ctx;
134 	u32 start, ivsize;
135 
136 	sreq = container_of(areq, struct skcipher_request, base);
137 	stfm = crypto_skcipher_reqtfm(sreq);
138 	ctx = crypto_skcipher_ctx(stfm);
139 	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
140 	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
141 		rctx = skcipher_request_ctx_dma(sreq);
142 		req_info = &rctx->cpt_req;
143 		ivsize = crypto_skcipher_ivsize(stfm);
144 		start = sreq->cryptlen - ivsize;
145 
146 		if (req_info->is_enc) {
147 			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
148 						 ivsize, 0);
149 		} else {
150 			if (sreq->src != sreq->dst) {
151 				scatterwalk_map_and_copy(sreq->iv, sreq->src,
152 							 start, ivsize, 0);
153 			} else {
154 				memcpy(sreq->iv, req_info->iv_out, ivsize);
155 				kfree(req_info->iv_out);
156 			}
157 		}
158 	}
159 }
160 
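/* Completion callback for skcipher requests submitted to the CPT engine. */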
161 static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
162 {
163 	struct otx2_cpt_inst_info *inst_info = arg2;
164 	struct crypto_async_request *areq = arg1;
165 	struct pci_dev *pdev;
166 
167 	if (areq) {
168 		if (!status)
169 			output_iv_copyback(areq);
170 		if (inst_info) {
171 			pdev = inst_info->pdev;
172 			otx2_cpt_info_destroy(pdev, inst_info);
173 		}
174 		crypto_request_complete(areq, status);
175 	}
176 }
177 
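/* Add the source scatterlist segments to the request's input (gather) list. */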
178 static inline void update_input_data(struct otx2_cpt_req_info *req_info,
179 				     struct scatterlist *inp_sg,
180 				     u32 nbytes, u32 *argcnt)
181 {
182 	req_info->req.dlen += nbytes;
183 
184 	while (nbytes) {
185 		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
186 		u8 *ptr = sg_virt(inp_sg);
187 
188 		req_info->in[*argcnt].vptr = (void *)ptr;
189 		req_info->in[*argcnt].size = len;
190 		nbytes -= len;
191 		++(*argcnt);
192 		inp_sg = sg_next(inp_sg);
193 	}
194 }
195 
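/* Add the destination scatterlist segments to the request's output (scatter) list. */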
196 static inline void update_output_data(struct otx2_cpt_req_info *req_info,
197 				      struct scatterlist *outp_sg,
198 				      u32 offset, u32 nbytes, u32 *argcnt)
199 {
200 	u32 len, sg_len;
201 	u8 *ptr;
202 
203 	req_info->rlen += nbytes;
204 
205 	while (nbytes) {
206 		sg_len = outp_sg->length - offset;
207 		len = (nbytes < sg_len) ? nbytes : sg_len;
208 		ptr = sg_virt(outp_sg);
209 
210 		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
211 		req_info->out[*argcnt].size = len;
212 		nbytes -= len;
213 		++(*argcnt);
214 		offset = 0;
215 		outp_sg = sg_next(outp_sg);
216 	}
217 }
218 
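/*
 * Build the control word and FLEXICRYPTO context (cipher type, key, IV)
 * for an skcipher request and add them to the input list.
 */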
219 static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
220 				 u32 *argcnt)
221 {
222 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
223 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
224 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
225 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
226 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
227 	int ivsize = crypto_skcipher_ivsize(stfm);
228 	u32 start = req->cryptlen - ivsize;
229 	gfp_t flags;
230 
231 	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
232 			GFP_KERNEL : GFP_ATOMIC;
233 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
234 	req_info->ctrl.s.se_req = 1;
235 
236 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
237 				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
238 	if (enc) {
239 		req_info->req.opcode.s.minor = 2;
240 	} else {
241 		req_info->req.opcode.s.minor = 3;
242 		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
243 		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
244 		    req->src == req->dst) {
245 			req_info->iv_out = kmalloc(ivsize, flags);
246 			if (!req_info->iv_out)
247 				return -ENOMEM;
248 
249 			scatterwalk_map_and_copy(req_info->iv_out, req->src,
250 						 start, ivsize, 0);
251 		}
252 	}
253 	/* Encryption data length */
254 	req_info->req.param1 = req->cryptlen;
255 	/* Authentication data length */
256 	req_info->req.param2 = 0;
257 
258 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
259 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
260 	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
261 
262 	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
263 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
264 	else
265 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
266 
267 	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
268 
269 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
270 
271 	/*
272 	 * Store packet data information in the offset control word
273 	 * (first 8 bytes).
274 	 */
275 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
276 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
277 	req_info->req.dlen += CONTROL_WORD_LEN;
278 	++(*argcnt);
279 
280 	req_info->in[*argcnt].vptr = (u8 *)fctx;
281 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
282 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
283 
284 	++(*argcnt);
285 
286 	return 0;
287 }
288 
289 static inline int create_input_list(struct skcipher_request *req, u32 enc,
290 				    u32 enc_iv_len)
291 {
292 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
293 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
294 	u32 argcnt =  0;
295 	int ret;
296 
297 	ret = create_ctx_hdr(req, enc, &argcnt);
298 	if (ret)
299 		return ret;
300 
301 	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
302 	req_info->in_cnt = argcnt;
303 
304 	return 0;
305 }
306 
307 static inline void create_output_list(struct skcipher_request *req,
308 				      u32 enc_iv_len)
309 {
310 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
311 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
312 	u32 argcnt = 0;
313 
314 	/*
315 	 * OUTPUT buffer processing:
316 	 * AES encryption/decryption output would be
317 	 * received in the following format
318 	 *
319 	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
320 	 * [   16 bytes   ][  request enc/dec data len (AES CBC)  ]
321 	 */
322 	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
323 	req_info->out_cnt = argcnt;
324 }
325 
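/*
 * Handle the request with the software fallback tfm, used when the
 * request is larger than OTX2_CPT_MAX_REQ_SIZE.
 */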
326 static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
327 {
328 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
329 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
330 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
331 	int ret;
332 
333 	if (ctx->fbk_cipher) {
334 		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
335 		skcipher_request_set_callback(&rctx->sk_fbk_req,
336 					      req->base.flags,
337 					      req->base.complete,
338 					      req->base.data);
339 		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
340 					   req->dst, req->cryptlen, req->iv);
341 		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
342 			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
343 	} else {
344 		ret = -EINVAL;
345 	}
346 	return ret;
347 }
348 
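/* Common encrypt/decrypt path for all skcipher algorithms. */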
349 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
350 {
351 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
352 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
353 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
354 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
355 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
356 	struct pci_dev *pdev;
357 	int status, cpu_num;
358 
359 	if (req->cryptlen == 0)
360 		return 0;
361 
362 	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
363 		return -EINVAL;
364 
365 	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
366 		return skcipher_do_fallback(req, enc);
367 
368 	/* Clear control words */
369 	rctx->ctrl_word.flags = 0;
370 	rctx->fctx.enc.enc_ctrl.u = 0;
371 
372 	status = create_input_list(req, enc, enc_iv_len);
373 	if (status)
374 		return status;
375 	create_output_list(req, enc_iv_len);
376 
377 	status = get_se_device(&pdev, &cpu_num);
378 	if (status)
379 		return status;
380 
381 	req_info->callback = otx2_cpt_skcipher_callback;
382 	req_info->areq = &req->base;
383 	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
384 	req_info->is_enc = enc;
385 	req_info->is_trunc_hmac = false;
386 	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
387 
388 	req_info->req.cptr = ctx->er_ctx.hw_ctx;
389 	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
390 
391 	/*
392 	 * We perform an asynchronous send and once the request is
393 	 * completed the driver notifies the caller through the
394 	 * registered callback functions.
395 	 */
396 	status = otx2_cpt_do_request(pdev, req_info, cpu_num);
397 
398 	return status;
399 }
400 
401 static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
402 {
403 	return cpt_enc_dec(req, true);
404 }
405 
406 static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
407 {
408 	return cpt_enc_dec(req, false);
409 }
410 
411 static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
412 				       const u8 *key, u32 keylen)
413 {
414 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
415 	const u8 *key2 = key + (keylen / 2);
416 	const u8 *key1 = key;
417 	int ret;
418 
419 	ret = xts_verify_key(tfm, key, keylen);
420 	if (ret)
421 		return ret;
422 	ctx->key_len = keylen;
423 	ctx->enc_align_len = 1;
424 	memcpy(ctx->enc_key, key1, keylen / 2);
425 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
426 	ctx->cipher_type = OTX2_CPT_AES_XTS;
427 	switch (ctx->key_len) {
428 	case 2 * AES_KEYSIZE_128:
429 		ctx->key_type = OTX2_CPT_AES_128_BIT;
430 		break;
431 	case 2 * AES_KEYSIZE_192:
432 		ctx->key_type = OTX2_CPT_AES_192_BIT;
433 		break;
434 	case 2 * AES_KEYSIZE_256:
435 		ctx->key_type = OTX2_CPT_AES_256_BIT;
436 		break;
437 	default:
438 		return -EINVAL;
439 	}
440 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
441 }
442 
443 static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
444 			  u32 keylen, u8 cipher_type)
445 {
446 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
447 
448 	if (keylen != DES3_EDE_KEY_SIZE)
449 		return -EINVAL;
450 
451 	ctx->key_len = keylen;
452 	ctx->cipher_type = cipher_type;
453 	ctx->enc_align_len = 8;
454 
455 	memcpy(ctx->enc_key, key, keylen);
456 
457 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
458 }
459 
460 static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
461 			  u32 keylen, u8 cipher_type)
462 {
463 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
464 
465 	switch (keylen) {
466 	case AES_KEYSIZE_128:
467 		ctx->key_type = OTX2_CPT_AES_128_BIT;
468 		break;
469 	case AES_KEYSIZE_192:
470 		ctx->key_type = OTX2_CPT_AES_192_BIT;
471 		break;
472 	case AES_KEYSIZE_256:
473 		ctx->key_type = OTX2_CPT_AES_256_BIT;
474 		break;
475 	default:
476 		return -EINVAL;
477 	}
478 	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
479 		ctx->enc_align_len = 16;
480 	else
481 		ctx->enc_align_len = 1;
482 
483 	ctx->key_len = keylen;
484 	ctx->cipher_type = cipher_type;
485 
486 	memcpy(ctx->enc_key, key, keylen);
487 
488 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
489 }
490 
491 static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
492 					    const u8 *key, u32 keylen)
493 {
494 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
495 }
496 
497 static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
498 					    const u8 *key, u32 keylen)
499 {
500 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
501 }
502 
503 static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
504 					     const u8 *key, u32 keylen)
505 {
506 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
507 }
508 
509 static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
510 					     const u8 *key, u32 keylen)
511 {
512 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
513 }
514 
515 static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
516 				      struct crypto_alg *alg)
517 {
518 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
519 		ctx->fbk_cipher =
520 				crypto_alloc_skcipher(alg->cra_name, 0,
521 						      CRYPTO_ALG_ASYNC |
522 						      CRYPTO_ALG_NEED_FALLBACK);
523 		if (IS_ERR(ctx->fbk_cipher)) {
524 			pr_err("%s() failed to allocate fallback for %s\n",
525 				__func__, alg->cra_name);
526 			return PTR_ERR(ctx->fbk_cipher);
527 		}
528 	}
529 	return 0;
530 }
531 
532 static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
533 {
534 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
535 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
536 	struct crypto_alg *alg = tfm->__crt_alg;
537 	struct pci_dev *pdev;
538 	int ret, cpu_num;
539 
540 	memset(ctx, 0, sizeof(*ctx));
541 	/*
542 	 * Additional memory for the skcipher_request is allocated
543 	 * since the cryptd daemon uses this memory for its
544 	 * request_ctx information.
545 	 */
546 	crypto_skcipher_set_reqsize_dma(
547 		stfm, sizeof(struct otx2_cpt_req_ctx) +
548 		      sizeof(struct skcipher_request));
549 
550 	ret = get_se_device(&pdev, &cpu_num);
551 	if (ret)
552 		return ret;
553 
554 	ctx->pdev = pdev;
555 	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
556 	if (ret)
557 		return ret;
558 
559 	return cpt_skcipher_fallback_init(ctx, alg);
560 }
561 
562 static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
563 {
564 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
565 
566 	if (ctx->fbk_cipher) {
567 		crypto_free_skcipher(ctx->fbk_cipher);
568 		ctx->fbk_cipher = NULL;
569 	}
570 	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
571 }
572 
573 static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
574 				  struct crypto_alg *alg)
575 {
576 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
577 		ctx->fbk_cipher =
578 			    crypto_alloc_aead(alg->cra_name, 0,
579 					      CRYPTO_ALG_ASYNC |
580 					      CRYPTO_ALG_NEED_FALLBACK);
581 		if (IS_ERR(ctx->fbk_cipher)) {
582 			pr_err("%s() failed to allocate fallback for %s\n",
583 				__func__, alg->cra_name);
584 			return PTR_ERR(ctx->fbk_cipher);
585 		}
586 	}
587 	return 0;
588 }
589 
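/*
 * Common AEAD tfm init: allocate the software hash used for ipad/opad
 * precomputation, set the cipher alignment requirement and set up the
 * hardware context and optional fallback tfm.
 */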
590 static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
591 {
592 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
593 	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
594 	struct crypto_alg *alg = tfm->__crt_alg;
595 	struct pci_dev *pdev;
596 	int ret, cpu_num;
597 
598 	ctx->cipher_type = cipher_type;
599 	ctx->mac_type = mac_type;
600 
601 	/*
602 	 * When the selected cipher is NULL we use the HMAC opcode instead of
603 	 * the FLEXICRYPTO opcode, therefore we don't need to use HASH
604 	 * algorithms for calculating ipad and opad.
605 	 */
606 	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
607 		switch (ctx->mac_type) {
608 		case OTX2_CPT_SHA1:
609 			ctx->hashalg = crypto_alloc_shash("sha1", 0,
610 							  CRYPTO_ALG_ASYNC);
611 			if (IS_ERR(ctx->hashalg))
612 				return PTR_ERR(ctx->hashalg);
613 			break;
614 
615 		case OTX2_CPT_SHA256:
616 			ctx->hashalg = crypto_alloc_shash("sha256", 0,
617 							  CRYPTO_ALG_ASYNC);
618 			if (IS_ERR(ctx->hashalg))
619 				return PTR_ERR(ctx->hashalg);
620 			break;
621 
622 		case OTX2_CPT_SHA384:
623 			ctx->hashalg = crypto_alloc_shash("sha384", 0,
624 							  CRYPTO_ALG_ASYNC);
625 			if (IS_ERR(ctx->hashalg))
626 				return PTR_ERR(ctx->hashalg);
627 			break;
628 
629 		case OTX2_CPT_SHA512:
630 			ctx->hashalg = crypto_alloc_shash("sha512", 0,
631 							  CRYPTO_ALG_ASYNC);
632 			if (IS_ERR(ctx->hashalg))
633 				return PTR_ERR(ctx->hashalg);
634 			break;
635 		}
636 	}
637 	switch (ctx->cipher_type) {
638 	case OTX2_CPT_AES_CBC:
639 	case OTX2_CPT_AES_ECB:
640 		ctx->enc_align_len = 16;
641 		break;
642 	case OTX2_CPT_DES3_CBC:
643 	case OTX2_CPT_DES3_ECB:
644 		ctx->enc_align_len = 8;
645 		break;
646 	case OTX2_CPT_AES_GCM:
647 	case OTX2_CPT_CIPHER_NULL:
648 		ctx->enc_align_len = 1;
649 		break;
650 	}
651 	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
652 
653 	ret = get_se_device(&pdev, &cpu_num);
654 	if (ret)
655 		return ret;
656 
657 	ctx->pdev = pdev;
658 	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
659 	if (ret)
660 		return ret;
661 
662 	return cpt_aead_fallback_init(ctx, alg);
663 }
664 
665 static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
666 {
667 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
668 }
669 
670 static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
671 {
672 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
673 }
674 
675 static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
676 {
677 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
678 }
679 
680 static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
681 {
682 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
683 }
684 
685 static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
686 {
687 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
688 }
689 
690 static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
691 {
692 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
693 }
694 
695 static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
696 {
697 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
698 }
699 
700 static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
701 {
702 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
703 }
704 
705 static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
706 {
707 	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
708 }
709 
710 static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
711 {
712 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
713 
714 	kfree(ctx->ipad);
715 	kfree(ctx->opad);
716 	if (ctx->hashalg)
717 		crypto_free_shash(ctx->hashalg);
718 	kfree(ctx->sdesc);
719 
720 	if (ctx->fbk_cipher) {
721 		crypto_free_aead(ctx->fbk_cipher);
722 		ctx->fbk_cipher = NULL;
723 	}
724 	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
725 }
726 
727 static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
728 					  unsigned int authsize)
729 {
730 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
731 
732 	if (crypto_rfc4106_check_authsize(authsize))
733 		return -EINVAL;
734 
735 	tfm->authsize = authsize;
736 	/* Set authsize for fallback case */
737 	if (ctx->fbk_cipher)
738 		ctx->fbk_cipher->authsize = authsize;
739 
740 	return 0;
741 }
742 
743 static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
744 				      unsigned int authsize)
745 {
746 	tfm->authsize = authsize;
747 
748 	return 0;
749 }
750 
751 static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
752 					   unsigned int authsize)
753 {
754 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
755 
756 	ctx->is_trunc_hmac = true;
757 	tfm->authsize = authsize;
758 
759 	return 0;
760 }
761 
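/* Allocate a shash descriptor for the given software hash algorithm. */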
762 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
763 {
764 	struct otx2_cpt_sdesc *sdesc;
765 	int size;
766 
767 	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
768 	sdesc = kmalloc(size, GFP_KERNEL);
769 	if (!sdesc)
770 		return NULL;
771 
772 	sdesc->shash.tfm = alg;
773 
774 	return sdesc;
775 }
776 
777 static inline void swap_data32(void *buf, u32 len)
778 {
779 	cpu_to_be32_array(buf, buf, len / 4);
780 }
781 
782 static inline void swap_data64(void *buf, u32 len)
783 {
784 	u64 *src = buf;
785 	int i = 0;
786 
787 	for (i = 0 ; i < len / 8; i++, src++)
788 		cpu_to_be64s(src);
789 }
790 
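/*
 * Copy the exported partial hash state into the ipad/opad buffer in the
 * byte order expected by the CPT microcode.
 */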
791 static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
792 {
793 	struct sha512_state *sha512;
794 	struct sha256_state *sha256;
795 	struct sha1_state *sha1;
796 
797 	switch (mac_type) {
798 	case OTX2_CPT_SHA1:
799 		sha1 = (struct sha1_state *) in_pad;
800 		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
801 		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
802 		break;
803 
804 	case OTX2_CPT_SHA256:
805 		sha256 = (struct sha256_state *) in_pad;
806 		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
807 		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
808 		break;
809 
810 	case OTX2_CPT_SHA384:
811 	case OTX2_CPT_SHA512:
812 		sha512 = (struct sha512_state *) in_pad;
813 		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
814 		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
815 		break;
816 
817 	default:
818 		return -EINVAL;
819 	}
820 
821 	return 0;
822 }
823 
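/*
 * Precompute the HMAC inner and outer partial hashes (ipad/opad) in
 * software so that the engine can resume hashing from these states.
 */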
824 static int aead_hmac_init(struct crypto_aead *cipher)
825 {
826 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
827 	int state_size = crypto_shash_statesize(ctx->hashalg);
828 	int ds = crypto_shash_digestsize(ctx->hashalg);
829 	int bs = crypto_shash_blocksize(ctx->hashalg);
830 	int authkeylen = ctx->auth_key_len;
831 	u8 *ipad = NULL, *opad = NULL;
832 	int ret = 0, icount = 0;
833 
834 	ctx->sdesc = alloc_sdesc(ctx->hashalg);
835 	if (!ctx->sdesc)
836 		return -ENOMEM;
837 
838 	ctx->ipad = kzalloc(bs, GFP_KERNEL);
839 	if (!ctx->ipad) {
840 		ret = -ENOMEM;
841 		goto calc_fail;
842 	}
843 
844 	ctx->opad = kzalloc(bs, GFP_KERNEL);
845 	if (!ctx->opad) {
846 		ret = -ENOMEM;
847 		goto calc_fail;
848 	}
849 
850 	ipad = kzalloc(state_size, GFP_KERNEL);
851 	if (!ipad) {
852 		ret = -ENOMEM;
853 		goto calc_fail;
854 	}
855 
856 	opad = kzalloc(state_size, GFP_KERNEL);
857 	if (!opad) {
858 		ret = -ENOMEM;
859 		goto calc_fail;
860 	}
861 
862 	if (authkeylen > bs) {
863 		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
864 					  authkeylen, ipad);
865 		if (ret)
866 			goto calc_fail;
867 
868 		authkeylen = ds;
869 	} else {
870 		memcpy(ipad, ctx->key, authkeylen);
871 	}
872 
873 	memset(ipad + authkeylen, 0, bs - authkeylen);
874 	memcpy(opad, ipad, bs);
875 
876 	for (icount = 0; icount < bs; icount++) {
877 		ipad[icount] ^= 0x36;
878 		opad[icount] ^= 0x5c;
879 	}
880 
881 	/*
882 	 * The partial hash state calculated by the software algorithm
883 	 * is exported and copied for IPAD & OPAD.
884 	 */
885 
886 	/* IPAD Calculation */
887 	crypto_shash_init(&ctx->sdesc->shash);
888 	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
889 	crypto_shash_export(&ctx->sdesc->shash, ipad);
890 	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
891 	if (ret)
892 		goto calc_fail;
893 
894 	/* OPAD Calculation */
895 	crypto_shash_init(&ctx->sdesc->shash);
896 	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
897 	crypto_shash_export(&ctx->sdesc->shash, opad);
898 	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
899 	if (ret)
900 		goto calc_fail;
901 
902 	kfree(ipad);
903 	kfree(opad);
904 
905 	return 0;
906 
907 calc_fail:
908 	kfree(ctx->ipad);
909 	ctx->ipad = NULL;
910 	kfree(ctx->opad);
911 	ctx->opad = NULL;
912 	kfree(ipad);
913 	kfree(opad);
914 	kfree(ctx->sdesc);
915 	ctx->sdesc = NULL;
916 
917 	return ret;
918 }
919 
920 static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
921 					    const unsigned char *key,
922 					    unsigned int keylen)
923 {
924 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
925 	struct crypto_authenc_key_param *param;
926 	int enckeylen = 0, authkeylen = 0;
927 	struct rtattr *rta = (void *)key;
928 
929 	if (!RTA_OK(rta, keylen))
930 		return -EINVAL;
931 
932 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
933 		return -EINVAL;
934 
935 	if (RTA_PAYLOAD(rta) < sizeof(*param))
936 		return -EINVAL;
937 
938 	param = RTA_DATA(rta);
939 	enckeylen = be32_to_cpu(param->enckeylen);
940 	key += RTA_ALIGN(rta->rta_len);
941 	keylen -= RTA_ALIGN(rta->rta_len);
942 	if (keylen < enckeylen)
943 		return -EINVAL;
944 
945 	if (keylen > OTX2_CPT_MAX_KEY_SIZE)
946 		return -EINVAL;
947 
948 	authkeylen = keylen - enckeylen;
949 	memcpy(ctx->key, key, keylen);
950 
951 	switch (enckeylen) {
952 	case AES_KEYSIZE_128:
953 		ctx->key_type = OTX2_CPT_AES_128_BIT;
954 		break;
955 	case AES_KEYSIZE_192:
956 		ctx->key_type = OTX2_CPT_AES_192_BIT;
957 		break;
958 	case AES_KEYSIZE_256:
959 		ctx->key_type = OTX2_CPT_AES_256_BIT;
960 		break;
961 	default:
962 		/* Invalid key length */
963 		return -EINVAL;
964 	}
965 
966 	ctx->enc_key_len = enckeylen;
967 	ctx->auth_key_len = authkeylen;
968 
969 	return aead_hmac_init(cipher);
970 }
971 
972 static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
973 					     const unsigned char *key,
974 					     unsigned int keylen)
975 {
976 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
977 	struct crypto_authenc_key_param *param;
978 	struct rtattr *rta = (void *)key;
979 	int enckeylen = 0;
980 
981 	if (!RTA_OK(rta, keylen))
982 		return -EINVAL;
983 
984 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
985 		return -EINVAL;
986 
987 	if (RTA_PAYLOAD(rta) < sizeof(*param))
988 		return -EINVAL;
989 
990 	param = RTA_DATA(rta);
991 	enckeylen = be32_to_cpu(param->enckeylen);
992 	key += RTA_ALIGN(rta->rta_len);
993 	keylen -= RTA_ALIGN(rta->rta_len);
994 	if (enckeylen != 0)
995 		return -EINVAL;
996 
997 	if (keylen > OTX2_CPT_MAX_KEY_SIZE)
998 		return -EINVAL;
999 
1000 	memcpy(ctx->key, key, keylen);
1001 	ctx->enc_key_len = enckeylen;
1002 	ctx->auth_key_len = keylen;
1003 
1004 	return 0;
1005 }
1006 
1007 static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
1008 					const unsigned char *key,
1009 					unsigned int keylen)
1010 {
1011 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
1012 
1013 	/*
1014 	 * For AES GCM we expect to get the encryption key (16, 24 or 32
1015 	 * bytes) followed by the salt (4 bytes).
1016 	 */
1017 	switch (keylen) {
1018 	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
1019 		ctx->key_type = OTX2_CPT_AES_128_BIT;
1020 		ctx->enc_key_len = AES_KEYSIZE_128;
1021 		break;
1022 	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
1023 		ctx->key_type = OTX2_CPT_AES_192_BIT;
1024 		ctx->enc_key_len = AES_KEYSIZE_192;
1025 		break;
1026 	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
1027 		ctx->key_type = OTX2_CPT_AES_256_BIT;
1028 		ctx->enc_key_len = AES_KEYSIZE_256;
1029 		break;
1030 	default:
1031 		/* Invalid key and salt length */
1032 		return -EINVAL;
1033 	}
1034 
1035 	/* Store encryption key and salt */
1036 	memcpy(ctx->key, key, keylen);
1037 
1038 	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
1039 }
1040 
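/*
 * Build the control word and FLEXICRYPTO context (lengths, key, IV/salt,
 * ipad/opad) for an AEAD request and add them to the input list.
 */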
1041 static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
1042 				      u32 *argcnt)
1043 {
1044 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1045 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1046 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1047 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1048 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
1049 	int mac_len = crypto_aead_authsize(tfm);
1050 	int ds;
1051 
1052 	rctx->ctrl_word.e.enc_data_offset = req->assoclen;
1053 
1054 	switch (ctx->cipher_type) {
1055 	case OTX2_CPT_AES_CBC:
1056 		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
1057 			return -EINVAL;
1058 
1059 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
1060 		/* Copy encryption key to context */
1061 		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
1062 		       ctx->enc_key_len);
1063 		/* Copy IV to context */
1064 		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
1065 
1066 		ds = crypto_shash_digestsize(ctx->hashalg);
1067 		if (ctx->mac_type == OTX2_CPT_SHA384)
1068 			ds = SHA512_DIGEST_SIZE;
1069 		if (ctx->ipad)
1070 			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
1071 		if (ctx->opad)
1072 			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
1073 		break;
1074 
1075 	case OTX2_CPT_AES_GCM:
1076 		if (crypto_ipsec_check_assoclen(req->assoclen))
1077 			return -EINVAL;
1078 
1079 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
1080 		/* Copy encryption key to context */
1081 		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
1082 		/* Copy salt to context */
1083 		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
1084 		       AES_GCM_SALT_SIZE);
1085 
1086 		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
1087 		break;
1088 
1089 	default:
1090 		/* Unknown cipher type */
1091 		return -EINVAL;
1092 	}
1093 	cpu_to_be64s(&rctx->ctrl_word.flags);
1094 
1095 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1096 	req_info->ctrl.s.se_req = 1;
1097 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
1098 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1099 	if (enc) {
1100 		req_info->req.opcode.s.minor = 2;
1101 		req_info->req.param1 = req->cryptlen;
1102 		req_info->req.param2 = req->cryptlen + req->assoclen;
1103 	} else {
1104 		req_info->req.opcode.s.minor = 3;
1105 		req_info->req.param1 = req->cryptlen - mac_len;
1106 		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
1107 	}
1108 
1109 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
1110 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
1111 	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
1112 	fctx->enc.enc_ctrl.e.mac_len = mac_len;
1113 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
1114 
1115 	/*
1116 	 * Store packet data information in the offset control word
1117 	 * (first 8 bytes).
1118 	 */
1119 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
1120 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
1121 	req_info->req.dlen += CONTROL_WORD_LEN;
1122 	++(*argcnt);
1123 
1124 	req_info->in[*argcnt].vptr = (u8 *)fctx;
1125 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
1126 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
1127 	++(*argcnt);
1128 
1129 	return 0;
1130 }
1131 
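/* Build the HMAC opcode header used for NULL-cipher AEAD requests. */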
1132 static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
1133 				      u32 enc)
1134 {
1135 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1136 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1137 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1138 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1139 
1140 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1141 	req_info->ctrl.s.se_req = 1;
1142 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
1143 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1144 	req_info->is_trunc_hmac = ctx->is_trunc_hmac;
1145 
1146 	req_info->req.opcode.s.minor = 0;
1147 	req_info->req.param1 = ctx->auth_key_len;
1148 	req_info->req.param2 = ctx->mac_type << 8;
1149 
1150 	/* Add authentication key */
1151 	req_info->in[*argcnt].vptr = ctx->key;
1152 	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
1153 	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
1154 	++(*argcnt);
1155 }
1156 
1157 static inline int create_aead_input_list(struct aead_request *req, u32 enc)
1158 {
1159 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1160 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1161 	u32 inputlen =  req->cryptlen + req->assoclen;
1162 	u32 status, argcnt = 0;
1163 
1164 	status = create_aead_ctx_hdr(req, enc, &argcnt);
1165 	if (status)
1166 		return status;
1167 	update_input_data(req_info, req->src, inputlen, &argcnt);
1168 	req_info->in_cnt = argcnt;
1169 
1170 	return 0;
1171 }
1172 
1173 static inline void create_aead_output_list(struct aead_request *req, u32 enc,
1174 					   u32 mac_len)
1175 {
1176 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1177 	struct otx2_cpt_req_info *req_info =  &rctx->cpt_req;
1178 	u32 argcnt = 0, outputlen = 0;
1179 
1180 	if (enc)
1181 		outputlen = req->cryptlen +  req->assoclen + mac_len;
1182 	else
1183 		outputlen = req->cryptlen + req->assoclen - mac_len;
1184 
1185 	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
1186 	req_info->out_cnt = argcnt;
1187 }
1188 
1189 static inline void create_aead_null_input_list(struct aead_request *req,
1190 					       u32 enc, u32 mac_len)
1191 {
1192 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1193 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1194 	u32 inputlen, argcnt = 0;
1195 
1196 	if (enc)
1197 		inputlen =  req->cryptlen + req->assoclen;
1198 	else
1199 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1200 
1201 	create_hmac_ctx_hdr(req, &argcnt, enc);
1202 	update_input_data(req_info, req->src, inputlen, &argcnt);
1203 	req_info->in_cnt = argcnt;
1204 }
1205 
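/*
 * For NULL-cipher AEAD requests: copy the payload to the destination if
 * src and dst differ, then point the output list at the location where
 * the engine appends (encryption) or returns (decryption) the HMAC.
 */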
1206 static inline int create_aead_null_output_list(struct aead_request *req,
1207 					       u32 enc, u32 mac_len)
1208 {
1209 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1210 	struct otx2_cpt_req_info *req_info =  &rctx->cpt_req;
1211 	struct scatterlist *dst;
1212 	u8 *ptr = NULL;
1213 	int argcnt = 0, status, offset;
1214 	u32 inputlen;
1215 
1216 	if (enc)
1217 		inputlen =  req->cryptlen + req->assoclen;
1218 	else
1219 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1220 
1221 	/*
1222 	 * If source and destination are different
1223 	 * then copy payload to destination
1224 	 */
1225 	if (req->src != req->dst) {
1226 
1227 		ptr = kmalloc(inputlen, (req_info->areq->flags &
1228 					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
1229 					 GFP_KERNEL : GFP_ATOMIC);
1230 		if (!ptr)
1231 			return -ENOMEM;
1232 
1233 		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
1234 					   inputlen);
1235 		if (status != inputlen) {
1236 			status = -EINVAL;
1237 			goto error_free;
1238 		}
1239 		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
1240 					     inputlen);
1241 		if (status != inputlen) {
1242 			status = -EINVAL;
1243 			goto error_free;
1244 		}
1245 		kfree(ptr);
1246 	}
1247 
1248 	if (enc) {
1249 		/*
1250 		 * In an encryption scenario the hmac needs to be
1251 		 * appended after the payload.
1252 		 */
1253 		dst = req->dst;
1254 		offset = inputlen;
1255 		while (offset >= dst->length) {
1256 			offset -= dst->length;
1257 			dst = sg_next(dst);
1258 			if (!dst)
1259 				return -ENOENT;
1260 		}
1261 
1262 		update_output_data(req_info, dst, offset, mac_len, &argcnt);
1263 	} else {
1264 		/*
1265 		 * In a decryption scenario the hmac calculated for the
1266 		 * received payload needs to be compared with the received hmac.
1267 		 */
1268 		status = sg_copy_buffer(req->src, sg_nents(req->src),
1269 					rctx->fctx.hmac.s.hmac_recv, mac_len,
1270 					inputlen, true);
1271 		if (status != mac_len)
1272 			return -EINVAL;
1273 
1274 		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
1275 		req_info->out[argcnt].size = mac_len;
1276 		argcnt++;
1277 	}
1278 
1279 	req_info->out_cnt = argcnt;
1280 	return 0;
1281 
1282 error_free:
1283 	kfree(ptr);
1284 	return status;
1285 }
1286 
1287 static int aead_do_fallback(struct aead_request *req, bool is_enc)
1288 {
1289 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1290 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1291 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
1292 	int ret;
1293 
1294 	if (ctx->fbk_cipher) {
1295 		/* Store the cipher tfm and then use the fallback tfm */
1296 		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
1297 		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
1298 					  req->base.complete, req->base.data);
1299 		aead_request_set_crypt(&rctx->fbk_req, req->src,
1300 				       req->dst, req->cryptlen, req->iv);
1301 		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
1302 		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
1303 			       crypto_aead_decrypt(&rctx->fbk_req);
1304 	} else {
1305 		ret = -EINVAL;
1306 	}
1307 
1308 	return ret;
1309 }
1310 
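/* Common encrypt/decrypt path for all AEAD algorithms. */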
1311 static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
1312 {
1313 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1314 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1315 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1316 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1317 	struct pci_dev *pdev;
1318 	int status, cpu_num;
1319 
1320 	/* Clear control words */
1321 	rctx->ctrl_word.flags = 0;
1322 	rctx->fctx.enc.enc_ctrl.u = 0;
1323 
1324 	req_info->callback = otx2_cpt_aead_callback;
1325 	req_info->areq = &req->base;
1326 	req_info->req_type = reg_type;
1327 	req_info->is_enc = enc;
1328 	req_info->is_trunc_hmac = false;
1329 
1330 	req_info->req.cptr = ctx->er_ctx.hw_ctx;
1331 	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
1332 
1333 	switch (reg_type) {
1334 	case OTX2_CPT_AEAD_ENC_DEC_REQ:
1335 		status = create_aead_input_list(req, enc);
1336 		if (status)
1337 			return status;
1338 		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
1339 		break;
1340 
1341 	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
1342 		create_aead_null_input_list(req, enc,
1343 					    crypto_aead_authsize(tfm));
1344 		status = create_aead_null_output_list(req, enc,
1345 						crypto_aead_authsize(tfm));
1346 		if (status)
1347 			return status;
1348 		break;
1349 
1350 	default:
1351 		return -EINVAL;
1352 	}
1353 	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
1354 		return -EINVAL;
1355 
1356 	if (!req_info->req.param2 ||
1357 	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
1358 	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
1359 		return aead_do_fallback(req, enc);
1360 
1361 	status = get_se_device(&pdev, &cpu_num);
1362 	if (status)
1363 		return status;
1364 
1365 	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
1366 
1367 	/*
1368 	 * We perform an asynchronous send and once the request is
1369 	 * completed the driver notifies the caller through the
1370 	 * registered callback functions.
1371 	 */
1372 	return otx2_cpt_do_request(pdev, req_info, cpu_num);
1373 }
1374 
1375 static int otx2_cpt_aead_encrypt(struct aead_request *req)
1376 {
1377 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
1378 }
1379 
1380 static int otx2_cpt_aead_decrypt(struct aead_request *req)
1381 {
1382 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
1383 }
1384 
1385 static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
1386 {
1387 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
1388 }
1389 
1390 static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
1391 {
1392 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
1393 }
1394 
1395 static struct skcipher_alg otx2_cpt_skciphers[] = { {
1396 	.base.cra_name = "xts(aes)",
1397 	.base.cra_driver_name = "cpt_xts_aes",
1398 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1399 	.base.cra_blocksize = AES_BLOCK_SIZE,
1400 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1401 	.base.cra_alignmask = 7,
1402 	.base.cra_priority = 4001,
1403 	.base.cra_module = THIS_MODULE,
1404 
1405 	.init = otx2_cpt_enc_dec_init,
1406 	.exit = otx2_cpt_skcipher_exit,
1407 	.ivsize = AES_BLOCK_SIZE,
1408 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
1409 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
1410 	.setkey = otx2_cpt_skcipher_xts_setkey,
1411 	.encrypt = otx2_cpt_skcipher_encrypt,
1412 	.decrypt = otx2_cpt_skcipher_decrypt,
1413 }, {
1414 	.base.cra_name = "cbc(aes)",
1415 	.base.cra_driver_name = "cpt_cbc_aes",
1416 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1417 	.base.cra_blocksize = AES_BLOCK_SIZE,
1418 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1419 	.base.cra_alignmask = 7,
1420 	.base.cra_priority = 4001,
1421 	.base.cra_module = THIS_MODULE,
1422 
1423 	.init = otx2_cpt_enc_dec_init,
1424 	.exit = otx2_cpt_skcipher_exit,
1425 	.ivsize = AES_BLOCK_SIZE,
1426 	.min_keysize = AES_MIN_KEY_SIZE,
1427 	.max_keysize = AES_MAX_KEY_SIZE,
1428 	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
1429 	.encrypt = otx2_cpt_skcipher_encrypt,
1430 	.decrypt = otx2_cpt_skcipher_decrypt,
1431 }, {
1432 	.base.cra_name = "ecb(aes)",
1433 	.base.cra_driver_name = "cpt_ecb_aes",
1434 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1435 	.base.cra_blocksize = AES_BLOCK_SIZE,
1436 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1437 	.base.cra_alignmask = 7,
1438 	.base.cra_priority = 4001,
1439 	.base.cra_module = THIS_MODULE,
1440 
1441 	.init = otx2_cpt_enc_dec_init,
1442 	.exit = otx2_cpt_skcipher_exit,
1443 	.ivsize = 0,
1444 	.min_keysize = AES_MIN_KEY_SIZE,
1445 	.max_keysize = AES_MAX_KEY_SIZE,
1446 	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
1447 	.encrypt = otx2_cpt_skcipher_encrypt,
1448 	.decrypt = otx2_cpt_skcipher_decrypt,
1449 }, {
1450 	.base.cra_name = "cbc(des3_ede)",
1451 	.base.cra_driver_name = "cpt_cbc_des3_ede",
1452 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1453 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1454 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1455 	.base.cra_alignmask = 7,
1456 	.base.cra_priority = 4001,
1457 	.base.cra_module = THIS_MODULE,
1458 
1459 	.init = otx2_cpt_enc_dec_init,
1460 	.exit = otx2_cpt_skcipher_exit,
1461 	.min_keysize = DES3_EDE_KEY_SIZE,
1462 	.max_keysize = DES3_EDE_KEY_SIZE,
1463 	.ivsize = DES_BLOCK_SIZE,
1464 	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
1465 	.encrypt = otx2_cpt_skcipher_encrypt,
1466 	.decrypt = otx2_cpt_skcipher_decrypt,
1467 }, {
1468 	.base.cra_name = "ecb(des3_ede)",
1469 	.base.cra_driver_name = "cpt_ecb_des3_ede",
1470 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1471 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1472 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1473 	.base.cra_alignmask = 7,
1474 	.base.cra_priority = 4001,
1475 	.base.cra_module = THIS_MODULE,
1476 
1477 	.init = otx2_cpt_enc_dec_init,
1478 	.exit = otx2_cpt_skcipher_exit,
1479 	.min_keysize = DES3_EDE_KEY_SIZE,
1480 	.max_keysize = DES3_EDE_KEY_SIZE,
1481 	.ivsize = 0,
1482 	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
1483 	.encrypt = otx2_cpt_skcipher_encrypt,
1484 	.decrypt = otx2_cpt_skcipher_decrypt,
1485 } };
1486 
1487 static struct aead_alg otx2_cpt_aeads[] = { {
1488 	.base = {
1489 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
1490 		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
1491 		.cra_blocksize = AES_BLOCK_SIZE,
1492 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1493 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1494 		.cra_priority = 4001,
1495 		.cra_alignmask = 0,
1496 		.cra_module = THIS_MODULE,
1497 	},
1498 	.init = otx2_cpt_aead_cbc_aes_sha1_init,
1499 	.exit = otx2_cpt_aead_exit,
1500 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1501 	.setauthsize = otx2_cpt_aead_set_authsize,
1502 	.encrypt = otx2_cpt_aead_encrypt,
1503 	.decrypt = otx2_cpt_aead_decrypt,
1504 	.ivsize = AES_BLOCK_SIZE,
1505 	.maxauthsize = SHA1_DIGEST_SIZE,
1506 }, {
1507 	.base = {
1508 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
1509 		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
1510 		.cra_blocksize = AES_BLOCK_SIZE,
1511 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1512 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1513 		.cra_priority = 4001,
1514 		.cra_alignmask = 0,
1515 		.cra_module = THIS_MODULE,
1516 	},
1517 	.init = otx2_cpt_aead_cbc_aes_sha256_init,
1518 	.exit = otx2_cpt_aead_exit,
1519 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1520 	.setauthsize = otx2_cpt_aead_set_authsize,
1521 	.encrypt = otx2_cpt_aead_encrypt,
1522 	.decrypt = otx2_cpt_aead_decrypt,
1523 	.ivsize = AES_BLOCK_SIZE,
1524 	.maxauthsize = SHA256_DIGEST_SIZE,
1525 }, {
1526 	.base = {
1527 		.cra_name = "authenc(hmac(sha384),cbc(aes))",
1528 		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
1529 		.cra_blocksize = AES_BLOCK_SIZE,
1530 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1531 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1532 		.cra_priority = 4001,
1533 		.cra_alignmask = 0,
1534 		.cra_module = THIS_MODULE,
1535 	},
1536 	.init = otx2_cpt_aead_cbc_aes_sha384_init,
1537 	.exit = otx2_cpt_aead_exit,
1538 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1539 	.setauthsize = otx2_cpt_aead_set_authsize,
1540 	.encrypt = otx2_cpt_aead_encrypt,
1541 	.decrypt = otx2_cpt_aead_decrypt,
1542 	.ivsize = AES_BLOCK_SIZE,
1543 	.maxauthsize = SHA384_DIGEST_SIZE,
1544 }, {
1545 	.base = {
1546 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
1547 		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
1548 		.cra_blocksize = AES_BLOCK_SIZE,
1549 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1550 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1551 		.cra_priority = 4001,
1552 		.cra_alignmask = 0,
1553 		.cra_module = THIS_MODULE,
1554 	},
1555 	.init = otx2_cpt_aead_cbc_aes_sha512_init,
1556 	.exit = otx2_cpt_aead_exit,
1557 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1558 	.setauthsize = otx2_cpt_aead_set_authsize,
1559 	.encrypt = otx2_cpt_aead_encrypt,
1560 	.decrypt = otx2_cpt_aead_decrypt,
1561 	.ivsize = AES_BLOCK_SIZE,
1562 	.maxauthsize = SHA512_DIGEST_SIZE,
1563 }, {
1564 	.base = {
1565 		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
1566 		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
1567 		.cra_blocksize = 1,
1568 		.cra_flags = CRYPTO_ALG_ASYNC,
1569 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1570 		.cra_priority = 4001,
1571 		.cra_alignmask = 0,
1572 		.cra_module = THIS_MODULE,
1573 	},
1574 	.init = otx2_cpt_aead_ecb_null_sha1_init,
1575 	.exit = otx2_cpt_aead_exit,
1576 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1577 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1578 	.encrypt = otx2_cpt_aead_null_encrypt,
1579 	.decrypt = otx2_cpt_aead_null_decrypt,
1580 	.ivsize = 0,
1581 	.maxauthsize = SHA1_DIGEST_SIZE,
1582 }, {
1583 	.base = {
1584 		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
1585 		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
1586 		.cra_blocksize = 1,
1587 		.cra_flags = CRYPTO_ALG_ASYNC,
1588 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1589 		.cra_priority = 4001,
1590 		.cra_alignmask = 0,
1591 		.cra_module = THIS_MODULE,
1592 	},
1593 	.init = otx2_cpt_aead_ecb_null_sha256_init,
1594 	.exit = otx2_cpt_aead_exit,
1595 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1596 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1597 	.encrypt = otx2_cpt_aead_null_encrypt,
1598 	.decrypt = otx2_cpt_aead_null_decrypt,
1599 	.ivsize = 0,
1600 	.maxauthsize = SHA256_DIGEST_SIZE,
1601 }, {
1602 	.base = {
1603 		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
1604 		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
1605 		.cra_blocksize = 1,
1606 		.cra_flags = CRYPTO_ALG_ASYNC,
1607 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1608 		.cra_priority = 4001,
1609 		.cra_alignmask = 0,
1610 		.cra_module = THIS_MODULE,
1611 	},
1612 	.init = otx2_cpt_aead_ecb_null_sha384_init,
1613 	.exit = otx2_cpt_aead_exit,
1614 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1615 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1616 	.encrypt = otx2_cpt_aead_null_encrypt,
1617 	.decrypt = otx2_cpt_aead_null_decrypt,
1618 	.ivsize = 0,
1619 	.maxauthsize = SHA384_DIGEST_SIZE,
1620 }, {
1621 	.base = {
1622 		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
1623 		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
1624 		.cra_blocksize = 1,
1625 		.cra_flags = CRYPTO_ALG_ASYNC,
1626 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1627 		.cra_priority = 4001,
1628 		.cra_alignmask = 0,
1629 		.cra_module = THIS_MODULE,
1630 	},
1631 	.init = otx2_cpt_aead_ecb_null_sha512_init,
1632 	.exit = otx2_cpt_aead_exit,
1633 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1634 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1635 	.encrypt = otx2_cpt_aead_null_encrypt,
1636 	.decrypt = otx2_cpt_aead_null_decrypt,
1637 	.ivsize = 0,
1638 	.maxauthsize = SHA512_DIGEST_SIZE,
1639 }, {
1640 	.base = {
1641 		.cra_name = "rfc4106(gcm(aes))",
1642 		.cra_driver_name = "cpt_rfc4106_gcm_aes",
1643 		.cra_blocksize = 1,
1644 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1645 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1646 		.cra_priority = 4001,
1647 		.cra_alignmask = 0,
1648 		.cra_module = THIS_MODULE,
1649 	},
1650 	.init = otx2_cpt_aead_gcm_aes_init,
1651 	.exit = otx2_cpt_aead_exit,
1652 	.setkey = otx2_cpt_aead_gcm_aes_setkey,
1653 	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
1654 	.encrypt = otx2_cpt_aead_encrypt,
1655 	.decrypt = otx2_cpt_aead_decrypt,
1656 	.ivsize = AES_GCM_IV_SIZE,
1657 	.maxauthsize = AES_GCM_ICV_SIZE,
1658 } };
1659 
1660 static inline int cpt_register_algs(void)
1661 {
1662 	int i, err = 0;
1663 
1664 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
1665 		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1666 
1667 	err = crypto_register_skciphers(otx2_cpt_skciphers,
1668 					ARRAY_SIZE(otx2_cpt_skciphers));
1669 	if (err)
1670 		return err;
1671 
1672 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
1673 		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1674 
1675 	err = crypto_register_aeads(otx2_cpt_aeads,
1676 				    ARRAY_SIZE(otx2_cpt_aeads));
1677 	if (err) {
1678 		crypto_unregister_skciphers(otx2_cpt_skciphers,
1679 					    ARRAY_SIZE(otx2_cpt_skciphers));
1680 		return err;
1681 	}
1682 
1683 	return 0;
1684 }
1685 
1686 static inline void cpt_unregister_algs(void)
1687 {
1688 	crypto_unregister_skciphers(otx2_cpt_skciphers,
1689 				    ARRAY_SIZE(otx2_cpt_skciphers));
1690 	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
1691 }
1692 
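/* Sort helper: order SE devices by PCI devfn. */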
1693 static int compare_func(const void *lptr, const void *rptr)
1694 {
1695 	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1696 	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1697 
1698 	if (ldesc->dev->devfn < rdesc->dev->devfn)
1699 		return -1;
1700 	if (ldesc->dev->devfn > rdesc->dev->devfn)
1701 		return 1;
1702 	return 0;
1703 }
1704 
1705 static void swap_func(void *lptr, void *rptr, int size)
1706 {
1707 	struct cpt_device_desc *ldesc = lptr;
1708 	struct cpt_device_desc *rdesc = rptr;
1709 
1710 	swap(*ldesc, *rdesc);
1711 }
1712 
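/*
 * Register a CPT device for kernel crypto use; once all expected devices
 * are present, register the skcipher and AEAD algorithms.
 */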
1713 int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
1714 			 int num_queues, int num_devices)
1715 {
1716 	int ret = 0;
1717 	int count;
1718 
1719 	mutex_lock(&mutex);
1720 	count = atomic_read(&se_devices.count);
1721 	if (count >= OTX2_CPT_MAX_LFS_NUM) {
1722 		dev_err(&pdev->dev, "No space to add a new device\n");
1723 		ret = -ENOSPC;
1724 		goto unlock;
1725 	}
1726 	se_devices.desc[count].num_queues = num_queues;
1727 	se_devices.desc[count++].dev = pdev;
1728 	atomic_inc(&se_devices.count);
1729 
1730 	if (atomic_read(&se_devices.count) == num_devices &&
1731 	    is_crypto_registered == false) {
1732 		if (cpt_register_algs()) {
1733 			dev_err(&pdev->dev,
1734 				"Error in registering crypto algorithms\n");
1735 			ret =  -EINVAL;
1736 			goto unlock;
1737 		}
1738 		try_module_get(mod);
1739 		is_crypto_registered = true;
1740 	}
1741 	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
1742 	     compare_func, swap_func);
1743 
1744 unlock:
1745 	mutex_unlock(&mutex);
1746 	return ret;
1747 }
1748 
1749 void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
1750 {
1751 	struct cpt_device_table *dev_tbl;
1752 	bool dev_found = false;
1753 	int i, j, count;
1754 
1755 	mutex_lock(&mutex);
1756 
1757 	dev_tbl = &se_devices;
1758 	count = atomic_read(&dev_tbl->count);
1759 	for (i = 0; i < count; i++) {
1760 		if (pdev == dev_tbl->desc[i].dev) {
1761 			for (j = i; j < count-1; j++)
1762 				dev_tbl->desc[j] = dev_tbl->desc[j+1];
1763 			dev_found = true;
1764 			break;
1765 		}
1766 	}
1767 
1768 	if (!dev_found) {
1769 		dev_err(&pdev->dev, "%s device not found\n", __func__);
1770 		goto unlock;
1771 	}
1772 	if (atomic_dec_and_test(&se_devices.count)) {
1773 		cpt_unregister_algs();
1774 		module_put(mod);
1775 		is_crypto_registered = false;
1776 	}
1777 
1778 unlock:
1779 	mutex_unlock(&mutex);
1780 }
1781