// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

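/*
 * Check whether a request must be handled by the software fallback.
 * Requests that need more than MAX_SG scatterlist entries, are empty,
 * shorter than the IV, not a multiple of the AES block size, or whose
 * buffers are not word-aligned in word-sized chunks cannot be handled
 * by the CE; the matching debugfs fallback counter is bumped.
 */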
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	unsigned int todo, len;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);

	if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
	    sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
		algt->stat_fb_maxsg++;
		return true;
	}

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
		algt->stat_fb_leniv++;
		return true;
	}

	if (areq->cryptlen == 0) {
		algt->stat_fb_len0++;
		return true;
	}

	if (areq->cryptlen % 16) {
		algt->stat_fb_mod16++;
		return true;
	}

	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}

	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_dstali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_dstlen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	return false;
}

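/*
 * Process a request with the software fallback tfm. The fallback request
 * embedded in the request context is reused, so no allocation happens
 * here; only the fallback statistic is updated when debugfs is enabled.
 */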
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
		struct sun8i_ce_alg_template *algt;

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.skcipher.base);
		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

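/*
 * Build and DMA-map everything the CE task needs for one request: fill
 * the task descriptor (algorithm, direction, key size, data length), map
 * the key, the IV bounce buffer and the source/destination scatterlists,
 * and stash the mapping information in the request context so that
 * sun8i_ce_cipher_unprepare() can undo it afterwards.
 */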
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int ns = sg_nents_for_len(areq->src, areq->cryptlen);
	int nd = sg_nents_for_len(areq->dst, areq->cryptlen);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		algt->stat_req++;

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		rctx->ivlen = ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			/*
			 * On decryption the last ciphertext block becomes the
			 * output IV; save it now since it may be overwritten
			 * when src == dst.
			 */
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(chan->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	/*
	 * dma_unmap_sg() must be called with the same nents that were passed
	 * to dma_map_sg(), not with the count it returned, so keep the
	 * original nents for sun8i_ce_cipher_unprepare().
	 */
	rctx->nr_sgs = ns;
	rctx->nr_sgd = nd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

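/*
 * Run the prepared task on its flow and finalize the request from
 * crypto engine context.
 */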
static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
}

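/*
 * Undo the DMA mappings set up by sun8i_ce_cipher_prepare() and copy the
 * output IV back into the request (restoring the saved ciphertext block
 * on decryption) once the task has completed.
 */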
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
				      void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}

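/*
 * crypto_engine callback: prepare the task, run it on the hardware and
 * clean up, in one shot.
 */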
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
	int err = sun8i_ce_cipher_prepare(engine, areq);

	if (err)
		return err;

	sun8i_ce_cipher_run(engine, areq);
	sun8i_ce_cipher_unprepare(engine, areq);
	return 0;
}

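/*
 * skcipher decrypt entry point: use the software fallback when the
 * request cannot be handled by the CE, otherwise pick a flow and queue
 * the request on its crypto engine.
 */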
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

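/* skcipher encrypt entry point, mirroring sun8i_ce_skdecrypt() */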
int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

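/*
 * tfm init: allocate the software fallback cipher, size the request
 * context to also hold the fallback request, record the fallback driver
 * name for debugfs and take a runtime PM reference on the CE device.
 */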
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	memcpy(algt->fbname,
	       crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

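/*
 * tfm exit: wipe and free the key, release the fallback cipher and drop
 * the runtime PM reference taken in sun8i_ce_cipher_init().
 */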
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

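/*
 * Set the AES key: only 128/192/256-bit keys are accepted. The key is
 * duplicated into DMA-able memory and also given to the fallback tfm.
 */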
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

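/*
 * Set the DES3 key: run the usual 3DES weak-key checks, copy the key to
 * DMA-able memory and forward it to the fallback tfm as well.
 */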
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}