// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ce.h"

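/*
 * Allocate the software fallback ahash, size our request context so it can
 * also hold the fallback request, and keep the CE powered (runtime PM) for
 * the lifetime of this tfm.
 */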
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	op->ce = algt->ce;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(op->fallback_tfm));

	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

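/* Free the fallback ahash and drop the runtime PM reference taken at init. */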
void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

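/*
 * The CE can only hash a complete request in one go, so everything that
 * needs incremental hashing (init/update/final/finup/export/import) is
 * delegated to the software fallback; only digest() may use the hardware.
 */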
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

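/* Export the partial state of the underlying fallback request. */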
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

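/* Import a previously exported fallback state. */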
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

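/* Finalize through the fallback, counting it in the debug statistics. */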
int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_final(&rctx->fallback_req);
}

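/* Pass additional data to the fallback request. */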
int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

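/* update + final in one call, again through the fallback. */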
int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_finup(&rctx->fallback_req);
}

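/* One-shot digest through the fallback, used when the hardware cannot handle the request. */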
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct sun8i_ce_alg_template *algt __maybe_unused;
		struct ahash_alg *alg = crypto_ahash_alg(tfm);

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_digest(&rctx->fallback_req);
}

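/*
 * A request can only go to the hardware if it is non-empty, fits in the
 * task's source slots (one slot is reserved for the padding block) and every
 * scatterlist entry has a 4-byte aligned offset and a multiple-of-4 length.
 */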
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);

	if (areq->nbytes == 0) {
		algt->stat_fb_len0++;
		return true;
	}
	/* we need to reserve one SG slot for the padding */
	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
		algt->stat_fb_maxsg++;
		return true;
	}
	sg = areq->src;
	while (sg) {
		if (sg->length % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

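/*
 * Entry point for digest(): fall back when needed, otherwise pick a flow and
 * queue the request on that flow's crypto engine; the actual work is done in
 * sun8i_ce_hash_run().
 */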
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

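/*
 * Write the MD5/SHA end-of-message padding into buf, starting at 32-bit word
 * index padi: the 0x80 marker, zero fill up to the length field, then the
 * message length in bits (little endian for MD5, big endian otherwise, 128
 * bits wide for SHA384/SHA512). Returns the new number of 32-bit words in
 * buf, or 0 if the padding would overflow bufsize.
 */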
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

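/*
 * Run one hash request on the hardware: build a CE task whose sources are
 * the request's scatterlist followed by a software-built padding block,
 * start the task, then copy the digest back into the request.
 */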
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;
	int ns = sg_nents_for_len(areq->src, areq->nbytes);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
	ce = algt->ce;

	bs = algt->alg.hash.base.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.base.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding could be up to two blocks */
	buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

	flow = rctx->flow;
	chan = &ce->chanlist[flow];

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

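	/* Fill the task descriptor: flow id, hash algorithm and interrupt on completion. */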
	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	cet->t_sym_ctl = 0;
	cet->t_asym_ctl = 0;

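	/*
	 * Map the source scatterlist and reference it from the task's source
	 * slots; hardware lengths are expressed in 32-bit words.
	 */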
	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}
	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = cpu_to_le32(addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

	byte_count = areq->nbytes;
	j = 0;

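	/*
	 * Build the final padding in the bounce buffer; it will be appended
	 * as an extra source slot since the hardware does not pad the
	 * message itself.
	 */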
	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = cpu_to_le32(addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}

	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}