// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/hmac.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"

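/*
 * sun8i_ss_hashkey() - reduce an over-long HMAC key to its digest.
 * Per RFC 2104, keys longer than the block size must be replaced by
 * their hash before the ipad/opad blocks are built. A synchronous
 * "sha1" shash is allocated just for this one-off computation; sha1
 * is hardcoded here since the driver offers HMAC only for SHA1.
 */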
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_shash *xtfm;
	struct shash_desc *sdesc;
	size_t len;
	int ret = 0;

	xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
	sdesc = kmalloc(len, GFP_KERNEL);
	if (!sdesc) {
		ret = -ENOMEM;
		goto err_hashkey_sdesc;
	}
	sdesc->tfm = xtfm;

	ret = crypto_shash_init(sdesc);
	if (ret) {
		dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
		goto err_hashkey;
	}
	ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
	if (ret)
		dev_err(tfmctx->ss->dev, "shash finup error\n");
err_hashkey:
	kfree(sdesc);
err_hashkey_sdesc:
	crypto_free_shash(xtfm);
	return ret;
}

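/*
 * sun8i_ss_hmac_setkey() - precompute the RFC 2104 ipad/opad blocks.
 * The zero-padded key XORed with HMAC_IPAD_VALUE/HMAC_OPAD_VALUE is
 * kept in DMA-able buffers so that sun8i_ss_hash_run() can feed both
 * blocks to the hardware as ordinary data. A consumer reaches this
 * through the generic ahash API, e.g. (sketch, error handling
 * omitted):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 */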
int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
	struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int digestsize, i;
	int bs = crypto_ahash_blocksize(ahash);
	int ret;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	digestsize = algt->alg.hash.halg.digestsize;

	if (keylen > bs) {
		ret = sun8i_ss_hashkey(tfmctx, key, keylen);
		if (ret)
			return ret;
		tfmctx->keylen = digestsize;
	} else {
		tfmctx->keylen = keylen;
		memcpy(tfmctx->key, key, keylen);
	}

	tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
	if (!tfmctx->ipad)
		return -ENOMEM;
	tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
	if (!tfmctx->opad) {
		ret = -ENOMEM;
		goto err_opad;
	}

	memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
	memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
	memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
	for (i = 0; i < bs; i++) {
		tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
		tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
	if (!ret)
		return 0;

	memzero_explicit(tfmctx->key, keylen);
	kfree_sensitive(tfmctx->opad);
err_opad:
	kfree_sensitive(tfmctx->ipad);
	return ret;
}

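/*
 * Per-tfm initialization: hook the tfm to the crypto engine, allocate
 * a software fallback of the same algorithm, grow statesize/reqsize to
 * cover the fallback's needs, and hold a runtime-PM reference on the
 * device for the lifetime of the tfm.
 */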
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	op->enginectx.op.do_one_request = sun8i_ss_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(tfmctx->ipad);
	kfree_sensitive(tfmctx->opad);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

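/*
 * The driver implements no hardware import/export of a partial hash
 * state, so init/update/final/finup and export/import all delegate to
 * the software fallback; only digest() may reach the hardware.
 */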
int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

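/*
 * Program one hash task into the hardware, chunk by chunk: write the
 * source/destination addresses and the length for each prepared SG
 * slot, enable the interrupts, kick SS_CTL_REG and wait for the flow
 * completion raised by the interrupt handler. For chunks after the
 * first, BIT(17) is set and the previous chunk's digest address is
 * handed to SS_KEY_ADR_REG/SS_IV_ADR_REG, which seemingly makes the
 * engine resume hashing from that intermediate state.
 */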
static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

		mutex_lock(&ss->mlock);
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}

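/*
 * Check whether a request fits the hardware: the SS cannot hash an
 * empty message, cannot take requests that risk overflowing the pad
 * buffer, handles at most MAX_SG - 1 data SGs (one slot is reserved
 * for the padding SG), and needs every SG except the last to be a
 * multiple of the 64-byte block size, with word-aligned offsets and
 * word-multiple lengths. Everything else goes to the fallback.
 */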
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);

	if (areq->nbytes == 0) {
		algt->stat_fb_len++;
		return true;
	}

	if (areq->nbytes >= MAX_PAD_SIZE - 64) {
		algt->stat_fb_len++;
		return true;
	}

	/* we need to reserve one SG for the padding one */
	if (sg_nents(areq->src) > MAX_SG - 1) {
		algt->stat_fb_sgnum++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		/*
		 * The SS can only hash full blocks; since it supports only
		 * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
		 * Only the last block may be bounced to the pad buffer.
		 */
		if (sg->length % 64 && sg_next(sg)) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (sg->length % 4) {
			algt->stat_fb_sglen++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

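/*
 * Entry point for a one-shot digest: fall back in software when the
 * request does not fit the hardware, otherwise pick a flow and queue
 * the request on that flow's crypto engine, which will later call
 * sun8i_ss_hash_run().
 */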
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

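/*
 * Write the MD5/SHA padding into @buf starting at word index @padi and
 * return the new total word count (0 on overflow of @bufsize).
 * Worked example for SHA1 (bs = 64) and byte_count = 20: fill is
 * 64 - 20 = 44 bytes and min_fill is 12 bytes (the 0x80 word plus the
 * 64-bit length), so the 0x80 word, eight zero words and two length
 * words are emitted and the padded stream totals one 64-byte block.
 */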
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

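/*
 * HMAC is computed as two plain hash passes over the same flow:
 * pass 1 (hmac == 1) prepends the precomputed ipad block to the
 * request data, with byte_count = nbytes + bs; pass 2 (hmac == 2)
 * hashes the opad block followed by the inner digest, with
 * byte_count = digestsize + bs. The digest of pass 2 is the HMAC.
 */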
/* sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;
	void *pad, *result;
	int j, i, k, todo;
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG may have a length that is not a multiple of 64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

	if (j > 0)
		i--;

retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		hmac = 1;
		/* shift all SG one slot up, to free slot 0 for IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		if (dma_mapping_error(ss->dev, addr_xpad)) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			err = -EINVAL;
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
	if (tfmctx->keylen && hmac == 2) {
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		if (dma_mapping_error(ss->dev, addr_xpad)) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			err = -EINVAL;
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;

		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * DMA map/unmap bookkeeping; the flow starts with hmac == 0
	 * (an HMAC request raises it to 1 above):
	 *
	 * map src, map res
	 * retry:
	 *	if hmac == 1: map xpad (ipad)
	 *	if hmac == 2: map res, map xpad (opad)
	 *	map pad
	 *	run the task
	 *	unmap pad
	 *	if hmac: unmap xpad
	 *	unmap res
	 *	if hmac < 2: unmap src
	 *	if hmac == 1: hmac = 2, goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}

	if (!err)
		memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}