// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, as well as for DES and 3DES in CBC and
 * ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
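
/*
 * Illustrative only (not part of this driver): users reach this code through
 * the generic skcipher API, never by calling the functions below directly.
 * Assuming the usual generic algorithm name for the CBC-AES template, a
 * caller would do something like:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 */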
#include "sun4i-ss.h"
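
/*
 * sun4i_ss_opti_poll - handle a request whose SG lengths are all 4-byte
 * multiples
 *
 * Optimized path: every scatterlist entry is a multiple of 4 bytes long, so
 * data can be pumped straight between the SGs and the 32-bit FIFOs without
 * any intermediate buffering. Runs entirely by PIO under ss->slock.
 */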
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offsets for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);
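
	/* Load the key into the SS KEY registers, one 32-bit word at a time */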
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
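
	/*
	 * Pump the request through the FIFOs one 32-bit word at a time:
	 * write input words into RXFIFO while it has space, then read back
	 * as many processed words as TXFIFO advertises via SS_FCSR.
	 */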
	do {
		todo = min(rx_cnt, ileft);
		todo = min_t(size_t, todo, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);
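
	/*
	 * The engine updates the IV registers as it runs (CBC chaining);
	 * copy the final IV back so that a chained request starts from it.
	 */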
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
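
/*
 * sun4i_ss_cipher_poll_fallback - run a request on the software fallback
 *
 * Used when the request length is not a multiple of the block size, which
 * the SS hardware cannot handle. The request is simply replayed on the
 * pre-allocated fallback tfm with the same src/dst/IV.
 */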
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
	int err;

	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}

/* Generic function that supports SGs whose lengths are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offsets for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * If we have only SGs with a length that is a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;
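
	/*
	 * The main pump: as in sun4i_ss_opti_poll() the FIFOs only move
	 * 32-bit words, so partial words are staged in buf (input side) and
	 * bufo (output side) until whole words can be transferred.
	 */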
	while (oleft) {
		if (ileft) {
			char buf[4 * SS_RX_MAX]; /* buffer for linearizing the source SG */

			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG.
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; todo is now in bytes.
				 * After the copy, we must be able to write
				 * the whole of buf in one pass once it holds
				 * a multiple of 4 bytes, which is why we
				 * min() against rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev,
			"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the destination SG */

			/*
			 * Read obl bytes into bufo, draining as much as
			 * possible from the device.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG space, and
				 * no more than the remaining buffer; no need
				 * to test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully consumed here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
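
/*
 * The helpers below just select the operation bits for SS_CTL and delegate
 * to sun4i_ss_cipher_poll(). For illustration only (the real tables live in
 * sun4i-ss-core.c; the fields shown are a sketch, not a copy), each helper
 * is referenced from a sun4i_ss_alg_template roughly like:
 *
 *	.alg.crypto = {
 *		.base.cra_name	= "cbc(aes)",
 *		.setkey		= sun4i_ss_aes_setkey,
 *		.encrypt	= sun4i_ss_cbc_aes_encrypt,
 *		.decrypt	= sun4i_ss_cbc_aes_decrypt,
 *		.ivsize		= AES_BLOCK_SIZE,
 *	},
 */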

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
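
/*
 * sun4i_ss_cipher_init - per-tfm setup
 *
 * Allocates the software fallback tfm, sizes the per-request context and
 * takes a runtime PM reference on the SS device for the lifetime of the tfm
 * (released in sun4i_ss_cipher_exit()).
 */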
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	/* pm_runtime_get_sync() increments the usage count even on failure */
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_sync_skcipher(op->fallback_tfm);
	return err;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
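
	/*
	 * Propagate the request flags to the fallback tfm and give it the
	 * same key.
	 */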
	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}