1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 
21 #include <linux/clk.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/irq.h>
26 #include <linux/kernel.h>
27 #include <linux/kthread.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/spinlock.h>
32 
33 #define SHA_BUFFER_LEN		PAGE_SIZE
34 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
35 
36 #define SAHARA_NAME "sahara"
37 #define SAHARA_VERSION_3	3
38 #define SAHARA_VERSION_4	4
39 #define SAHARA_TIMEOUT_MS	1000
40 #define SAHARA_MAX_HW_DESC	2
41 #define SAHARA_MAX_HW_LINK	20
42 
43 #define FLAGS_MODE_MASK		0x000f
44 #define FLAGS_ENCRYPT		BIT(0)
45 #define FLAGS_CBC		BIT(1)
46 
47 #define SAHARA_HDR_BASE			0x00800000
48 #define SAHARA_HDR_SKHA_ALG_AES	0
49 #define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
50 #define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
51 #define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
52 #define SAHARA_HDR_FORM_DATA		(5 << 16)
53 #define SAHARA_HDR_FORM_KEY		(8 << 16)
54 #define SAHARA_HDR_LLO			(1 << 24)
55 #define SAHARA_HDR_CHA_SKHA		(1 << 28)
56 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
57 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
58 
59 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
60 #define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
61 #define SAHARA_HDR_MDHA_HASH		0xA0850000
62 #define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
63 #define SAHARA_HDR_MDHA_ALG_SHA1	0
64 #define SAHARA_HDR_MDHA_ALG_MD5		1
65 #define SAHARA_HDR_MDHA_ALG_SHA256	2
66 #define SAHARA_HDR_MDHA_ALG_SHA224	3
67 #define SAHARA_HDR_MDHA_PDATA		(1 << 2)
68 #define SAHARA_HDR_MDHA_HMAC		(1 << 3)
69 #define SAHARA_HDR_MDHA_INIT		(1 << 5)
70 #define SAHARA_HDR_MDHA_IPAD		(1 << 6)
71 #define SAHARA_HDR_MDHA_OPAD		(1 << 7)
72 #define SAHARA_HDR_MDHA_SWAP		(1 << 8)
73 #define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
74 #define SAHARA_HDR_MDHA_SSL		(1 << 10)
75 
76 /* SAHARA can only process one request at a time */
77 #define SAHARA_QUEUE_LENGTH	1
78 
79 #define SAHARA_REG_VERSION	0x00
80 #define SAHARA_REG_DAR		0x04
81 #define SAHARA_REG_CONTROL	0x08
82 #define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
83 #define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
84 #define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
85 #define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
86 #define SAHARA_REG_CMD		0x0C
87 #define		SAHARA_CMD_RESET		(1 << 0)
88 #define		SAHARA_CMD_CLEAR_INT		(1 << 8)
89 #define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
90 #define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
91 #define		SAHARA_CMD_MODE_BATCH		(1 << 16)
92 #define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
93 #define	SAHARA_REG_STATUS	0x10
94 #define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
95 #define			SAHARA_STATE_IDLE	0
96 #define			SAHARA_STATE_BUSY	1
97 #define			SAHARA_STATE_ERR	2
98 #define			SAHARA_STATE_FAULT	3
99 #define			SAHARA_STATE_COMPLETE	4
100 #define			SAHARA_STATE_COMP_FLAG	(1 << 2)
101 #define		SAHARA_STATUS_DAR_FULL		(1 << 3)
102 #define		SAHARA_STATUS_ERROR		(1 << 4)
103 #define		SAHARA_STATUS_SECURE		(1 << 5)
104 #define		SAHARA_STATUS_FAIL		(1 << 6)
105 #define		SAHARA_STATUS_INIT		(1 << 7)
106 #define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
107 #define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
108 #define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
109 #define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
110 #define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
111 #define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
112 #define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
113 #define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
114 #define SAHARA_REG_ERRSTATUS	0x14
115 #define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
116 #define			SAHARA_ERRSOURCE_CHA	14
117 #define			SAHARA_ERRSOURCE_DMA	15
118 #define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
119 #define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
120 #define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
121 #define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
122 #define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
123 #define SAHARA_REG_FADDR	0x18
124 #define SAHARA_REG_CDAR		0x1C
125 #define SAHARA_REG_IDAR		0x20
126 
127 struct sahara_hw_desc {
128 	u32	hdr;
129 	u32	len1;
130 	u32	p1;
131 	u32	len2;
132 	u32	p2;
133 	u32	next;
134 };
135 
136 struct sahara_hw_link {
137 	u32	len;
138 	u32	p;
139 	u32	next;
140 };
141 
142 struct sahara_ctx {
143 	/* AES-specific context */
144 	int keylen;
145 	u8 key[AES_KEYSIZE_128];
146 	struct crypto_skcipher *fallback;
147 };
148 
149 struct sahara_aes_reqctx {
150 	unsigned long mode;
151 	u8 iv_out[AES_BLOCK_SIZE];
152 	struct skcipher_request fallback_req;	// keep at the end
153 };
154 
155 /**
156  * struct sahara_sha_reqctx - private data per request
157  * @buf: holds data for requests smaller than block_size
158  * @rembuf: used to prepare one block_size-aligned request
159  * @context: hw-specific context for request. Digest is extracted from this
160  * @mode: specifies what type of hw-descriptor needs to be built
161  * @digest_size: length of digest for this request
162  * @context_size: length of hw-context for this request.
163  *                Always digest_size + 4
164  * @buf_cnt: number of bytes saved in buf
165  * @sg_in_idx: descriptor index that carries the input links (0 if none)
166  * @in_sg: scatterlist for input data
167  * @in_sg_chain: scatterlists for chained input data
168  * @total: total number of bytes for transfer
169  * @last: is this the last block
170  * @first: is this the first block
171  * @active: inside a transfer
172  */
173 struct sahara_sha_reqctx {
174 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
175 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 	u8			context[SHA256_DIGEST_SIZE + 4];
177 	unsigned int		mode;
178 	unsigned int		digest_size;
179 	unsigned int		context_size;
180 	unsigned int		buf_cnt;
181 	unsigned int		sg_in_idx;
182 	struct scatterlist	*in_sg;
183 	struct scatterlist	in_sg_chain[2];
184 	size_t			total;
185 	unsigned int		last;
186 	unsigned int		first;
187 	unsigned int		active;
188 };
189 
190 struct sahara_dev {
191 	struct device		*device;
192 	unsigned int		version;
193 	void __iomem		*regs_base;
194 	struct clk		*clk_ipg;
195 	struct clk		*clk_ahb;
196 	spinlock_t		queue_spinlock;
197 	struct task_struct	*kthread;
198 	struct completion	dma_completion;
199 
200 	struct sahara_ctx	*ctx;
201 	struct crypto_queue	queue;
202 	unsigned long		flags;
203 
204 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
205 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
206 
207 	u8			*key_base;
208 	dma_addr_t		key_phys_base;
209 
210 	u8			*iv_base;
211 	dma_addr_t		iv_phys_base;
212 
213 	u8			*context_base;
214 	dma_addr_t		context_phys_base;
215 
216 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
217 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
218 
219 	size_t			total;
220 	struct scatterlist	*in_sg;
221 	int		nb_in_sg;
222 	struct scatterlist	*out_sg;
223 	int		nb_out_sg;
224 };
225 
226 static struct sahara_dev *dev_ptr;
227 
228 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
229 {
230 	writel(data, dev->regs_base + reg);
231 }
232 
233 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
234 {
235 	return readl(dev->regs_base + reg);
236 }
237 
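/*
 * Build the header for the key/IV load descriptor. Each mode bit added
 * below also toggles the parity bit so the header keeps an odd number
 * of bits set, matching the parity rule used in sahara_sha_init_hdr().
 */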
238 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
239 {
240 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
241 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
242 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
243 
244 	if (dev->flags & FLAGS_CBC) {
245 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
246 		hdr ^= SAHARA_HDR_PARITY_BIT;
247 	}
248 
249 	if (dev->flags & FLAGS_ENCRYPT) {
250 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
251 		hdr ^= SAHARA_HDR_PARITY_BIT;
252 	}
253 
254 	return hdr;
255 }
256 
257 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
258 {
259 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
260 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
261 }
262 
263 static const char *sahara_err_src[16] = {
264 	"No error",
265 	"Header error",
266 	"Descriptor length error",
267 	"Descriptor length or pointer error",
268 	"Link length error",
269 	"Link pointer error",
270 	"Input buffer error",
271 	"Output buffer error",
272 	"Output buffer starvation",
273 	"Internal state fault",
274 	"General descriptor problem",
275 	"Reserved",
276 	"Descriptor address error",
277 	"Link address error",
278 	"CHA error",
279 	"DMA error"
280 };
281 
282 static const char *sahara_err_dmasize[4] = {
283 	"Byte transfer",
284 	"Half-word transfer",
285 	"Word transfer",
286 	"Reserved"
287 };
288 
289 static const char *sahara_err_dmasrc[8] = {
290 	"No error",
291 	"AHB bus error",
292 	"Internal IP bus error",
293 	"Parity error",
294 	"DMA crosses 256 byte boundary",
295 	"DMA is busy",
296 	"Reserved",
297 	"DMA HW error"
298 };
299 
300 static const char *sahara_cha_errsrc[12] = {
301 	"Input buffer non-empty",
302 	"Illegal address",
303 	"Illegal mode",
304 	"Illegal data size",
305 	"Illegal key size",
306 	"Write during processing",
307 	"CTX read during processing",
308 	"HW error",
309 	"Input buffer disabled/underflow",
310 	"Output buffer disabled/overflow",
311 	"DES key parity error",
312 	"Reserved"
313 };
314 
315 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
316 
317 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
318 {
319 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
320 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
321 
322 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
323 
324 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
325 
326 	if (source == SAHARA_ERRSOURCE_DMA) {
327 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
328 			dev_err(dev->device, "		* DMA read.\n");
329 		else
330 			dev_err(dev->device, "		* DMA write.\n");
331 
332 		dev_err(dev->device, "		* %s.\n",
333 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
334 		dev_err(dev->device, "		* %s.\n",
335 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
336 	} else if (source == SAHARA_ERRSOURCE_CHA) {
337 		dev_err(dev->device, "		* %s.\n",
338 			sahara_cha_errsrc[chasrc]);
339 		dev_err(dev->device, "		* %s.\n",
340 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
341 	}
342 	dev_err(dev->device, "\n");
343 }
344 
345 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
346 
347 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
348 {
349 	u8 state;
350 
351 	if (!__is_defined(DEBUG))
352 		return;
353 
354 	state = SAHARA_STATUS_GET_STATE(status);
355 
356 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
357 		__func__, status);
358 
359 	dev_dbg(dev->device, "	- State = %d:\n", state);
360 	if (state & SAHARA_STATE_COMP_FLAG)
361 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
362 
363 	dev_dbg(dev->device, "		* %s.\n",
364 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
365 
366 	if (status & SAHARA_STATUS_DAR_FULL)
367 		dev_dbg(dev->device, "	- DAR Full.\n");
368 	if (status & SAHARA_STATUS_ERROR)
369 		dev_dbg(dev->device, "	- Error.\n");
370 	if (status & SAHARA_STATUS_SECURE)
371 		dev_dbg(dev->device, "	- Secure.\n");
372 	if (status & SAHARA_STATUS_FAIL)
373 		dev_dbg(dev->device, "	- Fail.\n");
374 	if (status & SAHARA_STATUS_RNG_RESEED)
375 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
376 	if (status & SAHARA_STATUS_ACTIVE_RNG)
377 		dev_dbg(dev->device, "	- RNG Active.\n");
378 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
379 		dev_dbg(dev->device, "	- MDHA Active.\n");
380 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
381 		dev_dbg(dev->device, "	- SKHA Active.\n");
382 
383 	if (status & SAHARA_STATUS_MODE_BATCH)
384 		dev_dbg(dev->device, "	- Batch Mode.\n");
385 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
386 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
387 	else if (status & SAHARA_STATUS_MODE_DEBUG)
388 		dev_dbg(dev->device, "	- Debug Mode.\n");
389 
390 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
391 	       SAHARA_STATUS_GET_ISTATE(status));
392 
393 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
394 		sahara_read(dev, SAHARA_REG_CDAR));
395 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
396 		sahara_read(dev, SAHARA_REG_IDAR));
397 }
398 
399 static void sahara_dump_descriptors(struct sahara_dev *dev)
400 {
401 	int i;
402 
403 	if (!__is_defined(DEBUG))
404 		return;
405 
406 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
407 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
408 			i, &dev->hw_phys_desc[i]);
409 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
410 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
411 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
412 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
413 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
414 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
415 			dev->hw_desc[i]->next);
416 	}
417 	dev_dbg(dev->device, "\n");
418 }
419 
420 static void sahara_dump_links(struct sahara_dev *dev)
421 {
422 	int i;
423 
424 	if (!__is_defined(DEBUG))
425 		return;
426 
427 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
428 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
429 			i, &dev->hw_phys_link[i]);
430 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
431 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
432 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
433 			dev->hw_link[i]->next);
434 	}
435 	dev_dbg(dev->device, "\n");
436 }
437 
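/*
 * Build the two-descriptor AES chain: descriptor 0 loads the key (and the
 * IV for CBC), descriptor 1 carries the input/output scatter-gather link
 * tables. Writing the address of descriptor 0 to DAR starts the transfer.
 */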
438 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
439 {
440 	struct sahara_ctx *ctx = dev->ctx;
441 	struct scatterlist *sg;
442 	int ret;
443 	int i, j;
444 	int idx = 0;
445 	u32 len;
446 
447 	memcpy(dev->key_base, ctx->key, ctx->keylen);
448 
449 	if (dev->flags & FLAGS_CBC) {
450 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
451 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
452 	} else {
453 		dev->hw_desc[idx]->len1 = 0;
454 		dev->hw_desc[idx]->p1 = 0;
455 	}
456 	dev->hw_desc[idx]->len2 = ctx->keylen;
457 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
458 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
459 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
460 
461 	idx++;
462 
464 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
465 	if (dev->nb_in_sg < 0) {
466 		dev_err(dev->device, "Invalid number of src SG entries.\n");
467 		return dev->nb_in_sg;
468 	}
469 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
470 	if (dev->nb_out_sg < 0) {
471 		dev_err(dev->device, "Invalid number of dst SG entries.\n");
472 		return dev->nb_out_sg;
473 	}
474 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
475 		dev_err(dev->device, "not enough hw links (%d)\n",
476 			dev->nb_in_sg + dev->nb_out_sg);
477 		return -EINVAL;
478 	}
479 
480 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
481 			 DMA_TO_DEVICE);
482 	if (!ret) {
483 		dev_err(dev->device, "couldn't map in sg\n");
484 		return -EINVAL;
485 	}
486 
487 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
488 			 DMA_FROM_DEVICE);
489 	if (!ret) {
490 		dev_err(dev->device, "couldn't map out sg\n");
491 		goto unmap_in;
492 	}
493 
494 	/* Create input links */
495 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
496 	sg = dev->in_sg;
497 	len = dev->total;
498 	for (i = 0; i < dev->nb_in_sg; i++) {
499 		dev->hw_link[i]->len = min(len, sg->length);
500 		dev->hw_link[i]->p = sg->dma_address;
501 		if (i == (dev->nb_in_sg - 1)) {
502 			dev->hw_link[i]->next = 0;
503 		} else {
504 			len -= min(len, sg->length);
505 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
506 			sg = sg_next(sg);
507 		}
508 	}
509 
510 	/* Create output links */
511 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
512 	sg = dev->out_sg;
513 	len = dev->total;
514 	for (j = i; j < dev->nb_out_sg + i; j++) {
515 		dev->hw_link[j]->len = min(len, sg->length);
516 		dev->hw_link[j]->p = sg->dma_address;
517 		if (j == (dev->nb_out_sg + i - 1)) {
518 			dev->hw_link[j]->next = 0;
519 		} else {
520 			len -= min(len, sg->length);
521 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
522 			sg = sg_next(sg);
523 		}
524 	}
525 
526 	/* Fill remaining fields of hw_desc[1] */
527 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
528 	dev->hw_desc[idx]->len1 = dev->total;
529 	dev->hw_desc[idx]->len2 = dev->total;
530 	dev->hw_desc[idx]->next = 0;
531 
532 	sahara_dump_descriptors(dev);
533 	sahara_dump_links(dev);
534 
535 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
536 
537 	return 0;
538 
539 unmap_in:
540 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
541 		DMA_TO_DEVICE);
542 
543 	return -EINVAL;
544 }
545 
546 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
547 {
548 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
549 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
550 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
551 
552 	/* Update IV buffer to contain the last ciphertext block */
553 	if (rctx->mode & FLAGS_ENCRYPT) {
554 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
555 				   ivsize, req->cryptlen - ivsize);
556 	} else {
557 		memcpy(req->iv, rctx->iv_out, ivsize);
558 	}
559 }
560 
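/*
 * Process one AES request on the hardware: set up the descriptor chain,
 * start the engine and wait (with a timeout) for the completion signalled
 * by the interrupt handler. Runs in the context of the queue kthread.
 */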
561 static int sahara_aes_process(struct skcipher_request *req)
562 {
563 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
564 	struct sahara_dev *dev = dev_ptr;
565 	struct sahara_ctx *ctx;
566 	struct sahara_aes_reqctx *rctx;
567 	int ret;
568 	unsigned long timeout;
569 
570 	/* Request is ready to be dispatched by the device */
571 	dev_dbg(dev->device,
572 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
573 		req->cryptlen, req->src, req->dst);
574 
575 	/* assign new request to device */
576 	dev->total = req->cryptlen;
577 	dev->in_sg = req->src;
578 	dev->out_sg = req->dst;
579 
580 	rctx = skcipher_request_ctx(req);
581 	ctx = crypto_skcipher_ctx(skcipher);
582 	rctx->mode &= FLAGS_MODE_MASK;
583 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
584 
585 	if ((dev->flags & FLAGS_CBC) && req->iv) {
586 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
587 
588 		memcpy(dev->iv_base, req->iv, ivsize);
589 
590 		if (!(dev->flags & FLAGS_ENCRYPT)) {
591 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
592 					   rctx->iv_out, ivsize,
593 					   req->cryptlen - ivsize);
594 		}
595 	}
596 
597 	/* assign new context to device */
598 	dev->ctx = ctx;
599 
600 	reinit_completion(&dev->dma_completion);
601 
602 	ret = sahara_hw_descriptor_create(dev);
603 	if (ret)
604 		return -EINVAL;
605 
606 	timeout = wait_for_completion_timeout(&dev->dma_completion,
607 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
608 
609 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
610 		DMA_FROM_DEVICE);
611 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
612 		DMA_TO_DEVICE);
613 
614 	if (!timeout) {
615 		dev_err(dev->device, "AES timeout\n");
616 		return -ETIMEDOUT;
617 	}
618 
619 	if ((dev->flags & FLAGS_CBC) && req->iv)
620 		sahara_aes_cbc_update_iv(req);
621 
622 	return 0;
623 }
624 
625 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
626 			     unsigned int keylen)
627 {
628 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
629 
630 	ctx->keylen = keylen;
631 
632 	/* SAHARA only supports 128-bit keys */
633 	if (keylen == AES_KEYSIZE_128) {
634 		memcpy(ctx->key, key, keylen);
635 		return 0;
636 	}
637 
638 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
639 		return -EINVAL;
640 
641 	/*
642 	 * The requested key size is not supported by HW, do a fallback.
643 	 */
644 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
645 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
646 						 CRYPTO_TFM_REQ_MASK);
647 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
648 }
649 
650 static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
651 {
652 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
653 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
654 		crypto_skcipher_reqtfm(req));
655 
656 	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
657 	skcipher_request_set_callback(&rctx->fallback_req,
658 				      req->base.flags,
659 				      req->base.complete,
660 				      req->base.data);
661 	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
662 				   req->dst, req->cryptlen, req->iv);
663 
664 	if (mode & FLAGS_ENCRYPT)
665 		return crypto_skcipher_encrypt(&rctx->fallback_req);
666 
667 	return crypto_skcipher_decrypt(&rctx->fallback_req);
668 }
669 
670 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
671 {
672 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
673 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
674 		crypto_skcipher_reqtfm(req));
675 	struct sahara_dev *dev = dev_ptr;
676 	int err = 0;
677 
678 	if (!req->cryptlen)
679 		return 0;
680 
681 	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
682 		return sahara_aes_fallback(req, mode);
683 
684 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
685 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
686 
687 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
688 		dev_err(dev->device,
689 			"request size is not an exact number of AES blocks\n");
690 		return -EINVAL;
691 	}
692 
693 	rctx->mode = mode;
694 
695 	spin_lock_bh(&dev->queue_spinlock);
696 	err = crypto_enqueue_request(&dev->queue, &req->base);
697 	spin_unlock_bh(&dev->queue_spinlock);
698 
699 	wake_up_process(dev->kthread);
700 
701 	return err;
702 }
703 
704 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
705 {
706 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
707 }
708 
709 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
710 {
711 	return sahara_aes_crypt(req, 0);
712 }
713 
714 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
715 {
716 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
717 }
718 
719 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
720 {
721 	return sahara_aes_crypt(req, FLAGS_CBC);
722 }
723 
724 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
725 {
726 	const char *name = crypto_tfm_alg_name(&tfm->base);
727 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
728 
729 	ctx->fallback = crypto_alloc_skcipher(name, 0,
730 					      CRYPTO_ALG_NEED_FALLBACK);
731 	if (IS_ERR(ctx->fallback)) {
732 		pr_err("Error allocating fallback algo %s\n", name);
733 		return PTR_ERR(ctx->fallback);
734 	}
735 
736 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
737 					 crypto_skcipher_reqsize(ctx->fallback));
738 
739 	return 0;
740 }
741 
742 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
743 {
744 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
745 
746 	crypto_free_skcipher(ctx->fallback);
747 }
748 
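/*
 * Build the MDHA descriptor header for the current request state and set
 * the parity bit if needed so the header has an odd number of bits set.
 */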
749 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
750 			      struct sahara_sha_reqctx *rctx)
751 {
752 	u32 hdr = rctx->mode;
755 
756 	if (rctx->first) {
757 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
758 		hdr |= SAHARA_HDR_MDHA_INIT;
759 	} else {
760 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
761 	}
762 
763 	if (rctx->last)
764 		hdr |= SAHARA_HDR_MDHA_PDATA;
765 
766 	if (hweight_long(hdr) % 2 == 0)
767 		hdr |= SAHARA_HDR_PARITY_BIT;
768 
769 	return hdr;
770 }
771 
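/*
 * DMA-map the input scatterlist and describe it with hardware link
 * entries starting at hw_link[start]. Returns the index of the next free
 * link entry or a negative error code.
 */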
772 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
773 				       struct sahara_sha_reqctx *rctx,
774 				       int start)
775 {
776 	struct scatterlist *sg;
777 	unsigned int i;
778 	int ret;
779 
780 	dev->in_sg = rctx->in_sg;
781 
782 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
783 	if (dev->nb_in_sg < 0) {
784 		dev_err(dev->device, "Invalid number of src SG entries.\n");
785 		return dev->nb_in_sg;
786 	}
787 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
788 		dev_err(dev->device, "not enough hw links (%d)\n",
789 			dev->nb_in_sg);
790 		return -EINVAL;
791 	}
792 
793 	sg = dev->in_sg;
794 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
795 	if (!ret)
796 		return -EFAULT;
797 
798 	for (i = start; i < dev->nb_in_sg + start; i++) {
799 		dev->hw_link[i]->len = sg->length;
800 		dev->hw_link[i]->p = sg->dma_address;
801 		if (i == (dev->nb_in_sg + start - 1)) {
802 			dev->hw_link[i]->next = 0;
803 		} else {
804 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
805 			sg = sg_next(sg);
806 		}
807 	}
808 
809 	return i;
810 }
811 
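/*
 * Build the MDHA data descriptor: len1/p1 describe the input links (if
 * there is data), len2/p2 point at a link entry through which the
 * hardware writes the updated context (digest plus message length) back
 * to context_base.
 */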
812 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
813 						struct sahara_sha_reqctx *rctx,
814 						struct ahash_request *req,
815 						int index)
816 {
817 	unsigned int result_len;
818 	int i = index;
819 
820 	if (rctx->first)
821 		/* Create initial descriptor: #8 */
822 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
823 	else
824 		/* Create hash descriptor: #10. Must follow #6. */
825 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
826 
827 	dev->hw_desc[index]->len1 = rctx->total;
828 	if (dev->hw_desc[index]->len1 == 0) {
829 		/* if len1 is 0, p1 must be 0, too */
830 		dev->hw_desc[index]->p1 = 0;
831 		rctx->sg_in_idx = 0;
832 	} else {
833 		/* Create input links */
834 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
835 		i = sahara_sha_hw_links_create(dev, rctx, index);
836 
837 		rctx->sg_in_idx = index;
838 		if (i < 0)
839 			return i;
840 	}
841 
842 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
843 
844 	/* Save the context for the next operation */
845 	result_len = rctx->context_size;
846 	dev->hw_link[i]->p = dev->context_phys_base;
847 
848 	dev->hw_link[i]->len = result_len;
849 	dev->hw_desc[index]->len2 = result_len;
850 
851 	dev->hw_link[i]->next = 0;
852 
853 	return 0;
854 }
855 
856 /*
857  * Load descriptor aka #6
858  *
859  * To load a previously saved context back to the MDHA unit
860  *
861  * p1: Saved Context
862  * p2: NULL
863  *
864  */
865 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
866 						struct sahara_sha_reqctx *rctx,
867 						struct ahash_request *req,
868 						int index)
869 {
870 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
871 
872 	dev->hw_desc[index]->len1 = rctx->context_size;
873 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
874 	dev->hw_desc[index]->len2 = 0;
875 	dev->hw_desc[index]->p2 = 0;
876 
877 	dev->hw_link[index]->len = rctx->context_size;
878 	dev->hw_link[index]->p = dev->context_phys_base;
879 	dev->hw_link[index]->next = 0;
880 
881 	return 0;
882 }
883 
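/*
 * Shorten the scatterlist so it covers exactly nbytes: the entry that
 * reaches nbytes is truncated and marked as the end of the list.
 * Returns the number of bytes that could not be covered.
 */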
884 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
885 {
886 	if (!sg || !sg->length)
887 		return nbytes;
888 
889 	while (nbytes && sg) {
890 		if (nbytes <= sg->length) {
891 			sg->length = nbytes;
892 			sg_mark_end(sg);
893 			break;
894 		}
895 		nbytes -= sg->length;
896 		sg = sg_next(sg);
897 	}
898 
899 	return nbytes;
900 }
901 
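/*
 * Gather the data for one hash operation: requests smaller than a block
 * are buffered for later, a partial trailing block is saved in buf for
 * the next call, and previously buffered data is chained in front of the
 * new data. Returns 0 if everything was buffered (nothing to process) or
 * -EINPROGRESS if data is ready for the hardware.
 */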
902 static int sahara_sha_prepare_request(struct ahash_request *req)
903 {
904 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
905 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
906 	unsigned int hash_later;
907 	unsigned int block_size;
908 	unsigned int len;
909 
910 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
911 
912 	/* append bytes from previous operation */
913 	len = rctx->buf_cnt + req->nbytes;
914 
915 	/* only the last transfer can be padded in hardware */
916 	if (!rctx->last && (len < block_size)) {
917 		/* too little data, save it for the next operation */
918 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
919 					 0, req->nbytes, 0);
920 		rctx->buf_cnt += req->nbytes;
921 
922 		return 0;
923 	}
924 
925 	/* add data from previous operation first */
926 	if (rctx->buf_cnt)
927 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
928 
929 	/* data must always be a multiple of block_size */
930 	hash_later = rctx->last ? 0 : len & (block_size - 1);
931 	if (hash_later) {
932 		unsigned int offset = req->nbytes - hash_later;
933 		/* Save remaining bytes for later use */
934 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
935 					hash_later, 0);
936 	}
937 
938 	/* nbytes should now be a multiple of block_size */
939 	req->nbytes = req->nbytes - hash_later;
940 
941 	sahara_walk_and_recalc(req->src, req->nbytes);
942 
943 	/* have data from previous operation and current */
944 	if (rctx->buf_cnt && req->nbytes) {
945 		sg_init_table(rctx->in_sg_chain, 2);
946 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
947 
948 		sg_chain(rctx->in_sg_chain, 2, req->src);
949 
950 		rctx->total = req->nbytes + rctx->buf_cnt;
951 		rctx->in_sg = rctx->in_sg_chain;
952 
953 		req->src = rctx->in_sg_chain;
954 	/* only data from previous operation */
955 	} else if (rctx->buf_cnt) {
956 		if (req->src)
957 			rctx->in_sg = req->src;
958 		else
959 			rctx->in_sg = rctx->in_sg_chain;
960 		/* buf was copied into rembuf above */
961 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
962 		rctx->total = rctx->buf_cnt;
963 	/* no data from previous operation */
964 	} else {
965 		rctx->in_sg = req->src;
966 		rctx->total = req->nbytes;
967 		req->src = rctx->in_sg;
968 	}
969 
970 	/* on next call, we only have the remaining data in the buffer */
971 	rctx->buf_cnt = hash_later;
972 
973 	return -EINPROGRESS;
974 }
975 
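/*
 * Process one hash operation: build the descriptor chain (reloading the
 * saved MDHA context first unless this is the first block), start the
 * hardware and wait for completion, then save the updated context and,
 * on the last block, copy the digest into req->result.
 */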
976 static int sahara_sha_process(struct ahash_request *req)
977 {
978 	struct sahara_dev *dev = dev_ptr;
979 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
980 	int ret;
981 	unsigned long timeout;
982 
983 	ret = sahara_sha_prepare_request(req);
984 	if (!ret)
985 		return ret;
986 
987 	if (rctx->first) {
988 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
989 		if (ret)
990 			return ret;
991 
992 		dev->hw_desc[0]->next = 0;
993 		rctx->first = 0;
994 	} else {
995 		memcpy(dev->context_base, rctx->context, rctx->context_size);
996 
997 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
998 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
999 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1000 		if (ret)
1001 			return ret;
1002 
1003 		dev->hw_desc[1]->next = 0;
1004 	}
1005 
1006 	sahara_dump_descriptors(dev);
1007 	sahara_dump_links(dev);
1008 
1009 	reinit_completion(&dev->dma_completion);
1010 
1011 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1012 
1013 	timeout = wait_for_completion_timeout(&dev->dma_completion,
1014 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1015 
1016 	if (rctx->sg_in_idx)
1017 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1018 			     DMA_TO_DEVICE);
1019 
1020 	if (!timeout) {
1021 		dev_err(dev->device, "SHA timeout\n");
1022 		return -ETIMEDOUT;
1023 	}
1024 
1025 	memcpy(rctx->context, dev->context_base, rctx->context_size);
1026 
1027 	if (req->result && rctx->last)
1028 		memcpy(req->result, rctx->context, rctx->digest_size);
1029 
1030 	return 0;
1031 }
1032 
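/*
 * Worker kthread: dequeue one request at a time and dispatch it to the
 * AES or SHA processing path, completing the request with the result.
 * Sleeps whenever the queue is empty.
 */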
1033 static int sahara_queue_manage(void *data)
1034 {
1035 	struct sahara_dev *dev = data;
1036 	struct crypto_async_request *async_req;
1037 	struct crypto_async_request *backlog;
1038 	int ret = 0;
1039 
1040 	do {
1041 		__set_current_state(TASK_INTERRUPTIBLE);
1042 
1043 		spin_lock_bh(&dev->queue_spinlock);
1044 		backlog = crypto_get_backlog(&dev->queue);
1045 		async_req = crypto_dequeue_request(&dev->queue);
1046 		spin_unlock_bh(&dev->queue_spinlock);
1047 
1048 		if (backlog)
1049 			crypto_request_complete(backlog, -EINPROGRESS);
1050 
1051 		if (async_req) {
1052 			if (crypto_tfm_alg_type(async_req->tfm) ==
1053 			    CRYPTO_ALG_TYPE_AHASH) {
1054 				struct ahash_request *req =
1055 					ahash_request_cast(async_req);
1056 
1057 				ret = sahara_sha_process(req);
1058 			} else {
1059 				struct skcipher_request *req =
1060 					skcipher_request_cast(async_req);
1061 
1062 				ret = sahara_aes_process(req);
1063 			}
1064 
1065 			crypto_request_complete(async_req, ret);
1066 
1067 			continue;
1068 		}
1069 
1070 		schedule();
1071 	} while (!kthread_should_stop());
1072 
1073 	return 0;
1074 }
1075 
1076 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1077 {
1078 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1079 	struct sahara_dev *dev = dev_ptr;
1080 	int ret;
1081 
1082 	if (!req->nbytes && !last)
1083 		return 0;
1084 
1085 	rctx->last = last;
1086 
1087 	if (!rctx->active) {
1088 		rctx->active = 1;
1089 		rctx->first = 1;
1090 	}
1091 
1092 	spin_lock_bh(&dev->queue_spinlock);
1093 	ret = crypto_enqueue_request(&dev->queue, &req->base);
1094 	spin_unlock_bh(&dev->queue_spinlock);
1095 
1096 	wake_up_process(dev->kthread);
1097 
1098 	return ret;
1099 }
1100 
1101 static int sahara_sha_init(struct ahash_request *req)
1102 {
1103 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1104 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1105 
1106 	memset(rctx, 0, sizeof(*rctx));
1107 
1108 	switch (crypto_ahash_digestsize(tfm)) {
1109 	case SHA1_DIGEST_SIZE:
1110 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1111 		rctx->digest_size = SHA1_DIGEST_SIZE;
1112 		break;
1113 	case SHA256_DIGEST_SIZE:
1114 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1115 		rctx->digest_size = SHA256_DIGEST_SIZE;
1116 		break;
1117 	default:
1118 		return -EINVAL;
1119 	}
1120 
1121 	rctx->context_size = rctx->digest_size + 4;
1122 	rctx->active = 0;
1123 
1124 	return 0;
1125 }
1126 
1127 static int sahara_sha_update(struct ahash_request *req)
1128 {
1129 	return sahara_sha_enqueue(req, 0);
1130 }
1131 
1132 static int sahara_sha_final(struct ahash_request *req)
1133 {
1134 	req->nbytes = 0;
1135 	return sahara_sha_enqueue(req, 1);
1136 }
1137 
1138 static int sahara_sha_finup(struct ahash_request *req)
1139 {
1140 	return sahara_sha_enqueue(req, 1);
1141 }
1142 
1143 static int sahara_sha_digest(struct ahash_request *req)
1144 {
1145 	sahara_sha_init(req);
1146 
1147 	return sahara_sha_finup(req);
1148 }
1149 
1150 static int sahara_sha_export(struct ahash_request *req, void *out)
1151 {
1152 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1153 
1154 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1155 
1156 	return 0;
1157 }
1158 
1159 static int sahara_sha_import(struct ahash_request *req, const void *in)
1160 {
1161 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1162 
1163 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1164 
1165 	return 0;
1166 }
1167 
1168 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1169 {
1170 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1171 				 sizeof(struct sahara_sha_reqctx));
1172 
1173 	return 0;
1174 }
1175 
1176 static struct skcipher_alg aes_algs[] = {
1177 {
1178 	.base.cra_name		= "ecb(aes)",
1179 	.base.cra_driver_name	= "sahara-ecb-aes",
1180 	.base.cra_priority	= 300,
1181 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1182 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1183 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1184 	.base.cra_alignmask	= 0x0,
1185 	.base.cra_module	= THIS_MODULE,
1186 
1187 	.init			= sahara_aes_init_tfm,
1188 	.exit			= sahara_aes_exit_tfm,
1189 	.min_keysize		= AES_MIN_KEY_SIZE,
1190 	.max_keysize		= AES_MAX_KEY_SIZE,
1191 	.setkey			= sahara_aes_setkey,
1192 	.encrypt		= sahara_aes_ecb_encrypt,
1193 	.decrypt		= sahara_aes_ecb_decrypt,
1194 }, {
1195 	.base.cra_name		= "cbc(aes)",
1196 	.base.cra_driver_name	= "sahara-cbc-aes",
1197 	.base.cra_priority	= 300,
1198 	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1199 	.base.cra_blocksize	= AES_BLOCK_SIZE,
1200 	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1201 	.base.cra_alignmask	= 0x0,
1202 	.base.cra_module	= THIS_MODULE,
1203 
1204 	.init			= sahara_aes_init_tfm,
1205 	.exit			= sahara_aes_exit_tfm,
1206 	.min_keysize		= AES_MIN_KEY_SIZE,
1207 	.max_keysize		= AES_MAX_KEY_SIZE,
1208 	.ivsize			= AES_BLOCK_SIZE,
1209 	.setkey			= sahara_aes_setkey,
1210 	.encrypt		= sahara_aes_cbc_encrypt,
1211 	.decrypt		= sahara_aes_cbc_decrypt,
1212 }
1213 };
1214 
1215 static struct ahash_alg sha_v3_algs[] = {
1216 {
1217 	.init		= sahara_sha_init,
1218 	.update		= sahara_sha_update,
1219 	.final		= sahara_sha_final,
1220 	.finup		= sahara_sha_finup,
1221 	.digest		= sahara_sha_digest,
1222 	.export		= sahara_sha_export,
1223 	.import		= sahara_sha_import,
1224 	.halg.digestsize	= SHA1_DIGEST_SIZE,
1225 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1226 	.halg.base	= {
1227 		.cra_name		= "sha1",
1228 		.cra_driver_name	= "sahara-sha1",
1229 		.cra_priority		= 300,
1230 		.cra_flags		= CRYPTO_ALG_ASYNC |
1231 						CRYPTO_ALG_NEED_FALLBACK,
1232 		.cra_blocksize		= SHA1_BLOCK_SIZE,
1233 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1234 		.cra_alignmask		= 0,
1235 		.cra_module		= THIS_MODULE,
1236 		.cra_init		= sahara_sha_cra_init,
1237 	}
1238 },
1239 };
1240 
1241 static struct ahash_alg sha_v4_algs[] = {
1242 {
1243 	.init		= sahara_sha_init,
1244 	.update		= sahara_sha_update,
1245 	.final		= sahara_sha_final,
1246 	.finup		= sahara_sha_finup,
1247 	.digest		= sahara_sha_digest,
1248 	.export		= sahara_sha_export,
1249 	.import		= sahara_sha_import,
1250 	.halg.digestsize	= SHA256_DIGEST_SIZE,
1251 	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1252 	.halg.base	= {
1253 		.cra_name		= "sha256",
1254 		.cra_driver_name	= "sahara-sha256",
1255 		.cra_priority		= 300,
1256 		.cra_flags		= CRYPTO_ALG_ASYNC |
1257 						CRYPTO_ALG_NEED_FALLBACK,
1258 		.cra_blocksize		= SHA256_BLOCK_SIZE,
1259 		.cra_ctxsize		= sizeof(struct sahara_ctx),
1260 		.cra_alignmask		= 0,
1261 		.cra_module		= THIS_MODULE,
1262 		.cra_init		= sahara_sha_cra_init,
1263 	}
1264 },
1265 };
1266 
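/*
 * Interrupt handler: acknowledge and clear the interrupt, decode the
 * status and, unless the engine is still busy, wake the waiting thread
 * through dma_completion.
 */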
1267 static irqreturn_t sahara_irq_handler(int irq, void *data)
1268 {
1269 	struct sahara_dev *dev = data;
1270 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1271 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1272 
1273 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1274 		     SAHARA_REG_CMD);
1275 
1276 	sahara_decode_status(dev, stat);
1277 
1278 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
1279 		return IRQ_NONE;
1280 
1281 	if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
1282 		sahara_decode_error(dev, err);
1283 
1284 	complete(&dev->dma_completion);
1285 
1286 	return IRQ_HANDLED;
1287 }
1288 
1290 static int sahara_register_algs(struct sahara_dev *dev)
1291 {
1292 	int err;
1293 	unsigned int i, j, k, l;
1294 
1295 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1296 		err = crypto_register_skcipher(&aes_algs[i]);
1297 		if (err)
1298 			goto err_aes_algs;
1299 	}
1300 
1301 	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1302 		err = crypto_register_ahash(&sha_v3_algs[k]);
1303 		if (err)
1304 			goto err_sha_v3_algs;
1305 	}
1306 
1307 	if (dev->version > SAHARA_VERSION_3)
1308 		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1309 			err = crypto_register_ahash(&sha_v4_algs[l]);
1310 			if (err)
1311 				goto err_sha_v4_algs;
1312 		}
1313 
1314 	return 0;
1315 
1316 err_sha_v4_algs:
1317 	for (j = 0; j < l; j++)
1318 		crypto_unregister_ahash(&sha_v4_algs[j]);
1319 
1320 err_sha_v3_algs:
1321 	for (j = 0; j < k; j++)
1322 		crypto_unregister_ahash(&sha_v3_algs[j]);
1323 
1324 err_aes_algs:
1325 	for (j = 0; j < i; j++)
1326 		crypto_unregister_skcipher(&aes_algs[j]);
1327 
1328 	return err;
1329 }
1330 
1331 static void sahara_unregister_algs(struct sahara_dev *dev)
1332 {
1333 	unsigned int i;
1334 
1335 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1336 		crypto_unregister_skcipher(&aes_algs[i]);
1337 
1338 	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1339 		crypto_unregister_ahash(&sha_v3_algs[i]);
1340 
1341 	if (dev->version > SAHARA_VERSION_3)
1342 		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1343 			crypto_unregister_ahash(&sha_v4_algs[i]);
1344 }
1345 
1346 static const struct of_device_id sahara_dt_ids[] = {
1347 	{ .compatible = "fsl,imx53-sahara" },
1348 	{ .compatible = "fsl,imx27-sahara" },
1349 	{ /* sentinel */ }
1350 };
1351 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1352 
1353 static int sahara_probe(struct platform_device *pdev)
1354 {
1355 	struct sahara_dev *dev;
1356 	u32 version;
1357 	int irq;
1358 	int err;
1359 	int i;
1360 
1361 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1362 	if (!dev)
1363 		return -ENOMEM;
1364 
1365 	dev->device = &pdev->dev;
1366 	platform_set_drvdata(pdev, dev);
1367 
1368 	/* Get the base address */
1369 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1370 	if (IS_ERR(dev->regs_base))
1371 		return PTR_ERR(dev->regs_base);
1372 
1373 	/* Get the IRQ */
1374 	irq = platform_get_irq(pdev, 0);
1375 	if (irq < 0)
1376 		return irq;
1377 
1378 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1379 			       0, dev_name(&pdev->dev), dev);
1380 	if (err) {
1381 		dev_err(&pdev->dev, "failed to request irq\n");
1382 		return err;
1383 	}
1384 
1385 	/* clocks */
1386 	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1387 	if (IS_ERR(dev->clk_ipg)) {
1388 		dev_err(&pdev->dev, "Could not get ipg clock\n");
1389 		return PTR_ERR(dev->clk_ipg);
1390 	}
1391 
1392 	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1393 	if (IS_ERR(dev->clk_ahb)) {
1394 		dev_err(&pdev->dev, "Could not get ahb clock\n");
1395 		return PTR_ERR(dev->clk_ahb);
1396 	}
1397 
1398 	/* Allocate HW descriptors */
1399 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1400 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1401 			&dev->hw_phys_desc[0], GFP_KERNEL);
1402 	if (!dev->hw_desc[0]) {
1403 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1404 		return -ENOMEM;
1405 	}
1406 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1407 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1408 				sizeof(struct sahara_hw_desc);
1409 
1410 	/* Allocate space for iv and key */
1411 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1412 				&dev->key_phys_base, GFP_KERNEL);
1413 	if (!dev->key_base) {
1414 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1415 		return -ENOMEM;
1416 	}
1417 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1418 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1419 
1420 	/* Allocate space for context: largest digest + message length field */
1421 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1422 					SHA256_DIGEST_SIZE + 4,
1423 					&dev->context_phys_base, GFP_KERNEL);
1424 	if (!dev->context_base) {
1425 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1426 		return -ENOMEM;
1427 	}
1428 
1429 	/* Allocate space for HW links */
1430 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1431 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1432 			&dev->hw_phys_link[0], GFP_KERNEL);
1433 	if (!dev->hw_link[0]) {
1434 		dev_err(&pdev->dev, "Could not allocate hw links\n");
1435 		return -ENOMEM;
1436 	}
1437 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1438 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1439 					sizeof(struct sahara_hw_link);
1440 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1441 	}
1442 
1443 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1444 
1445 	spin_lock_init(&dev->queue_spinlock);
1446 
1447 	dev_ptr = dev;
1448 
1449 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1450 	if (IS_ERR(dev->kthread))
1451 		return PTR_ERR(dev->kthread);
1453 
1454 	init_completion(&dev->dma_completion);
1455 
1456 	err = clk_prepare_enable(dev->clk_ipg);
1457 	if (err)
1458 		return err;
1459 	err = clk_prepare_enable(dev->clk_ahb);
1460 	if (err)
1461 		goto clk_ipg_disable;
1462 
1463 	version = sahara_read(dev, SAHARA_REG_VERSION);
1464 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1465 		if (version != SAHARA_VERSION_3)
1466 			err = -ENODEV;
1467 	} else if (of_device_is_compatible(pdev->dev.of_node,
1468 			"fsl,imx53-sahara")) {
1469 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1470 			err = -ENODEV;
1471 		version = (version >> 8) & 0xff;
1472 	}
1473 	if (err == -ENODEV) {
1474 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1475 				version);
1476 		goto err_algs;
1477 	}
1478 
1479 	dev->version = version;
1480 
1481 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1482 		     SAHARA_REG_CMD);
1483 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1484 			SAHARA_CONTROL_SET_MAXBURST(8) |
1485 			SAHARA_CONTROL_RNG_AUTORSD |
1486 			SAHARA_CONTROL_ENABLE_INT,
1487 			SAHARA_REG_CONTROL);
1488 
1489 	err = sahara_register_algs(dev);
1490 	if (err)
1491 		goto err_algs;
1492 
1493 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1494 
1495 	return 0;
1496 
1497 err_algs:
1498 	kthread_stop(dev->kthread);
1499 	dev_ptr = NULL;
1500 	clk_disable_unprepare(dev->clk_ahb);
1501 clk_ipg_disable:
1502 	clk_disable_unprepare(dev->clk_ipg);
1503 
1504 	return err;
1505 }
1506 
1507 static void sahara_remove(struct platform_device *pdev)
1508 {
1509 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1510 
1511 	kthread_stop(dev->kthread);
1512 
1513 	sahara_unregister_algs(dev);
1514 
1515 	clk_disable_unprepare(dev->clk_ipg);
1516 	clk_disable_unprepare(dev->clk_ahb);
1517 
1518 	dev_ptr = NULL;
1519 }
1520 
1521 static struct platform_driver sahara_driver = {
1522 	.probe		= sahara_probe,
1523 	.remove_new	= sahara_remove,
1524 	.driver		= {
1525 		.name	= SAHARA_NAME,
1526 		.of_match_table = sahara_dt_ids,
1527 	},
1528 };
1529 
1530 module_platform_driver(sahara_driver);
1531 
1532 MODULE_LICENSE("GPL");
1533 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1534 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1535 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1536