xref: /linux/drivers/crypto/sahara.c (revision e02ea6f9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  *
5  * Support for SAHARA cryptographic accelerator.
6  *
7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8  * Copyright (c) 2013 Vista Silicon S.L.
9  * Author: Javier Martin <javier.martin@vista-silicon.com>
10  *
11  * Based on omap-aes.c and tegra-aes.c
12  */
13 
14 #include <crypto/aes.h>
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/skcipher.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/engine.h>
19 #include <crypto/sha1.h>
20 #include <crypto/sha2.h>
21 
22 #include <linux/clk.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/interrupt.h>
25 #include <linux/io.h>
26 #include <linux/irq.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/of.h>
30 #include <linux/platform_device.h>
31 #include <linux/spinlock.h>
32 
33 #define SHA_BUFFER_LEN				PAGE_SIZE
34 #define SAHARA_MAX_SHA_BLOCK_SIZE		SHA256_BLOCK_SIZE
35 
36 #define SAHARA_NAME				"sahara"
37 #define SAHARA_VERSION_3			3
38 #define SAHARA_VERSION_4			4
39 #define SAHARA_TIMEOUT_MS			1000
40 #define SAHARA_MAX_HW_DESC			2
41 #define SAHARA_MAX_HW_LINK			20
42 
43 #define FLAGS_MODE_MASK				0x000f
44 #define FLAGS_ENCRYPT				BIT(0)
45 #define FLAGS_CBC				BIT(1)
46 
47 #define SAHARA_HDR_BASE				0x00800000
48 #define SAHARA_HDR_SKHA_ALG_AES			0
49 #define SAHARA_HDR_SKHA_MODE_ECB		0
50 #define SAHARA_HDR_SKHA_OP_ENC			BIT(2)
51 #define SAHARA_HDR_SKHA_MODE_CBC		BIT(3)
52 #define SAHARA_HDR_FORM_DATA			(5 << 16)
53 #define SAHARA_HDR_FORM_KEY			BIT(19)
54 #define SAHARA_HDR_LLO				BIT(24)
55 #define SAHARA_HDR_CHA_SKHA			BIT(28)
56 #define SAHARA_HDR_CHA_MDHA			BIT(29)
57 #define SAHARA_HDR_PARITY_BIT			BIT(31)
58 
59 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY		0x20880000
60 #define SAHARA_HDR_MDHA_SET_MODE_HASH		0x208D0000
61 #define SAHARA_HDR_MDHA_HASH			0xA0850000
62 #define SAHARA_HDR_MDHA_STORE_DIGEST		0x20820000
63 #define SAHARA_HDR_MDHA_ALG_SHA1		0
64 #define SAHARA_HDR_MDHA_ALG_MD5			1
65 #define SAHARA_HDR_MDHA_ALG_SHA256		2
66 #define SAHARA_HDR_MDHA_ALG_SHA224		3
67 #define SAHARA_HDR_MDHA_PDATA			BIT(2)
68 #define SAHARA_HDR_MDHA_HMAC			BIT(3)
69 #define SAHARA_HDR_MDHA_INIT			BIT(5)
70 #define SAHARA_HDR_MDHA_IPAD			BIT(6)
71 #define SAHARA_HDR_MDHA_OPAD			BIT(7)
72 #define SAHARA_HDR_MDHA_SWAP			BIT(8)
73 #define SAHARA_HDR_MDHA_MAC_FULL		BIT(9)
74 #define SAHARA_HDR_MDHA_SSL			BIT(10)
75 
76 #define SAHARA_REG_VERSION			0x00
77 #define SAHARA_REG_DAR				0x04
78 #define SAHARA_REG_CONTROL			0x08
79 #define SAHARA_CONTROL_SET_THROTTLE(x)		(((x) & 0xff) << 24)
80 #define SAHARA_CONTROL_SET_MAXBURST(x)		(((x) & 0xff) << 16)
81 #define SAHARA_CONTROL_RNG_AUTORSD		BIT(7)
82 #define SAHARA_CONTROL_ENABLE_INT		BIT(4)
83 #define SAHARA_REG_CMD				0x0C
84 #define SAHARA_CMD_RESET			BIT(0)
85 #define SAHARA_CMD_CLEAR_INT			BIT(8)
86 #define SAHARA_CMD_CLEAR_ERR			BIT(9)
87 #define SAHARA_CMD_SINGLE_STEP			BIT(10)
88 #define SAHARA_CMD_MODE_BATCH			BIT(16)
89 #define SAHARA_CMD_MODE_DEBUG			BIT(18)
90 #define SAHARA_REG_STATUS			0x10
91 #define SAHARA_STATUS_GET_STATE(x)		((x) & 0x7)
92 #define SAHARA_STATE_IDLE			0
93 #define SAHARA_STATE_BUSY			1
94 #define SAHARA_STATE_ERR			2
95 #define SAHARA_STATE_FAULT			3
96 #define SAHARA_STATE_COMPLETE			4
97 #define SAHARA_STATE_COMP_FLAG			BIT(2)
98 #define SAHARA_STATUS_DAR_FULL			BIT(3)
99 #define SAHARA_STATUS_ERROR			BIT(4)
100 #define SAHARA_STATUS_SECURE			BIT(5)
101 #define SAHARA_STATUS_FAIL			BIT(6)
102 #define SAHARA_STATUS_INIT			BIT(7)
103 #define SAHARA_STATUS_RNG_RESEED		BIT(8)
104 #define SAHARA_STATUS_ACTIVE_RNG		BIT(9)
105 #define SAHARA_STATUS_ACTIVE_MDHA		BIT(10)
106 #define SAHARA_STATUS_ACTIVE_SKHA		BIT(11)
107 #define SAHARA_STATUS_MODE_BATCH		BIT(16)
108 #define SAHARA_STATUS_MODE_DEDICATED		BIT(17)
109 #define SAHARA_STATUS_MODE_DEBUG		BIT(18)
110 #define SAHARA_STATUS_GET_ISTATE(x)		(((x) >> 24) & 0xff)
111 #define SAHARA_REG_ERRSTATUS			0x14
112 #define SAHARA_ERRSTATUS_GET_SOURCE(x)		((x) & 0xf)
113 #define SAHARA_ERRSOURCE_CHA			14
114 #define SAHARA_ERRSOURCE_DMA			15
115 #define SAHARA_ERRSTATUS_DMA_DIR		BIT(8)
116 #define SAHARA_ERRSTATUS_GET_DMASZ(x)		(((x) >> 9) & 0x3)
117 #define SAHARA_ERRSTATUS_GET_DMASRC(x)		(((x) >> 13) & 0x7)
118 #define SAHARA_ERRSTATUS_GET_CHASRC(x)		(((x) >> 16) & 0xfff)
119 #define SAHARA_ERRSTATUS_GET_CHAERR(x)		(((x) >> 28) & 0x3)
120 #define SAHARA_REG_FADDR			0x18
121 #define SAHARA_REG_CDAR				0x1C
122 #define SAHARA_REG_IDAR				0x20
123 
124 struct sahara_hw_desc {
125 	u32	hdr;
126 	u32	len1;
127 	u32	p1;
128 	u32	len2;
129 	u32	p2;
130 	u32	next;
131 };
132 
133 struct sahara_hw_link {
134 	u32	len;
135 	u32	p;
136 	u32	next;
137 };
138 
139 struct sahara_ctx {
140 	/* AES-specific context */
141 	int keylen;
142 	u8 key[AES_KEYSIZE_128];
143 	struct crypto_skcipher *fallback;
144 };
145 
146 struct sahara_aes_reqctx {
147 	unsigned long mode;
148 	u8 iv_out[AES_BLOCK_SIZE];
149 	struct skcipher_request fallback_req;	// keep at the end
150 };
151 
152 /*
153  * struct sahara_sha_reqctx - private data per request
154  * @buf: holds data for requests smaller than block_size
155  * @rembuf: used to prepare one block_size-aligned request
156  * @context: hw-specific context for request. Digest is extracted from this
157  * @mode: specifies what type of hw-descriptor needs to be built
158  * @digest_size: length of digest for this request
159  * @context_size: length of hw-context for this request.
160  *                Always digest_size + 4
161  * @buf_cnt: number of bytes saved in buf
162  * @sg_in_idx: number of hw links
163  * @in_sg: scatterlist for input data
164  * @in_sg_chain: scatterlists for chained input data
165  * @total: total number of bytes for transfer
166  * @last: is this the last block
167  * @first: is this the first block
168  */
169 struct sahara_sha_reqctx {
170 	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
171 	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
172 	u8			context[SHA256_DIGEST_SIZE + 4];
173 	unsigned int		mode;
174 	unsigned int		digest_size;
175 	unsigned int		context_size;
176 	unsigned int		buf_cnt;
177 	unsigned int		sg_in_idx;
178 	struct scatterlist	*in_sg;
179 	struct scatterlist	in_sg_chain[2];
180 	size_t			total;
181 	unsigned int		last;
182 	unsigned int		first;
183 };
184 
185 struct sahara_dev {
186 	struct device		*device;
187 	unsigned int		version;
188 	void __iomem		*regs_base;
189 	struct clk		*clk_ipg;
190 	struct clk		*clk_ahb;
191 	struct completion	dma_completion;
192 
193 	struct sahara_ctx	*ctx;
194 	unsigned long		flags;
195 
196 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
197 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
198 
199 	u8			*key_base;
200 	dma_addr_t		key_phys_base;
201 
202 	u8			*iv_base;
203 	dma_addr_t		iv_phys_base;
204 
205 	u8			*context_base;
206 	dma_addr_t		context_phys_base;
207 
208 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
209 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
210 
211 	size_t			total;
212 	struct scatterlist	*in_sg;
213 	int		nb_in_sg;
214 	struct scatterlist	*out_sg;
215 	int		nb_out_sg;
216 
217 	struct crypto_engine *engine;
218 };
219 
220 static struct sahara_dev *dev_ptr;
221 
222 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
223 {
224 	writel(data, dev->regs_base + reg);
225 }
226 
227 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
228 {
229 	return readl(dev->regs_base + reg);
230 }
231 
232 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
233 {
234 	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
235 			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
236 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
237 
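	/*
	 * The descriptor header must have odd parity (see sahara_sha_init_hdr()).
	 * Each mode bit set below flips the parity, so toggle the parity bit
	 * alongside it.
	 */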
238 	if (dev->flags & FLAGS_CBC) {
239 		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
240 		hdr ^= SAHARA_HDR_PARITY_BIT;
241 	}
242 
243 	if (dev->flags & FLAGS_ENCRYPT) {
244 		hdr |= SAHARA_HDR_SKHA_OP_ENC;
245 		hdr ^= SAHARA_HDR_PARITY_BIT;
246 	}
247 
248 	return hdr;
249 }
250 
251 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
252 {
253 	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
254 			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
255 }
256 
257 static const char *sahara_err_src[16] = {
258 	"No error",
259 	"Header error",
260 	"Descriptor length error",
261 	"Descriptor length or pointer error",
262 	"Link length error",
263 	"Link pointer error",
264 	"Input buffer error",
265 	"Output buffer error",
266 	"Output buffer starvation",
267 	"Internal state fault",
268 	"General descriptor problem",
269 	"Reserved",
270 	"Descriptor address error",
271 	"Link address error",
272 	"CHA error",
273 	"DMA error"
274 };
275 
276 static const char *sahara_err_dmasize[4] = {
277 	"Byte transfer",
278 	"Half-word transfer",
279 	"Word transfer",
280 	"Reserved"
281 };
282 
283 static const char *sahara_err_dmasrc[8] = {
284 	"No error",
285 	"AHB bus error",
286 	"Internal IP bus error",
287 	"Parity error",
288 	"DMA crosses 256 byte boundary",
289 	"DMA is busy",
290 	"Reserved",
291 	"DMA HW error"
292 };
293 
294 static const char *sahara_cha_errsrc[12] = {
295 	"Input buffer non-empty",
296 	"Illegal address",
297 	"Illegal mode",
298 	"Illegal data size",
299 	"Illegal key size",
300 	"Write during processing",
301 	"CTX read during processing",
302 	"HW error",
303 	"Input buffer disabled/underflow",
304 	"Output buffer disabled/overflow",
305 	"DES key parity error",
306 	"Reserved"
307 };
308 
309 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
310 
311 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
312 {
313 	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
314 	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
315 
316 	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
317 
318 	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
319 
320 	if (source == SAHARA_ERRSOURCE_DMA) {
321 		if (error & SAHARA_ERRSTATUS_DMA_DIR)
322 			dev_err(dev->device, "		* DMA read.\n");
323 		else
324 			dev_err(dev->device, "		* DMA write.\n");
325 
326 		dev_err(dev->device, "		* %s.\n",
327 		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
328 		dev_err(dev->device, "		* %s.\n",
329 		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
330 	} else if (source == SAHARA_ERRSOURCE_CHA) {
331 		dev_err(dev->device, "		* %s.\n",
332 			sahara_cha_errsrc[chasrc]);
333 		dev_err(dev->device, "		* %s.\n",
334 		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
335 	}
336 	dev_err(dev->device, "\n");
337 }
338 
339 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
340 
341 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
342 {
343 	u8 state;
344 
345 	if (!__is_defined(DEBUG))
346 		return;
347 
348 	state = SAHARA_STATUS_GET_STATE(status);
349 
350 	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
351 		__func__, status);
352 
353 	dev_dbg(dev->device, "	- State = %d:\n", state);
354 	if (state & SAHARA_STATE_COMP_FLAG)
355 		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
356 
357 	dev_dbg(dev->device, "		* %s.\n",
358 	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
359 
360 	if (status & SAHARA_STATUS_DAR_FULL)
361 		dev_dbg(dev->device, "	- DAR Full.\n");
362 	if (status & SAHARA_STATUS_ERROR)
363 		dev_dbg(dev->device, "	- Error.\n");
364 	if (status & SAHARA_STATUS_SECURE)
365 		dev_dbg(dev->device, "	- Secure.\n");
366 	if (status & SAHARA_STATUS_FAIL)
367 		dev_dbg(dev->device, "	- Fail.\n");
368 	if (status & SAHARA_STATUS_RNG_RESEED)
369 		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
370 	if (status & SAHARA_STATUS_ACTIVE_RNG)
371 		dev_dbg(dev->device, "	- RNG Active.\n");
372 	if (status & SAHARA_STATUS_ACTIVE_MDHA)
373 		dev_dbg(dev->device, "	- MDHA Active.\n");
374 	if (status & SAHARA_STATUS_ACTIVE_SKHA)
375 		dev_dbg(dev->device, "	- SKHA Active.\n");
376 
377 	if (status & SAHARA_STATUS_MODE_BATCH)
378 		dev_dbg(dev->device, "	- Batch Mode.\n");
379 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
380 		dev_dbg(dev->device, "	- Dedicated Mode.\n");
381 	else if (status & SAHARA_STATUS_MODE_DEBUG)
382 		dev_dbg(dev->device, "	- Debug Mode.\n");
383 
384 	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
385 	       SAHARA_STATUS_GET_ISTATE(status));
386 
387 	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
388 		sahara_read(dev, SAHARA_REG_CDAR));
389 	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
390 		sahara_read(dev, SAHARA_REG_IDAR));
391 }
392 
393 static void sahara_dump_descriptors(struct sahara_dev *dev)
394 {
395 	int i;
396 
397 	if (!__is_defined(DEBUG))
398 		return;
399 
400 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
401 		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
402 			i, &dev->hw_phys_desc[i]);
403 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
404 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
405 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
406 		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
407 		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
408 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
409 			dev->hw_desc[i]->next);
410 	}
411 	dev_dbg(dev->device, "\n");
412 }
413 
414 static void sahara_dump_links(struct sahara_dev *dev)
415 {
416 	int i;
417 
418 	if (!__is_defined(DEBUG))
419 		return;
420 
421 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
422 		dev_dbg(dev->device, "Link (%d) (%pad):\n",
423 			i, &dev->hw_phys_link[i]);
424 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
425 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
426 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
427 			dev->hw_link[i]->next);
428 	}
429 	dev_dbg(dev->device, "\n");
430 }
431 
432 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
433 {
434 	struct sahara_ctx *ctx = dev->ctx;
435 	struct scatterlist *sg;
436 	int ret;
437 	int i, j;
438 	int idx = 0;
439 	u32 len;
440 
441 	memcpy(dev->key_base, ctx->key, ctx->keylen);
442 
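	/* Descriptor 0 loads the IV (CBC only) and the AES key into the SKHA unit */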
443 	if (dev->flags & FLAGS_CBC) {
444 		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
445 		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
446 	} else {
447 		dev->hw_desc[idx]->len1 = 0;
448 		dev->hw_desc[idx]->p1 = 0;
449 	}
450 	dev->hw_desc[idx]->len2 = ctx->keylen;
451 	dev->hw_desc[idx]->p2 = dev->key_phys_base;
452 	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
453 	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
454 
455 	idx++;
456 
457 
458 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
459 	if (dev->nb_in_sg < 0) {
460 		dev_err(dev->device, "Invalid number of src SG.\n");
461 		return dev->nb_in_sg;
462 	}
463 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
464 	if (dev->nb_out_sg < 0) {
465 		dev_err(dev->device, "Invalid number of dst SG.\n");
466 		return dev->nb_out_sg;
467 	}
468 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
469 		dev_err(dev->device, "not enough hw links (%d)\n",
470 			dev->nb_in_sg + dev->nb_out_sg);
471 		return -EINVAL;
472 	}
473 
474 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
475 			 DMA_TO_DEVICE);
476 	if (!ret) {
477 		dev_err(dev->device, "couldn't map in sg\n");
478 		return -EINVAL;
479 	}
480 
481 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
482 			 DMA_FROM_DEVICE);
483 	if (!ret) {
484 		dev_err(dev->device, "couldn't map out sg\n");
485 		goto unmap_in;
486 	}
487 
488 	/* Create input links */
489 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
490 	sg = dev->in_sg;
491 	len = dev->total;
492 	for (i = 0; i < dev->nb_in_sg; i++) {
493 		dev->hw_link[i]->len = min(len, sg->length);
494 		dev->hw_link[i]->p = sg->dma_address;
495 		if (i == (dev->nb_in_sg - 1)) {
496 			dev->hw_link[i]->next = 0;
497 		} else {
498 			len -= min(len, sg->length);
499 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
500 			sg = sg_next(sg);
501 		}
502 	}
503 
504 	/* Create output links */
505 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
506 	sg = dev->out_sg;
507 	len = dev->total;
508 	for (j = i; j < dev->nb_out_sg + i; j++) {
509 		dev->hw_link[j]->len = min(len, sg->length);
510 		dev->hw_link[j]->p = sg->dma_address;
511 		if (j == (dev->nb_out_sg + i - 1)) {
512 			dev->hw_link[j]->next = 0;
513 		} else {
514 			len -= min(len, sg->length);
515 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
516 			sg = sg_next(sg);
517 		}
518 	}
519 
520 	/* Fill remaining fields of hw_desc[1] */
521 	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
522 	dev->hw_desc[idx]->len1 = dev->total;
523 	dev->hw_desc[idx]->len2 = dev->total;
524 	dev->hw_desc[idx]->next = 0;
525 
526 	sahara_dump_descriptors(dev);
527 	sahara_dump_links(dev);
528 
529 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
530 
531 	return 0;
532 
533 unmap_in:
534 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
535 		DMA_TO_DEVICE);
536 
537 	return -EINVAL;
538 }
539 
540 static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
541 {
542 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
543 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
544 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
545 
546 	/* Update IV buffer to contain the last ciphertext block */
547 	if (rctx->mode & FLAGS_ENCRYPT) {
548 		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
549 				   ivsize, req->cryptlen - ivsize);
550 	} else {
551 		memcpy(req->iv, rctx->iv_out, ivsize);
552 	}
553 }
554 
555 static int sahara_aes_process(struct skcipher_request *req)
556 {
557 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
558 	struct sahara_dev *dev = dev_ptr;
559 	struct sahara_ctx *ctx;
560 	struct sahara_aes_reqctx *rctx;
561 	int ret;
562 	unsigned long time_left;
563 
564 	/* Request is ready to be dispatched by the device */
565 	dev_dbg(dev->device,
566 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
567 		req->cryptlen, req->src, req->dst);
568 
569 	/* assign new request to device */
570 	dev->total = req->cryptlen;
571 	dev->in_sg = req->src;
572 	dev->out_sg = req->dst;
573 
574 	rctx = skcipher_request_ctx(req);
575 	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
576 	rctx->mode &= FLAGS_MODE_MASK;
577 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
578 
579 	if ((dev->flags & FLAGS_CBC) && req->iv) {
580 		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
581 
582 		memcpy(dev->iv_base, req->iv, ivsize);
583 
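		/*
		 * For decryption, save the last ciphertext block now: it becomes
		 * the next IV and would be lost once an in-place request has been
		 * processed.
		 */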
584 		if (!(dev->flags & FLAGS_ENCRYPT)) {
585 			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
586 					   rctx->iv_out, ivsize,
587 					   req->cryptlen - ivsize);
588 		}
589 	}
590 
591 	/* assign new context to device */
592 	dev->ctx = ctx;
593 
594 	reinit_completion(&dev->dma_completion);
595 
596 	ret = sahara_hw_descriptor_create(dev);
597 	if (ret)
598 		return -EINVAL;
599 
600 	time_left = wait_for_completion_timeout(&dev->dma_completion,
601 						msecs_to_jiffies(SAHARA_TIMEOUT_MS));
602 
603 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
604 		DMA_FROM_DEVICE);
605 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
606 		DMA_TO_DEVICE);
607 
608 	if (!time_left) {
609 		dev_err(dev->device, "AES timeout\n");
610 		return -ETIMEDOUT;
611 	}
612 
613 	if ((dev->flags & FLAGS_CBC) && req->iv)
614 		sahara_aes_cbc_update_iv(req);
615 
616 	return 0;
617 }
618 
619 static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
620 			     unsigned int keylen)
621 {
622 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
623 
624 	ctx->keylen = keylen;
625 
626 	/* SAHARA only supports 128-bit keys */
627 	if (keylen == AES_KEYSIZE_128) {
628 		memcpy(ctx->key, key, keylen);
629 		return 0;
630 	}
631 
632 	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
633 		return -EINVAL;
634 
635 	/*
636 	 * The requested key size is not supported by the HW; fall back to software.
637 	 */
638 	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
639 	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
640 						 CRYPTO_TFM_REQ_MASK);
641 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
642 }
643 
644 static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
645 {
646 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
647 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
648 		crypto_skcipher_reqtfm(req));
649 
650 	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
651 	skcipher_request_set_callback(&rctx->fallback_req,
652 				      req->base.flags,
653 				      req->base.complete,
654 				      req->base.data);
655 	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
656 				   req->dst, req->cryptlen, req->iv);
657 
658 	if (mode & FLAGS_ENCRYPT)
659 		return crypto_skcipher_encrypt(&rctx->fallback_req);
660 
661 	return crypto_skcipher_decrypt(&rctx->fallback_req);
662 }
663 
664 static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
665 {
666 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
667 	struct sahara_ctx *ctx = crypto_skcipher_ctx(
668 		crypto_skcipher_reqtfm(req));
669 	struct sahara_dev *dev = dev_ptr;
670 
671 	if (!req->cryptlen)
672 		return 0;
673 
674 	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
675 		return sahara_aes_fallback(req, mode);
676 
677 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
678 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
679 
680 	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
681 		return -EINVAL;
682 
683 	rctx->mode = mode;
684 
685 	return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
686 }
687 
688 static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
689 {
690 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
691 }
692 
693 static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
694 {
695 	return sahara_aes_crypt(req, 0);
696 }
697 
698 static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
699 {
700 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
701 }
702 
703 static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
704 {
705 	return sahara_aes_crypt(req, FLAGS_CBC);
706 }
707 
708 static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
709 {
710 	const char *name = crypto_tfm_alg_name(&tfm->base);
711 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
712 
713 	ctx->fallback = crypto_alloc_skcipher(name, 0,
714 					      CRYPTO_ALG_NEED_FALLBACK);
715 	if (IS_ERR(ctx->fallback)) {
716 		pr_err("Error allocating fallback algo %s\n", name);
717 		return PTR_ERR(ctx->fallback);
718 	}
719 
720 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
721 					 crypto_skcipher_reqsize(ctx->fallback));
722 
723 	return 0;
724 }
725 
726 static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
727 {
728 	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
729 
730 	crypto_free_skcipher(ctx->fallback);
731 }
732 
733 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
734 			      struct sahara_sha_reqctx *rctx)
735 {
736 	u32 hdr = 0;
737 
738 	hdr = rctx->mode;
739 
740 	if (rctx->first) {
741 		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
742 		hdr |= SAHARA_HDR_MDHA_INIT;
743 	} else {
744 		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
745 	}
746 
747 	if (rctx->last)
748 		hdr |= SAHARA_HDR_MDHA_PDATA;
749 
750 	if (hweight_long(hdr) % 2 == 0)
751 		hdr |= SAHARA_HDR_PARITY_BIT;
752 
753 	return hdr;
754 }
755 
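/*
 * Map rctx->in_sg for DMA and chain it into dev->hw_link[] starting at @start.
 * Returns the index of the first unused link (the caller appends the context
 * link there) or a negative error code.
 */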
756 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
757 				       struct sahara_sha_reqctx *rctx,
758 				       int start)
759 {
760 	struct scatterlist *sg;
761 	unsigned int len;
762 	unsigned int i;
763 	int ret;
764 
765 	dev->in_sg = rctx->in_sg;
766 
767 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
768 	if (dev->nb_in_sg < 0) {
769 		dev_err(dev->device, "Invalid number of src SG.\n");
770 		return dev->nb_in_sg;
771 	}
772 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
773 		dev_err(dev->device, "not enough hw links (%d)\n",
774 			dev->nb_in_sg + dev->nb_out_sg);
775 		return -EINVAL;
776 	}
777 
778 	sg = dev->in_sg;
779 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
780 	if (!ret)
781 		return -EFAULT;
782 
783 	len = rctx->total;
784 	for (i = start; i < dev->nb_in_sg + start; i++) {
785 		dev->hw_link[i]->len = min(len, sg->length);
786 		dev->hw_link[i]->p = sg->dma_address;
787 		if (i == (dev->nb_in_sg + start - 1)) {
788 			dev->hw_link[i]->next = 0;
789 		} else {
790 			len -= min(len, sg->length);
791 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
792 			sg = sg_next(sg);
793 		}
794 	}
795 
796 	return i;
797 }
798 
799 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
800 						struct sahara_sha_reqctx *rctx,
801 						struct ahash_request *req,
802 						int index)
803 {
804 	unsigned result_len;
805 	int i = index;
806 
807 	if (rctx->first)
808 		/* Create initial descriptor: #8 */
809 		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
810 	else
811 		/* Create hash descriptor: #10. Must follow #6. */
812 		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
813 
814 	dev->hw_desc[index]->len1 = rctx->total;
815 	if (dev->hw_desc[index]->len1 == 0) {
816 		/* if len1 is 0, p1 must be 0, too */
817 		dev->hw_desc[index]->p1 = 0;
818 		rctx->sg_in_idx = 0;
819 	} else {
820 		/* Create input links */
821 		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
822 		i = sahara_sha_hw_links_create(dev, rctx, index);
823 
824 		rctx->sg_in_idx = index;
825 		if (i < 0)
826 			return i;
827 	}
828 
829 	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
830 
831 	/* Save the context for the next operation */
832 	result_len = rctx->context_size;
833 	dev->hw_link[i]->p = dev->context_phys_base;
834 
835 	dev->hw_link[i]->len = result_len;
836 	dev->hw_desc[index]->len2 = result_len;
837 
838 	dev->hw_link[i]->next = 0;
839 
840 	return 0;
841 }
842 
843 /*
844  * Load descriptor aka #6
845  *
846  * Loads a previously saved context back into the MDHA unit.
847  *
848  * p1: Saved Context
849  * p2: NULL
850  *
851  */
852 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
853 						struct sahara_sha_reqctx *rctx,
854 						struct ahash_request *req,
855 						int index)
856 {
857 	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
858 
859 	dev->hw_desc[index]->len1 = rctx->context_size;
860 	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
861 	dev->hw_desc[index]->len2 = 0;
862 	dev->hw_desc[index]->p2 = 0;
863 
864 	dev->hw_link[index]->len = rctx->context_size;
865 	dev->hw_link[index]->p = dev->context_phys_base;
866 	dev->hw_link[index]->next = 0;
867 
868 	return 0;
869 }
870 
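/*
 * Gather the data for this request. Returns 0 if everything was merely
 * buffered for a later call, or -EINPROGRESS if rctx->in_sg and rctx->total
 * are set up and a hash descriptor should be built.
 */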
871 static int sahara_sha_prepare_request(struct ahash_request *req)
872 {
873 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
874 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
875 	unsigned int hash_later;
876 	unsigned int block_size;
877 	unsigned int len;
878 
879 	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
880 
881 	/* append bytes from previous operation */
882 	len = rctx->buf_cnt + req->nbytes;
883 
884 	/* only the last transfer can be padded in hardware */
885 	if (!rctx->last && (len < block_size)) {
886 		/* too little data, save for next operation */
887 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
888 					 0, req->nbytes, 0);
889 		rctx->buf_cnt += req->nbytes;
890 
891 		return 0;
892 	}
893 
894 	/* add data from previous operation first */
895 	if (rctx->buf_cnt)
896 		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
897 
898 	/* data must always be a multiple of block_size */
899 	hash_later = rctx->last ? 0 : len & (block_size - 1);
900 	if (hash_later) {
901 		unsigned int offset = req->nbytes - hash_later;
902 		/* Save remaining bytes for later use */
903 		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
904 					hash_later, 0);
905 	}
906 
907 	rctx->total = len - hash_later;
908 	/* have data from previous operation and current */
909 	if (rctx->buf_cnt && req->nbytes) {
910 		sg_init_table(rctx->in_sg_chain, 2);
911 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
912 		sg_chain(rctx->in_sg_chain, 2, req->src);
913 		rctx->in_sg = rctx->in_sg_chain;
914 	/* only data from previous operation */
915 	} else if (rctx->buf_cnt) {
916 		rctx->in_sg = rctx->in_sg_chain;
917 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
918 	/* no data from previous operation */
919 	} else {
920 		rctx->in_sg = req->src;
921 	}
922 
923 	/* on next call, we only have the remaining data in the buffer */
924 	rctx->buf_cnt = hash_later;
925 
926 	return -EINPROGRESS;
927 }
928 
929 static int sahara_sha_process(struct ahash_request *req)
930 {
931 	struct sahara_dev *dev = dev_ptr;
932 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
933 	int ret;
934 	unsigned long time_left;
935 
936 	ret = sahara_sha_prepare_request(req);
937 	if (!ret)
938 		return ret;
939 
940 	if (rctx->first) {
941 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
942 		if (ret)
943 			return ret;
944 
945 		dev->hw_desc[0]->next = 0;
946 		rctx->first = 0;
947 	} else {
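		/*
		 * Not the first update: descriptor 0 reloads the previously saved
		 * MDHA context, descriptor 1 hashes the new data.
		 */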
948 		memcpy(dev->context_base, rctx->context, rctx->context_size);
949 
950 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
951 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
952 		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
953 		if (ret)
954 			return ret;
955 
956 		dev->hw_desc[1]->next = 0;
957 	}
958 
959 	sahara_dump_descriptors(dev);
960 	sahara_dump_links(dev);
961 
962 	reinit_completion(&dev->dma_completion);
963 
964 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
965 
966 	time_left = wait_for_completion_timeout(&dev->dma_completion,
967 						msecs_to_jiffies(SAHARA_TIMEOUT_MS));
968 
969 	if (rctx->sg_in_idx)
970 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
971 			     DMA_TO_DEVICE);
972 
973 	if (!time_left) {
974 		dev_err(dev->device, "SHA timeout\n");
975 		return -ETIMEDOUT;
976 	}
977 
978 	memcpy(rctx->context, dev->context_base, rctx->context_size);
979 
980 	if (req->result && rctx->last)
981 		memcpy(req->result, rctx->context, rctx->digest_size);
982 
983 	return 0;
984 }
985 
986 static int sahara_do_one_request(struct crypto_engine *engine, void *areq)
987 {
988 	struct crypto_async_request *async_req = areq;
989 	int err;
990 
991 	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_AHASH) {
992 		struct ahash_request *req = ahash_request_cast(async_req);
993 
994 		err = sahara_sha_process(req);
995 		local_bh_disable();
996 		crypto_finalize_hash_request(engine, req, err);
997 		local_bh_enable();
998 	} else {
999 		struct skcipher_request *req = skcipher_request_cast(async_req);
1000 
1001 		err = sahara_aes_process(skcipher_request_cast(async_req));
1002 		local_bh_disable();
1003 		crypto_finalize_skcipher_request(engine, req, err);
1004 		local_bh_enable();
1005 	}
1006 
1007 	return 0;
1008 }
1009 
1010 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1011 {
1012 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1013 	struct sahara_dev *dev = dev_ptr;
1014 
1015 	if (!req->nbytes && !last)
1016 		return 0;
1017 
1018 	rctx->last = last;
1019 
1020 	return crypto_transfer_hash_request_to_engine(dev->engine, req);
1021 }
1022 
1023 static int sahara_sha_init(struct ahash_request *req)
1024 {
1025 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1026 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1027 
1028 	memset(rctx, 0, sizeof(*rctx));
1029 
1030 	switch (crypto_ahash_digestsize(tfm)) {
1031 	case SHA1_DIGEST_SIZE:
1032 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1033 		rctx->digest_size = SHA1_DIGEST_SIZE;
1034 		break;
1035 	case SHA256_DIGEST_SIZE:
1036 		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1037 		rctx->digest_size = SHA256_DIGEST_SIZE;
1038 		break;
1039 	default:
1040 		return -EINVAL;
1041 	}
1042 
1043 	rctx->context_size = rctx->digest_size + 4;
1044 	rctx->first = 1;
1045 
1046 	return 0;
1047 }
1048 
1049 static int sahara_sha_update(struct ahash_request *req)
1050 {
1051 	return sahara_sha_enqueue(req, 0);
1052 }
1053 
1054 static int sahara_sha_final(struct ahash_request *req)
1055 {
1056 	req->nbytes = 0;
1057 	return sahara_sha_enqueue(req, 1);
1058 }
1059 
1060 static int sahara_sha_finup(struct ahash_request *req)
1061 {
1062 	return sahara_sha_enqueue(req, 1);
1063 }
1064 
1065 static int sahara_sha_digest(struct ahash_request *req)
1066 {
1067 	sahara_sha_init(req);
1068 
1069 	return sahara_sha_finup(req);
1070 }
1071 
1072 static int sahara_sha_export(struct ahash_request *req, void *out)
1073 {
1074 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1075 
1076 	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1077 
1078 	return 0;
1079 }
1080 
1081 static int sahara_sha_import(struct ahash_request *req, const void *in)
1082 {
1083 	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1084 
1085 	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1086 
1087 	return 0;
1088 }
1089 
1090 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1091 {
1092 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1093 				 sizeof(struct sahara_sha_reqctx));
1094 
1095 	return 0;
1096 }
1097 
1098 static struct skcipher_engine_alg aes_algs[] = {
1099 {
1100 	.base = {
1101 		.base.cra_name		= "ecb(aes)",
1102 		.base.cra_driver_name	= "sahara-ecb-aes",
1103 		.base.cra_priority	= 300,
1104 		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1105 		.base.cra_blocksize	= AES_BLOCK_SIZE,
1106 		.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1107 		.base.cra_alignmask	= 0x0,
1108 		.base.cra_module	= THIS_MODULE,
1109 
1110 		.init			= sahara_aes_init_tfm,
1111 		.exit			= sahara_aes_exit_tfm,
1112 		.min_keysize		= AES_MIN_KEY_SIZE,
1113 		.max_keysize		= AES_MAX_KEY_SIZE,
1114 		.setkey			= sahara_aes_setkey,
1115 		.encrypt		= sahara_aes_ecb_encrypt,
1116 		.decrypt		= sahara_aes_ecb_decrypt,
1117 	},
1118 	.op = {
1119 		.do_one_request = sahara_do_one_request,
1120 	},
1121 }, {
1122 	.base = {
1123 		.base.cra_name		= "cbc(aes)",
1124 		.base.cra_driver_name	= "sahara-cbc-aes",
1125 		.base.cra_priority	= 300,
1126 		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1127 		.base.cra_blocksize	= AES_BLOCK_SIZE,
1128 		.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1129 		.base.cra_alignmask	= 0x0,
1130 		.base.cra_module	= THIS_MODULE,
1131 
1132 		.init			= sahara_aes_init_tfm,
1133 		.exit			= sahara_aes_exit_tfm,
1134 		.min_keysize		= AES_MIN_KEY_SIZE,
1135 		.max_keysize		= AES_MAX_KEY_SIZE,
1136 		.ivsize			= AES_BLOCK_SIZE,
1137 		.setkey			= sahara_aes_setkey,
1138 		.encrypt		= sahara_aes_cbc_encrypt,
1139 		.decrypt		= sahara_aes_cbc_decrypt,
1140 	},
1141 	.op = {
1142 		.do_one_request = sahara_do_one_request,
1143 	},
1144 }
1145 };
1146 
1147 static struct ahash_engine_alg sha_v3_algs[] = {
1148 {
1149 	.base = {
1150 		.init		= sahara_sha_init,
1151 		.update		= sahara_sha_update,
1152 		.final		= sahara_sha_final,
1153 		.finup		= sahara_sha_finup,
1154 		.digest		= sahara_sha_digest,
1155 		.export		= sahara_sha_export,
1156 		.import		= sahara_sha_import,
1157 		.halg.digestsize	= SHA1_DIGEST_SIZE,
1158 		.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1159 		.halg.base	= {
1160 			.cra_name		= "sha1",
1161 			.cra_driver_name	= "sahara-sha1",
1162 			.cra_priority		= 300,
1163 			.cra_flags		= CRYPTO_ALG_ASYNC |
1164 							CRYPTO_ALG_NEED_FALLBACK,
1165 			.cra_blocksize		= SHA1_BLOCK_SIZE,
1166 			.cra_ctxsize		= sizeof(struct sahara_ctx),
1167 			.cra_alignmask		= 0,
1168 			.cra_module		= THIS_MODULE,
1169 			.cra_init		= sahara_sha_cra_init,
1170 		}
1171 	},
1172 	.op = {
1173 		.do_one_request = sahara_do_one_request,
1174 	},
1175 },
1176 };
1177 
1178 static struct ahash_engine_alg sha_v4_algs[] = {
1179 {
1180 	.base = {
1181 		.init		= sahara_sha_init,
1182 		.update		= sahara_sha_update,
1183 		.final		= sahara_sha_final,
1184 		.finup		= sahara_sha_finup,
1185 		.digest		= sahara_sha_digest,
1186 		.export		= sahara_sha_export,
1187 		.import		= sahara_sha_import,
1188 		.halg.digestsize	= SHA256_DIGEST_SIZE,
1189 		.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1190 		.halg.base	= {
1191 			.cra_name		= "sha256",
1192 			.cra_driver_name	= "sahara-sha256",
1193 			.cra_priority		= 300,
1194 			.cra_flags		= CRYPTO_ALG_ASYNC |
1195 							CRYPTO_ALG_NEED_FALLBACK,
1196 			.cra_blocksize		= SHA256_BLOCK_SIZE,
1197 			.cra_ctxsize		= sizeof(struct sahara_ctx),
1198 			.cra_alignmask		= 0,
1199 			.cra_module		= THIS_MODULE,
1200 			.cra_init		= sahara_sha_cra_init,
1201 		}
1202 	},
1203 	.op = {
1204 		.do_one_request = sahara_do_one_request,
1205 	},
1206 },
1207 };
1208 
1209 static irqreturn_t sahara_irq_handler(int irq, void *data)
1210 {
1211 	struct sahara_dev *dev = data;
1212 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1213 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1214 
1215 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1216 		     SAHARA_REG_CMD);
1217 
1218 	sahara_decode_status(dev, stat);
1219 
1220 	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
1221 		return IRQ_NONE;
1222 
1223 	if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
1224 		sahara_decode_error(dev, err);
1225 
1226 	complete(&dev->dma_completion);
1227 
1228 	return IRQ_HANDLED;
1229 }
1230 
1231 
1232 static int sahara_register_algs(struct sahara_dev *dev)
1233 {
1234 	int err;
1235 
1236 	err = crypto_engine_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1237 	if (err)
1238 		return err;
1239 
1240 	err = crypto_engine_register_ahashes(sha_v3_algs,
1241 					     ARRAY_SIZE(sha_v3_algs));
1242 	if (err)
1243 		goto err_aes_algs;
1244 
1245 	if (dev->version > SAHARA_VERSION_3) {
1246 		err = crypto_engine_register_ahashes(sha_v4_algs,
1247 						     ARRAY_SIZE(sha_v4_algs));
1248 		if (err)
1249 			goto err_sha_v3_algs;
1250 	}
1251 
1252 	return 0;
1253 
1254 err_sha_v3_algs:
1255 	crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
1256 
1257 err_aes_algs:
1258 	crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1259 
1260 	return err;
1261 }
1262 
1263 static void sahara_unregister_algs(struct sahara_dev *dev)
1264 {
1265 	crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1266 	crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
1267 
1268 	if (dev->version > SAHARA_VERSION_3)
1269 		crypto_engine_unregister_ahashes(sha_v4_algs,
1270 						 ARRAY_SIZE(sha_v4_algs));
1271 }
1272 
1273 static const struct of_device_id sahara_dt_ids[] = {
1274 	{ .compatible = "fsl,imx53-sahara" },
1275 	{ .compatible = "fsl,imx27-sahara" },
1276 	{ /* sentinel */ }
1277 };
1278 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1279 
1280 static int sahara_probe(struct platform_device *pdev)
1281 {
1282 	struct sahara_dev *dev;
1283 	u32 version;
1284 	int irq;
1285 	int err;
1286 	int i;
1287 
1288 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1289 	if (!dev)
1290 		return -ENOMEM;
1291 
1292 	dev->device = &pdev->dev;
1293 	platform_set_drvdata(pdev, dev);
1294 
1295 	/* Get the base address */
1296 	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1297 	if (IS_ERR(dev->regs_base))
1298 		return PTR_ERR(dev->regs_base);
1299 
1300 	/* Get the IRQ */
1301 	irq = platform_get_irq(pdev,  0);
1302 	if (irq < 0)
1303 		return irq;
1304 
1305 	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1306 			       0, dev_name(&pdev->dev), dev);
1307 	if (err)
1308 		return dev_err_probe(&pdev->dev, err,
1309 				     "failed to request irq\n");
1310 
1311 	/* clocks */
1312 	dev->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
1313 	if (IS_ERR(dev->clk_ipg))
1314 		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ipg),
1315 				     "Could not get ipg clock\n");
1316 
1317 	dev->clk_ahb = devm_clk_get_enabled(&pdev->dev, "ahb");
1318 	if (IS_ERR(dev->clk_ahb))
1319 		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ahb),
1320 				     "Could not get ahb clock\n");
1321 
1322 	/* Allocate HW descriptors */
1323 	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1324 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1325 			&dev->hw_phys_desc[0], GFP_KERNEL);
1326 	if (!dev->hw_desc[0])
1327 		return -ENOMEM;
1328 	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1329 	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1330 				sizeof(struct sahara_hw_desc);
1331 
1332 	/* Allocate space for iv and key */
1333 	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1334 				&dev->key_phys_base, GFP_KERNEL);
1335 	if (!dev->key_base)
1336 		return -ENOMEM;
1337 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1338 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1339 
1340 	/* Allocate space for context: largest digest + message length field */
1341 	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1342 					SHA256_DIGEST_SIZE + 4,
1343 					&dev->context_phys_base, GFP_KERNEL);
1344 	if (!dev->context_base)
1345 		return -ENOMEM;
1346 
1347 	/* Allocate space for HW links */
1348 	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1349 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1350 			&dev->hw_phys_link[0], GFP_KERNEL);
1351 	if (!dev->hw_link[0])
1352 		return -ENOMEM;
1353 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1354 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1355 					sizeof(struct sahara_hw_link);
1356 		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1357 	}
1358 
1359 	dev_ptr = dev;
1360 
1361 	dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
1362 	if (!dev->engine)
1363 		return -ENOMEM;
1364 
1365 	err = crypto_engine_start(dev->engine);
1366 	if (err) {
1367 		crypto_engine_exit(dev->engine);
1368 		return dev_err_probe(&pdev->dev, err,
1369 				     "Could not start crypto engine\n");
1370 	}
1371 
1372 	init_completion(&dev->dma_completion);
1373 
1374 	version = sahara_read(dev, SAHARA_REG_VERSION);
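	/* i.MX27 (v3) reports the version in the low byte; i.MX53 (v4) in bits 15:8 */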
1375 	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1376 		if (version != SAHARA_VERSION_3)
1377 			err = -ENODEV;
1378 	} else if (of_device_is_compatible(pdev->dev.of_node,
1379 			"fsl,imx53-sahara")) {
1380 		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1381 			err = -ENODEV;
1382 		version = (version >> 8) & 0xff;
1383 	}
1384 	if (err == -ENODEV) {
1385 		dev_err_probe(&pdev->dev, err,
1386 			      "SAHARA version %d not supported\n", version);
1387 		goto err_algs;
1388 	}
1389 
1390 	dev->version = version;
1391 
1392 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1393 		     SAHARA_REG_CMD);
1394 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1395 			SAHARA_CONTROL_SET_MAXBURST(8) |
1396 			SAHARA_CONTROL_RNG_AUTORSD |
1397 			SAHARA_CONTROL_ENABLE_INT,
1398 			SAHARA_REG_CONTROL);
1399 
1400 	err = sahara_register_algs(dev);
1401 	if (err)
1402 		goto err_algs;
1403 
1404 	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1405 
1406 	return 0;
1407 
1408 err_algs:
1409 	crypto_engine_exit(dev->engine);
1410 
1411 	return err;
1412 }
1413 
1414 static void sahara_remove(struct platform_device *pdev)
1415 {
1416 	struct sahara_dev *dev = platform_get_drvdata(pdev);
1417 
1418 	crypto_engine_exit(dev->engine);
1419 	sahara_unregister_algs(dev);
1420 }
1421 
1422 static struct platform_driver sahara_driver = {
1423 	.probe		= sahara_probe,
1424 	.remove_new	= sahara_remove,
1425 	.driver		= {
1426 		.name	= SAHARA_NAME,
1427 		.of_match_table = sahara_dt_ids,
1428 	},
1429 };
1430 
1431 module_platform_driver(sahara_driver);
1432 
1433 MODULE_LICENSE("GPL");
1434 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1435 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1436 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1437