// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include <asm/byteorder.h>
#include <asm/errno.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>

#include "ocs-aes.h"

#define AES_COMMAND_OFFSET			0x0000
#define AES_KEY_0_OFFSET			0x0004
#define AES_KEY_1_OFFSET			0x0008
#define AES_KEY_2_OFFSET			0x000C
#define AES_KEY_3_OFFSET			0x0010
#define AES_KEY_4_OFFSET			0x0014
#define AES_KEY_5_OFFSET			0x0018
#define AES_KEY_6_OFFSET			0x001C
#define AES_KEY_7_OFFSET			0x0020
#define AES_IV_0_OFFSET				0x0024
#define AES_IV_1_OFFSET				0x0028
#define AES_IV_2_OFFSET				0x002C
#define AES_IV_3_OFFSET				0x0030
#define AES_ACTIVE_OFFSET			0x0034
#define AES_STATUS_OFFSET			0x0038
#define AES_KEY_SIZE_OFFSET			0x0044
#define AES_IER_OFFSET				0x0048
#define AES_ISR_OFFSET				0x005C
#define AES_MULTIPURPOSE1_0_OFFSET		0x0200
#define AES_MULTIPURPOSE1_1_OFFSET		0x0204
#define AES_MULTIPURPOSE1_2_OFFSET		0x0208
#define AES_MULTIPURPOSE1_3_OFFSET		0x020C
#define AES_MULTIPURPOSE2_0_OFFSET		0x0220
#define AES_MULTIPURPOSE2_1_OFFSET		0x0224
#define AES_MULTIPURPOSE2_2_OFFSET		0x0228
#define AES_MULTIPURPOSE2_3_OFFSET		0x022C
#define AES_BYTE_ORDER_CFG_OFFSET		0x02C0
#define AES_TLEN_OFFSET				0x0300
#define AES_T_MAC_0_OFFSET			0x0304
#define AES_T_MAC_1_OFFSET			0x0308
#define AES_T_MAC_2_OFFSET			0x030C
#define AES_T_MAC_3_OFFSET			0x0310
#define AES_PLEN_OFFSET				0x0314
#define AES_A_DMA_SRC_ADDR_OFFSET		0x0400
#define AES_A_DMA_DST_ADDR_OFFSET		0x0404
#define AES_A_DMA_SRC_SIZE_OFFSET		0x0408
#define AES_A_DMA_DST_SIZE_OFFSET		0x040C
#define AES_A_DMA_DMA_MODE_OFFSET		0x0410
#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET		0x0418
#define AES_A_DMA_NEXT_DST_DESCR_OFFSET		0x041C
#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET	0x0420
#define AES_A_DMA_LOG_OFFSET			0x0424
#define AES_A_DMA_STATUS_OFFSET			0x0428
#define AES_A_DMA_PERF_CNTR_OFFSET		0x042C
#define AES_A_DMA_MSI_ISR_OFFSET		0x0480
#define AES_A_DMA_MSI_IER_OFFSET		0x0484
#define AES_A_DMA_MSI_MASK_OFFSET		0x0488
#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET	0x0600
#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET	0x0700

/*
 * AES_A_DMA_DMA_MODE register.
 * Default: 0x00000000.
 * bit[31]	ACTIVE
 *		This bit activates the DMA. When the DMA finishes, it resets
 *		this bit to zero.
 * bit[30:26]	Unused by this driver.
 * bit[25]	SRC_LINK_LIST_EN
 *		Source link list enable bit. When the linked list is terminated
 *		this bit is reset by the DMA.
 * bit[24]	DST_LINK_LIST_EN
 *		Destination link list enable bit. When the linked list is
 *		terminated this bit is reset by the DMA.
 * bit[23:0]	Unused by this driver.
 */
#define AES_A_DMA_DMA_MODE_ACTIVE		BIT(31)
#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN	BIT(25)
#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN	BIT(24)

/*
 * AES_ACTIVE register
 * default 0x00000000
 * bit[31:10]	Reserved
 * bit[9]	LAST_ADATA
 * bit[8]	LAST_GCX
 * bit[7:2]	Reserved
 * bit[1]	TERMINATION
 * bit[0]	TRIGGER
 */
#define AES_ACTIVE_LAST_ADATA			BIT(9)
#define AES_ACTIVE_LAST_CCM_GCM			BIT(8)
#define AES_ACTIVE_TERMINATION			BIT(1)
#define AES_ACTIVE_TRIGGER			BIT(0)

#define AES_DISABLE_INT				0x00000000
#define AES_DMA_CPD_ERR_INT			BIT(8)
#define AES_DMA_OUTBUF_RD_ERR_INT		BIT(7)
#define AES_DMA_OUTBUF_WR_ERR_INT		BIT(6)
#define AES_DMA_INBUF_RD_ERR_INT		BIT(5)
#define AES_DMA_INBUF_WR_ERR_INT		BIT(4)
#define AES_DMA_BAD_COMP_INT			BIT(3)
#define AES_DMA_SAI_INT				BIT(2)
#define AES_DMA_SRC_DONE_INT			BIT(0)
#define AES_COMPLETE_INT			BIT(1)

#define AES_DMA_MSI_MASK_CLEAR			BIT(0)

#define AES_128_BIT_KEY				0x00000000
#define AES_256_BIT_KEY				BIT(0)

#define AES_DEACTIVATE_PERF_CNTR		0x00000000
#define AES_ACTIVATE_PERF_CNTR			BIT(0)

#define AES_MAX_TAG_SIZE_U32			4

#define OCS_LL_DMA_FLAG_TERMINATE		BIT(31)

/*
 * There is an inconsistency in the documentation. This is documented as an
 * 11-bit value, but it is actually 10 bits.
 */
#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK	0x3FF

/*
 * During CCM decrypt, the OCS block needs to finish processing the ciphertext
 * before the tag is written. For 128-bit mode this required delay is 28 OCS
 * clock cycles. For 256-bit mode it is 36 OCS clock cycles.
 */
#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT		36UL

/*
 * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
 * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
 * bit in the same register (as stated in the OCS databook).
 */
#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT	42UL

/* See RFC3610 section 2.2 */
#define L_PRIME_MIN (1)
#define L_PRIME_MAX (7)
/*
 * CCM IV format from RFC 3610 section 2.3
 *
 *   Octet Number   Contents
 *   ------------   ---------
 *   0              Flags
 *   1 ... 15-L     Nonce N
 *   16-L ... 15    Counter i
 *
 * Flags = L' = L - 1
 */
#define L_PRIME_IDX		0
#define COUNTER_START(lprime)	(16 - ((lprime) + 1))
#define COUNTER_LEN(lprime)	((lprime) + 1)
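
/*
 * Worked example (illustrative, not from the databook): for an IV with
 * L' = 3 (i.e., L = 4, so a 4-octet counter), COUNTER_START(3) = 12 and
 * COUNTER_LEN(3) = 4; the counter occupies iv[12]..iv[15] and the nonce
 * occupies iv[1]..iv[11].
 */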

enum aes_counter_mode {
	AES_CTR_M_NO_INC = 0,
	AES_CTR_M_32_INC = 1,
	AES_CTR_M_64_INC = 2,
	AES_CTR_M_128_INC = 3,
};

/**
 * struct ocs_dma_linked_list - OCS DMA linked list entry.
 * @src_addr:   Source address of the data.
 * @src_len:    Length of data to be fetched.
 * @next:	Next dma_list to fetch.
 * @ll_flags:   Flags (Freeze @ terminate) for the DMA engine.
 */
struct ocs_dma_linked_list {
	u32 src_addr;
	u32 src_len;
	u32 next;
	u32 ll_flags;
} __packed;
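
/*
 * Illustrative sketch (an assumption based on how ocs_create_linked_list_from_sg()
 * below builds lists, not a databook excerpt): a two-entry list placed at DMA
 * address 'base' would look like:
 *
 *	ll[0] = { .src_addr = buf0, .src_len = len0,
 *		  .next = base + sizeof(struct ocs_dma_linked_list),
 *		  .ll_flags = 0 };
 *	ll[1] = { .src_addr = buf1, .src_len = len1,
 *		  .next = 0,
 *		  .ll_flags = OCS_LL_DMA_FLAG_TERMINATE };
 *
 * The engine follows .next until it finds an entry whose .ll_flags has
 * OCS_LL_DMA_FLAG_TERMINATE set.
 */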

/*
 * Set endianness of inputs and outputs
 * AES_BYTE_ORDER_CFG
 * default 0x00000000
 * bit [10] - KEY_HI_LO_SWAP
 * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
 * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
 * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
 * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
 * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
 * bit [4] - IV_SWAP_BYTES_IN_DWORD
 * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
 * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
 * bit [1] - DIN_SWAP_DWORDS_IN_OCTWORD
 * bit [0] - DIN_SWAP_BYTES_IN_DWORD
 */
static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
}

/* Trigger AES process start. */
static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Indicate last bulk of data. */
static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_TERMINATION,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/*
 * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
 *
 * Called when DMA is programmed to fetch the last batch of data.
 * - For AES-CCM it is called for the last batch of Payload data and Ciphertext
 *   data.
 * - For AES-GCM, it is called for the last batch of Plaintext data and
 *   Ciphertext data.
 */
static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_LAST_CCM_GCM,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Wait for LAST_CCM_GCM bit to be unset. */
static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
{
	u32 aes_active_reg;

	do {
		aes_active_reg = ioread32(aes_dev->base_reg +
					  AES_ACTIVE_OFFSET);
	} while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
}

/* Wait until the 10-bit input buffer occupancy drops to zero. */
static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
{
	u32 reg;

	do {
		reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
	} while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
}

 /*
  * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
  * other bits).
  *
  * Called when DMA is programmed to fetch the last batch of Associated Data
  * (CCM case) or Additional Authenticated Data (GCM case).
  */
static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
		  aes_dev->base_reg + AES_ACTIVE_OFFSET);
}

/* Set DMA src and dst transfer size to 0 */
static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
}

/* Activate DMA for zero-byte transfer case. */
static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable src linked list */
static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable dst linked list */
static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Activate DMA and enable src and dst linked lists */
static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
		  AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
}

/* Reset PERF_CNTR to 0 and activate it */
static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
{
	iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
	iowrite32(AES_ACTIVATE_PERF_CNTR,
		  aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}

/* Wait until PERF_CNTR is > delay, then deactivate it */
static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
							   int delay)
{
	while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
		;
	iowrite32(AES_DEACTIVATE_PERF_CNTR,
		  aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
}

/* Disable AES and DMA IRQ. */
static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
{
	u32 isr_val = 0;

	/* Disable interrupts */
	iowrite32(AES_DISABLE_INT,
		  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);

	/* Clear any pending interrupt */
	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
	if (isr_val)
		iowrite32(isr_val,
			  aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);

	isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
	if (isr_val)
		iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
}

/* Enable AES or DMA IRQ.  IRQ is disabled once fired. */
static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
{
	if (irq == AES_COMPLETE_INT) {
		/* Ensure DMA error interrupts are enabled */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
		/*
		 * AES_IER
		 * default 0x00000000
		 * bits [31:3] - reserved
		 * bit [2] - EN_SKS_ERR
		 * bit [1] - EN_AES_COMPLETE
		 * bit [0] - reserved
		 */
		iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		return;
	}
	if (irq == AES_DMA_SRC_DONE_INT) {
		/* Ensure AES interrupts are disabled */
		iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
		/*
		 * DMA_MSI_IER
		 * default 0x00000000
		 * bits [31:9] - reserved
		 * bit [8] - CPD_ERR_INT_EN
		 * bit [7] - OUTBUF_RD_ERR_INT_EN
		 * bit [6] - OUTBUF_WR_ERR_INT_EN
		 * bit [5] - INBUF_RD_ERR_INT_EN
		 * bit [4] - INBUF_WR_ERR_INT_EN
		 * bit [3] - BAD_COMP_INT_EN
		 * bit [2] - SAI_INT_EN
		 * bit [1] - DST_DONE_INT_EN
		 * bit [0] - SRC_DONE_INT_EN
		 */
		iowrite32(AES_DMA_CPD_ERR_INT |
			  AES_DMA_OUTBUF_RD_ERR_INT |
			  AES_DMA_OUTBUF_WR_ERR_INT |
			  AES_DMA_INBUF_RD_ERR_INT |
			  AES_DMA_INBUF_WR_ERR_INT |
			  AES_DMA_BAD_COMP_INT |
			  AES_DMA_SAI_INT |
			  AES_DMA_SRC_DONE_INT,
			  aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
	}
}

/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
{
	int rc;

	reinit_completion(&aes_dev->irq_completion);
	aes_irq_enable(aes_dev, irq);
	rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
	if (rc)
		return rc;

	return aes_dev->dma_err_mask ? -EIO : 0;
}

/* Configure DMA to OCS, linked list mode */
static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				     dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
}

/* Configure DMA from OCS, linked list mode */
static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
				       dma_addr_t dma_list)
{
	iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
	iowrite32(dma_list,
		  aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
}

irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
{
	struct ocs_aes_dev *aes_dev = dev_id;
	u32 aes_dma_isr;

	/* Read DMA ISR status. */
	aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);

	/* Disable and clear interrupts. */
	aes_irq_disable(aes_dev);

	/* Save DMA error status. */
	aes_dev->dma_err_mask = aes_dma_isr &
				(AES_DMA_CPD_ERR_INT |
				 AES_DMA_OUTBUF_RD_ERR_INT |
				 AES_DMA_OUTBUF_WR_ERR_INT |
				 AES_DMA_INBUF_RD_ERR_INT |
				 AES_DMA_INBUF_WR_ERR_INT |
				 AES_DMA_BAD_COMP_INT |
				 AES_DMA_SAI_INT);

	/* Signal IRQ completion. */
	complete(&aes_dev->irq_completion);

	return IRQ_HANDLED;
}

/**
 * ocs_aes_set_key() - Write key into OCS AES hardware.
 * @aes_dev:	The OCS AES device to write the key to.
 * @key_size:	The size of the key (in bytes).
 * @key:	The key to write.
 * @cipher:	The cipher the key is for.
 *
 * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
 *
 * Return:	0 on success, negative error code otherwise.
 */
int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
		    enum ocs_cipher cipher)
{
	const u32 *key_u32;
	u32 val;
	int i;

	/* OCS AES supports 128-bit and 256-bit keys only. */
	if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported by AES cipher\n",
			key_size * 8);
		return -EINVAL;
	}
	/* OCS SM4 supports 128-bit keys only. */
	if (cipher == OCS_SM4 && key_size != 16) {
		dev_err(aes_dev->dev,
			"%d-bit keys not supported for SM4 cipher\n",
			key_size * 8);
		return -EINVAL;
	}

	if (!key)
		return -EINVAL;

	key_u32 = (const u32 *)key;

	/* Write key to AES_KEY[0-7] registers */
	for (i = 0; i < (key_size / sizeof(u32)); i++) {
		iowrite32(key_u32[i],
			  aes_dev->base_reg + AES_KEY_0_OFFSET +
			  (i * sizeof(u32)));
	}
	/*
	 * Write key size
	 * bits [31:1] - reserved
	 * bit [0] - AES_KEY_SIZE
	 *           0 - 128 bit key
	 *           1 - 256 bit key
	 */
	val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
	iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);

	return 0;
}
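
/*
 * Minimal usage sketch (illustrative, not driver code; 'aes_dev' and the
 * 16-byte 'key' buffer are assumed to come from the calling tfm context):
 *
 *	u8 key[16] = { ... };
 *	int rc = ocs_aes_set_key(aes_dev, sizeof(key), key, OCS_AES);
 *	if (rc)
 *		return rc;	// unsupported key size or NULL key
 */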

/* Write AES_COMMAND */
static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
				       enum ocs_cipher cipher,
				       enum ocs_mode mode,
				       enum ocs_instruction instruction)
{
	u32 val;

	/* AES_COMMAND
	 * default 0x000000CC
	 * bit [14] - CIPHER_SELECT
	 *            0 - AES
	 *            1 - SM4
	 * bits [11:8] - OCS_AES_MODE
	 *               0000 - ECB
	 *               0001 - CBC
	 *               0010 - CTR
	 *               0110 - CCM
	 *               0111 - GCM
	 *               1001 - CTS
	 * bits [7:6] - AES_INSTRUCTION
	 *              00 - ENCRYPT
	 *              01 - DECRYPT
	 *              10 - EXPAND
	 *              11 - BYPASS
	 * bits [3:2] - CTR_M_BITS
	 *              00 - No increment
	 *              01 - Least significant 32 bits are incremented
	 *              10 - Least significant 64 bits are incremented
	 *              11 - Full 128 bits are incremented
	 */
	val = (cipher << 14) | (mode << 8) | (instruction << 6) |
	      (AES_CTR_M_128_INC << 2);
	iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
}

static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
			 enum ocs_mode mode,
			 enum ocs_cipher cipher,
			 enum ocs_instruction instruction)
{
	/* Ensure interrupts are disabled and pending interrupts cleared. */
	aes_irq_disable(aes_dev);

	/* Set endianness recommended by data-sheet. */
	aes_a_set_endianness(aes_dev);

	/* Set AES_COMMAND register. */
	set_ocs_aes_command(aes_dev, cipher, mode, instruction);
}

/*
 * Write the byte length of the last AES/SM4 block of Payload data (without
 * zero padding and without the length of the MAC) in register AES_PLEN.
 */
static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
						   u32 size)
{
	u32 val;

	if (size == 0) {
		val = 0;
		goto exit;
	}

	val = size % AES_BLOCK_SIZE;
	if (val == 0)
		val = AES_BLOCK_SIZE;

exit:
	iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
}
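
/*
 * Worked example (illustrative): with AES_BLOCK_SIZE == 16, a 20-byte payload
 * ends with a 4-byte final block, so AES_PLEN is written as 4; a 32-byte
 * payload ends with a full block, so AES_PLEN is written as 16; a 0-byte
 * payload writes 0.
 */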

/*
 * Validate inputs according to mode.
 * If OK return 0; else return -EINVAL.
 */
static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
				   const u8 *iv, u32 iv_size,
				   dma_addr_t aad_dma_list, u32 aad_size,
				   const u8 *tag, u32 tag_size,
				   enum ocs_cipher cipher, enum ocs_mode mode,
				   enum ocs_instruction instruction,
				   dma_addr_t dst_dma_list)
{
	/* Ensure cipher, mode and instruction are valid. */
	if (!(cipher == OCS_AES || cipher == OCS_SM4))
		return -EINVAL;

	if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
	    mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
	    mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
		return -EINVAL;

	if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
	    instruction != OCS_EXPAND  && instruction != OCS_BYPASS)
		return -EINVAL;

	/*
	 * When instruction is OCS_BYPASS, OCS simply copies data from source
	 * to destination using DMA.
	 *
	 * AES mode is irrelevant, but both source and destination DMA
	 * linked-list must be defined.
	 */
	if (instruction == OCS_BYPASS) {
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;
	}

	/*
	 * For performance reasons switch based on mode to limit unnecessary
	 * conditionals for each mode
	 */
	switch (mode) {
	case OCS_MODE_ECB:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	case OCS_MODE_CBC:
		/* Ensure input length is multiple of block size */
		if (src_size % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTR:
		/* Ensure input length of 1 byte or greater */
		if (src_size == 0)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CTS:
		/* Ensure input length >= block size */
		if (src_size < AES_BLOCK_SIZE)
			return -EINVAL;

		/* Ensure source and destination linked lists are created */
		if (src_dma_list == DMA_MAPPING_ERROR ||
		    dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_GCM:
		/* Ensure IV is present and GCM_AES_IV_SIZE in length */
		if (!iv || iv_size != GCM_AES_IV_SIZE)
			return -EINVAL;

		/*
		 * If input data present ensure source and destination linked
		 * lists are created
		 */
		if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
				 dst_dma_list == DMA_MAPPING_ERROR))
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Ensure tag destination is set */
		if (!tag)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		return 0;

	case OCS_MODE_CCM:
		/* Ensure IV is present and block size in length */
		if (!iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		/* 2 <= L <= 8, so 1 <= L' <= 7 */
		if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
		    iv[L_PRIME_IDX] > L_PRIME_MAX)
			return -EINVAL;

		/* If aad present ensure aad linked list is created */
		if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* Just ensure that tag_size doesn't cause overflows. */
		if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
			return -EINVAL;

		if (instruction == OCS_DECRYPT) {
			/*
			 * If input data present ensure source and destination
			 * linked lists are created
			 */
			if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
					 dst_dma_list == DMA_MAPPING_ERROR))
				return -EINVAL;

			/* Ensure input tag is present */
			if (!tag)
				return -EINVAL;

			return 0;
		}

		/* Instruction == OCS_ENCRYPT */

		/*
		 * Destination linked list always required (for tag even if no
		 * input data)
		 */
		if (dst_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		/* If input data present ensure src linked list is created */
		if (src_size && src_dma_list == DMA_MAPPING_ERROR)
			return -EINVAL;

		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * ocs_aes_op() - Perform AES/SM4 operation.
 * @aes_dev:		The OCS AES device to use.
 * @mode:		The mode to use (ECB, CBC, CTR, or CTS).
 * @cipher:		The cipher to use (AES or SM4).
 * @instruction:	The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:	The OCS DMA list mapping output memory.
 * @src_dma_list:	The OCS DMA list mapping input payload data.
 * @src_size:		The amount of data mapped by @src_dma_list.
 * @iv:			The IV vector.
 * @iv_size:		The size (in bytes) of @iv.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_op(struct ocs_aes_dev *aes_dev,
	       enum ocs_mode mode,
	       enum ocs_cipher cipher,
	       enum ocs_instruction instruction,
	       dma_addr_t dst_dma_list,
	       dma_addr_t src_dma_list,
	       u32 src_size,
	       u8 *iv,
	       u32 iv_size)
{
	u32 *iv32;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
				     NULL, 0, cipher, mode, instruction,
				     dst_dma_list);
	if (rc)
		return rc;
	/*
	 * ocs_aes_validate_inputs() is a generic check, now ensure mode is not
	 * GCM or CCM.
	 */
	if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
		return -EINVAL;

	/* Cast IV to u32 array. */
	iv32 = (u32 *)iv;

	ocs_aes_init(aes_dev, mode, cipher, instruction);

	if (mode == OCS_MODE_CTS) {
		/* Write the byte length of the last data block to engine. */
		ocs_aes_write_last_data_blk_len(aes_dev, src_size);
	}

	/* ECB is the only mode that doesn't use IV. */
	if (mode != OCS_MODE_ECB) {
		iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
		iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
		iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
		iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Configure and activate input / output DMA. */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);

	if (mode == OCS_MODE_CTS) {
		/*
		 * For CTS mode, instruct engine to activate ciphertext
		 * stealing if last block of data is incomplete.
		 */
		aes_a_set_last_gcx(aes_dev);
	} else {
		/* For all other modes, just write the 'termination' bit. */
		aes_a_op_termination(aes_dev);
	}

	/* Wait for engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	if (mode == OCS_MODE_CTR) {
		/* Read back IV for streaming mode */
		iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
		iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
		iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
		iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
	}

	return 0;
}
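
/*
 * Minimal usage sketch (illustrative only; 'src_dll' and 'dst_dll' are
 * assumed to be OCS DMA lists built with ocs_create_linked_list_from_sg(),
 * and 'iv' a 16-byte buffer that gets updated in CTR mode):
 *
 *	rc = ocs_aes_op(aes_dev, OCS_MODE_CTR, OCS_AES, OCS_ENCRYPT,
 *			dst_dll.dma_addr, src_dll.dma_addr, src_size,
 *			iv, AES_BLOCK_SIZE);
 */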

/* Compute and write J0 to engine registers. */
static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
				 const u8 *iv)
{
	const u32 *j0 = (u32 *)iv;

	/*
	 * IV must be 12 bytes; other sizes are not supported, since the Linux
	 * crypto API only expects/allows a 12-byte IV for GCM.
	 */
	iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
	iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
	iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
	iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
}
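
/*
 * Background note (per NIST SP 800-38D, not the OCS databook): for a 96-bit
 * IV, J0 = IV || 0^31 || 1, i.e. the 12 IV bytes followed by the 32-bit
 * value 1. That is why AES_IV_0 receives the constant 0x00000001 while the
 * three IV words are written byte-swapped into AES_IV_1..AES_IV_3 above.
 */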

/* Read GCM tag from engine registers. */
static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
					u8 *tag, u32 tag_size)
{
	u32 tag_u32[AES_MAX_TAG_SIZE_U32];

	/*
	 * The Authentication Tag T is stored in Little Endian order in the
	 * registers with the most significant bytes stored from AES_T_MAC[3]
	 * downward.
	 */
	tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
	tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
	tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
	tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));

	memcpy(tag, tag_u32, tag_size);
}

/**
 * ocs_aes_gcm_op() - Perform GCM operation.
 * @aes_dev:		The OCS AES device to use.
 * @cipher:		The Cipher to use (AES or SM4).
 * @instruction:	The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:	The OCS DMA list mapping output memory.
 * @src_dma_list:	The OCS DMA list mapping input payload data.
 * @src_size:		The amount of data mapped by @src_dma_list.
 * @iv:			The input IV vector.
 * @aad_dma_list:	The OCS DMA list mapping input AAD data.
 * @aad_size:		The amount of data mapped by @aad_dma_list.
 * @out_tag:		Where to store computed tag.
 * @tag_size:		The size (in bytes) of @out_tag.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   const u8 *iv,
		   dma_addr_t aad_dma_list,
		   u32 aad_size,
		   u8 *out_tag,
		   u32 tag_size)
{
	u64 bit_len;
	u32 val;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     GCM_AES_IV_SIZE, aad_dma_list,
				     aad_size, out_tag, tag_size, cipher,
				     OCS_MODE_GCM, instruction,
				     dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);

	/* Compute and write J0 to OCS HW. */
	ocs_aes_gcm_write_j0(aes_dev, iv);

	/* Write out_tag byte length */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);

	/* Write the byte length of the last plaintext / ciphertext block. */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Write ciphertext bit length */
	bit_len = (u64)src_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);

	/* Write aad bit length */
	bit_len = (u64)aad_size * 8;
	val = bit_len & 0xFFFFFFFF;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
	val = bit_len >> 32;
	iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	/* Process AAD. */
	if (aad_size) {
		/* If aad present, configure DMA to feed it to the engine. */
		dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
		aes_a_dma_active_src_ll_en(aes_dev);

		/* Instructs engine to pad last block of aad, if needed. */
		aes_a_set_last_gcx_and_adata(aes_dev);

		/* Wait for DMA transfer to complete. */
		rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
		if (rc)
			return rc;
	} else {
		aes_a_set_last_gcx_and_adata(aes_dev);
	}

	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/* Now process payload. */
	if (src_size) {
		/* Configure and activate DMA for both input and output data. */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
	}

	/* Instruct AES/SM4 engine that payload processing is over. */
	aes_a_set_last_gcx(aes_dev);

	/* Wait for OCS AES engine to complete processing. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);

	return 0;
}
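
/*
 * Minimal usage sketch (illustrative only; DMA lists and buffers are assumed
 * to be set up by the caller, 'tag' being a 16-byte output buffer):
 *
 *	rc = ocs_aes_gcm_op(aes_dev, OCS_AES, OCS_ENCRYPT,
 *			    dst_dll.dma_addr, src_dll.dma_addr, src_size,
 *			    iv, aad_dll.dma_addr, aad_size, tag, 16);
 *
 * Note that the tag is computed and returned for both encrypt and decrypt;
 * on decrypt the caller is presumably expected to compare it against the
 * received tag.
 */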

/* Write encrypted tag to AES/SM4 engine. */
static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
					    const u8 *in_tag, u32 tag_size)
{
	int i;

	/* Ensure DMA input buffer is empty */
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	/*
	 * During CCM decrypt, the OCS block needs to finish processing the
	 * ciphertext before the tag is written, so a delay is needed after
	 * the DMA has completed writing the ciphertext.
	 */
	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_TAG_CLK_COUNT);

	/* Write encrypted tag to AES/SM4 engine. */
	for (i = 0; i < tag_size; i++) {
		iowrite8(in_tag[i], aes_dev->base_reg +
				    AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
	}
}

/*
 * Write B0 CCM block to OCS AES HW.
 *
 * Note: B0 format is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.1)
 */
static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
				const u8 *iv, u32 adata_size, u32 tag_size,
				u32 cryptlen)
{
	u8 b0[16]; /* CCM B0 block is 16 bytes long. */
	int i, q;

	/* Initialize B0 to 0. */
	memset(b0, 0, sizeof(b0));

	/*
	 * B0[0] is the 'Flags Octet' and has the following structure:
	 *   bit 7: Reserved
	 *   bit 6: Adata flag
	 *   bit 5-3: t value encoded as (t-2)/2
	 *   bit 2-0: q value encoded as q - 1
	 */
	/* If there is AAD data, set the Adata flag. */
	if (adata_size)
		b0[0] |= BIT(6);
	/*
	 * t denotes the octet length of T.
	 * t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
	 * encoded as (t - 2) / 2
	 */
	b0[0] |= (((tag_size - 2) / 2) & 0x7)  << 3;
	/*
	 * q is the octet length of Q.
	 * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
	 * q - 1 == iv[0] & 0x7;
	 */
	b0[0] |= iv[0] & 0x7;
	/*
	 * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
	 * and must be copied to b0[1]..b0[15-q].
	 * q == (iv[0] & 0x7) + 1
	 */
	q = (iv[0] & 0x7) + 1;
	for (i = 1; i <= 15 - q; i++)
		b0[i] = iv[i];
	/*
	 * The rest of B0 must contain Q, i.e., the message length.
	 * Q is encoded in q octets, in big-endian order, so to write it, we
	 * start from the end of B0 and we move backward.
	 */
	i = sizeof(b0) - 1;
	while (q) {
		b0[i] = cryptlen & 0xff;
		cryptlen >>= 8;
		i--;
		q--;
	}
	/*
	 * If cryptlen is not zero at this point, it means that its original
	 * value was too big.
	 */
	if (cryptlen)
		return -EOVERFLOW;
	/* Now write B0 to OCS AES input buffer. */
	for (i = 0; i < sizeof(b0); i++)
		iowrite8(b0[i], aes_dev->base_reg +
				AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
	return 0;
}
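
/*
 * Worked example (illustrative): for tag_size = 16, adata present and
 * iv[0] & 0x7 == 3 (i.e., q = 4), the Flags octet is
 * BIT(6) | (((16 - 2) / 2) << 3) | 3 = 0x40 | 0x38 | 0x03 = 0x7B;
 * the nonce then occupies b0[1]..b0[11] and the message length Q is written
 * big-endian into b0[12]..b0[15].
 */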

/*
 * Write adata length to OCS AES HW.
 *
 * Note: adata len encoding is documented in NIST Special Publication 800-38C
 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
 * (see Section A.2.2)
 */
static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
					u64 adata_len)
{
	u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
	int i, len;

	/*
	 * adata_len ('a') is encoded as follows:
	 * If 0 < a < 2^16 - 2^8    ==> 'a' encoded as [a]16, i.e., two octets
	 *				(big endian).
	 * If 2^16 - 2^8 ≤ a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
	 *				i.e., six octets (big endian).
	 * If 2^32 ≤ a < 2^64       ==> 'a' encoded as 0xff || 0xff || [a]64,
	 *				i.e., ten octets (big endian).
	 */
	if (adata_len < 65280) {
		len = 2;
		*(__be16 *)enc_a = cpu_to_be16(adata_len);
	} else if (adata_len <= 0xFFFFFFFF) {
		len = 6;
		*(__be16 *)enc_a = cpu_to_be16(0xfffe);
		*(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
	} else { /* adata_len >= 2^32 */
		len = 10;
		*(__be16 *)enc_a = cpu_to_be16(0xffff);
		*(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
	}
	for (i = 0; i < len; i++)
		iowrite8(enc_a[i],
			 aes_dev->base_reg +
			 AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
}
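
/*
 * Worked example (illustrative): adata_len = 32 is encoded as the two octets
 * 0x00 0x20; adata_len = 65536 (which is >= 2^16 - 2^8) is encoded as the six
 * octets 0xff 0xfe 0x00 0x01 0x00 0x00.
 */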

static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
				dma_addr_t adata_dma_list, u32 adata_size)
{
	int rc;

	if (!adata_size) {
		/* Since no aad the LAST_GCX bit can be set now */
		aes_a_set_last_gcx_and_adata(aes_dev);
		goto exit;
	}

	/* Adata case. */

	/*
	 * Form the encoding of the Associated data length and write it
	 * to the AES/SM4 input buffer.
	 */
	ocs_aes_ccm_write_adata_len(aes_dev, adata_size);

	/* Configure the AES/SM4 DMA to fetch the Associated Data */
	dma_to_ocs_aes_ll(aes_dev, adata_dma_list);

	/* Activate DMA to fetch Associated data. */
	aes_a_dma_active_src_ll_en(aes_dev);

	/* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
	aes_a_set_last_gcx_and_adata(aes_dev);

	/* Wait for DMA transfer to complete. */
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
	if (rc)
		return rc;

exit:
	/* Wait until adata (if present) has been processed. */
	aes_a_wait_last_gcx(aes_dev);
	aes_a_dma_wait_input_buffer_occupancy(aes_dev);

	return 0;
}

static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (src_size) {
		/*
		 * Configure and activate DMA for both input and output
		 * data.
		 */
		dma_to_ocs_aes_ll(aes_dev, src_dma_list);
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_src_dst_ll_en(aes_dev);
	} else {
		/* Configure and activate DMA for output data only. */
		dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
		aes_a_dma_active_dst_ll_en(aes_dev);
	}

	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register to instruct
	 * AES/SM4 engine to pad the last block of data.
	 */
	aes_a_set_last_gcx(aes_dev);

	/* We are done, wait for IRQ and return. */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
}

static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
					  dma_addr_t dst_dma_list,
					  dma_addr_t src_dma_list,
					  u32 src_size)
{
	if (!src_size) {
		/* Let engine process 0-length input. */
		aes_a_dma_set_xfer_size_zero(aes_dev);
		aes_a_dma_active(aes_dev);
		aes_a_set_last_gcx(aes_dev);

		return 0;
	}

	/*
	 * Configure and activate DMA for both input and output
	 * data.
	 */
	dma_to_ocs_aes_ll(aes_dev, src_dma_list);
	dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
	aes_a_dma_active_src_dst_ll_en(aes_dev);
	/*
	 * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
	 * AES/SM4 engine to differentiate between encrypted data and
	 * encrypted MAC.
	 */
	aes_a_set_last_gcx(aes_dev);
	 /*
	  * Enable DMA DONE interrupt; once DMA transfer is over,
	  * interrupt handler will process the MAC/tag.
	  */
	return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
}

/*
 * Compare Tag to Yr.
 *
 * Only used at the end of CCM decrypt. If tag == yr, message authentication
 * has succeeded.
 */
static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
					u8 tag_size_bytes)
{
	u32 tag[AES_MAX_TAG_SIZE_U32];
	u32 yr[AES_MAX_TAG_SIZE_U32];
	u8 i;

	/* Read Tag and Yr from AES registers. */
	for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
		tag[i] = ioread32(aes_dev->base_reg +
				  AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
		yr[i] = ioread32(aes_dev->base_reg +
				 AES_MULTIPURPOSE2_0_OFFSET +
				 (i * sizeof(u32)));
	}

	return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
}

/**
 * ocs_aes_ccm_op() - Perform CCM operation.
 * @aes_dev:		The OCS AES device to use.
 * @cipher:		The Cipher to use (AES or SM4).
 * @instruction:	The instruction to perform (encrypt or decrypt).
 * @dst_dma_list:	The OCS DMA list mapping output memory.
 * @src_dma_list:	The OCS DMA list mapping input payload data.
 * @src_size:		The amount of data mapped by @src_dma_list.
 * @iv:			The input IV vector.
 * @adata_dma_list:	The OCS DMA list mapping input A-data.
 * @adata_size:		The amount of data mapped by @adata_dma_list.
 * @in_tag:		Input tag.
 * @tag_size:		The size (in bytes) of @in_tag.
 *
 * Note: for encrypt the tag is appended to the ciphertext (in the memory
 *	 mapped by @dst_dma_list).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
		   enum ocs_cipher cipher,
		   enum ocs_instruction instruction,
		   dma_addr_t dst_dma_list,
		   dma_addr_t src_dma_list,
		   u32 src_size,
		   u8 *iv,
		   dma_addr_t adata_dma_list,
		   u32 adata_size,
		   u8 *in_tag,
		   u32 tag_size)
{
	u32 *iv_32;
	u8 lprime;
	int rc;

	rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
				     AES_BLOCK_SIZE, adata_dma_list, adata_size,
				     in_tag, tag_size, cipher, OCS_MODE_CCM,
				     instruction, dst_dma_list);
	if (rc)
		return rc;

	ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);

	/*
	 * Note: rfc 3610 and NIST 800-38C require counter of zero to encrypt
	 * auth tag so ensure this is the case
	 */
	lprime = iv[L_PRIME_IDX];
	memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));

	/*
	 * Nonce is already converted to ctr0 before being passed into this
	 * function as iv.
	 */
	iv_32 = (u32 *)iv;
	iowrite32(__swab32(iv_32[0]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
	iowrite32(__swab32(iv_32[1]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
	iowrite32(__swab32(iv_32[2]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
	iowrite32(__swab32(iv_32[3]),
		  aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);

	/* Write MAC/tag length in register AES_TLEN */
	iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
	/*
	 * Write the byte length of the last AES/SM4 block of Payload data
	 * (without zero padding and without the length of the MAC) in register
	 * AES_PLEN.
	 */
	ocs_aes_write_last_data_blk_len(aes_dev, src_size);

	/* Set AES_ACTIVE.TRIGGER to start the operation. */
	aes_a_op_trigger(aes_dev);

	aes_a_dma_reset_and_activate_perf_cntr(aes_dev);

	/* Form block B0 and write it to the AES/SM4 input buffer. */
	rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
	if (rc)
		return rc;
	/*
	 * Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
	 * clock cycles since TRIGGER bit was set
	 */
	aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
						CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);

	/* Process Adata. */
	rc = ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
	if (rc)
		return rc;

	/* For Encrypt case we just process the payload and return. */
	if (instruction == OCS_ENCRYPT) {
		return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
						      src_dma_list, src_size);
	}
	/* For Decrypt we need to process the payload and then the tag. */
	rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
					    src_dma_list, src_size);
	if (rc)
		return rc;

	/* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
	ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
	rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
	if (rc)
		return rc;

	return ccm_compare_tag_to_yr(aes_dev, tag_size);
}

/**
 * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
 * @aes_dev:	  The OCS AES device the list will be created for.
 * @sg:		  The SG list OCS DMA linked list will be created from. When
 *		  passed to this function, @sg must have been already mapped
 *		  with dma_map_sg().
 * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
 *		  value returned by dma_map_sg() when @sg was mapped.
 * @dll_desc:	  The OCS DMA dma_list to use to store information about the
 *		  created linked list.
 * @data_size:	  The size of the data (from the SG list) to be mapped into the
 *		  OCS DMA linked list.
 * @data_offset:  The offset (within the SG list) of the data to be mapped.
 *
 * Return:	0 on success, negative error code otherwise.
 */
int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
				   struct scatterlist *sg,
				   int sg_dma_count,
				   struct ocs_dll_desc *dll_desc,
				   size_t data_size, size_t data_offset)
{
	struct ocs_dma_linked_list *ll = NULL;
	struct scatterlist *sg_tmp;
	unsigned int tmp;
	int dma_nents;
	int i;

	if (!dll_desc || !sg || !aes_dev)
		return -EINVAL;

	/* Default values for when no dll_desc is created. */
	dll_desc->vaddr = NULL;
	dll_desc->dma_addr = DMA_MAPPING_ERROR;
	dll_desc->size = 0;

	if (data_size == 0)
		return 0;

	/* Loop over sg_list until we reach entry at specified offset. */
	while (data_offset >= sg_dma_len(sg)) {
		data_offset -= sg_dma_len(sg);
		sg_dma_count--;
		sg = sg_next(sg);
		/* If we reach the end of the list, offset was invalid. */
		if (!sg || sg_dma_count == 0)
			return -EINVAL;
	}

	/* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
	dma_nents = 0;
	tmp = 0;
	sg_tmp = sg;
	while (tmp < data_offset + data_size) {
		/* If we reach the end of the list, data_size was invalid. */
		if (!sg_tmp)
			return -EINVAL;
		tmp += sg_dma_len(sg_tmp);
		dma_nents++;
		sg_tmp = sg_next(sg_tmp);
	}
	if (dma_nents > sg_dma_count)
		return -EINVAL;

	/* Allocate the DMA list, one entry for each SG entry. */
	dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
	dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
					     &dll_desc->dma_addr, GFP_KERNEL);
	if (!dll_desc->vaddr)
		return -ENOMEM;

	/* Populate DMA linked list entries. */
	ll = dll_desc->vaddr;
	for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
		ll[i].src_addr = sg_dma_address(sg) + data_offset;
		ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
				(sg_dma_len(sg) - data_offset) : data_size;
		data_offset = 0;
		data_size -= ll[i].src_len;
		/* Current element points to the DMA address of the next one. */
		ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
		ll[i].ll_flags = 0;
	}
	/* Terminate last element. */
	ll[i - 1].next = 0;
	ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;

	return 0;
}
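
/*
 * Minimal usage sketch (illustrative only; error handling and the surrounding
 * request/device objects, e.g. 'req', are assumed to exist in the caller):
 *
 *	struct ocs_dll_desc dll;
 *	int nents, rc;
 *
 *	nents = dma_map_sg(aes_dev->dev, req->src, sg_nents(req->src),
 *			   DMA_TO_DEVICE);
 *	rc = ocs_create_linked_list_from_sg(aes_dev, req->src, nents, &dll,
 *					    cryptlen, 0);
 *	// ...use dll.dma_addr as the src_dma_list of an ocs_aes_op() call...
 *	dma_free_coherent(aes_dev->dev, dll.size, dll.vaddr, dll.dma_addr);
 */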