/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/simd.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>

/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey	User key
 * keyBits	AES key size (128, 192, or 256 bits)
 * keysched	AES key schedule to be initialized, of type aes_key_t.
 *		Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
	const aes_impl_ops_t *ops = aes_impl_get_ops();
	aes_key_t *newbie = keysched;
	uint_t keysize, i, j;
	union {
		uint64_t	ka64[4];
		uint32_t	ka32[8];
	} keyarr;

	switch (keyBits) {
	case 128:
		newbie->nr = 10;
		break;

	case 192:
		newbie->nr = 12;
		break;

	case 256:
		newbie->nr = 14;
		break;

	default:
		/* should never get here */
		return;
	}
	keysize = CRYPTO_BITS2BYTES(keyBits);

	/*
	 * The generic C implementation requires a byteswap on little-endian
	 * machines; accelerated implementations for specific architectures
	 * may not.
	 */
	if (!ops->needs_byteswap) {
		/* no byteswap needed */
		if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
			for (i = 0, j = 0; j < keysize; i++, j += 8) {
				/* LINTED: pointer alignment */
				keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
			}
		} else {
			memcpy(keyarr.ka32, cipherKey, keysize);
		}
	} else {
		/* byte swap */
		for (i = 0, j = 0; j < keysize; i++, j += 4) {
			keyarr.ka32[i] =
			    htonl(*(uint32_t *)(void *)&cipherKey[j]);
		}
	}

	ops->generate(newbie, keyarr.ka32, keyBits);
	newbie->ops = ops;

	/*
	 * Note: if there are systems that need the AES_64BIT_KS type in the
	 * future, move setting key schedule type to individual implementations
	 */
	newbie->type = AES_32BIT_KS;
}


/*
 * Encrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * pt	Input block (plaintext)
 * ct	Output block (ciphertext).  Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
	aes_key_t	*ksch = (aes_key_t *)ks;
	const aes_impl_ops_t	*ops = ksch->ops;

	if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED:  pointer alignment */
		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr,
		    /* LINTED:  pointer alignment */
		    (uint32_t *)pt, (uint32_t *)ct);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
		} else
			memcpy(&buffer, pt, AES_BLOCK_LEN);

		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
		} else
			memcpy(ct, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}


/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * ct	Input block (ciphertext)
 * pt	Output block (plaintext).  Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
	aes_key_t	*ksch = (aes_key_t *)ks;
	const aes_impl_ops_t	*ops = ksch->ops;

	if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED:  pointer alignment */
		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr,
		    /* LINTED:  pointer alignment */
		    (uint32_t *)ct, (uint32_t *)pt);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
		} else
			memcpy(&buffer, ct, AES_BLOCK_LEN);

		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
		} else
			memcpy(pt, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}
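
/*
 * Illustrative sketch (not part of the original code): a single-block
 * round trip through aes_encrypt_block()/aes_decrypt_block().  It assumes
 * the caller already holds an initialized key schedule (see
 * aes_init_keysched() above); the function name and the placeholder
 * buffer are hypothetical.  As the block comments note, the output
 * pointer may overlap the input, so in-place operation on one buffer
 * is also valid.
 */
#if 0	/* example only, not compiled */
static int
aes_block_roundtrip_example(const void *ks)
{
	uint8_t block[AES_BLOCK_LEN] = { 0 };	/* placeholder plaintext */
	int rv;

	/* Encrypt in place: ct overlaps pt. */
	rv = aes_encrypt_block(ks, block, block);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* Decrypt in place: block now holds the original plaintext again. */
	return (aes_decrypt_block(ks, block, block));
}
#endif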


/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size		Size of key schedule allocated, in bytes
 * kmflag	Flag passed to kmem_alloc(9F); ignored in userland.
 */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
	aes_key_t *keysched;

	keysched = kmem_alloc(sizeof (aes_key_t), kmflag);
	if (keysched != NULL) {
		*size = sizeof (aes_key_t);
		return (keysched);
	}
	return (NULL);
}
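
/*
 * Illustrative sketch (not part of the original code): the intended
 * lifecycle of a key schedule.  The caller allocates storage with
 * aes_alloc_keysched(), fills it with aes_init_keysched(), and releases
 * it with kmem_free() when done.  The function name, the 256-bit
 * placeholder key, and the scrubbing of key material before freeing are
 * assumptions for illustration only.
 */
#if 0	/* example only, not compiled */
static void
aes_keysched_lifecycle_example(const uint8_t key[32])
{
	size_t size;
	void *ks;

	ks = aes_alloc_keysched(&size, KM_SLEEP);
	if (ks == NULL)
		return;

	/* Expand the user key into encryption and decryption schedules. */
	aes_init_keysched(key, 256, ks);

	/* ... use aes_encrypt_block()/aes_decrypt_block() with ks ... */

	/* When finished, scrub and free the schedule. */
	memset(ks, 0, size);
	kmem_free(ks, size);
}
#endif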

/* AES implementation that contains the fastest methods */
static aes_impl_ops_t aes_fastest_impl = {
	.name = "fastest"
};

/* All compiled in implementations */
static const aes_impl_ops_t *aes_all_impl[] = {
	&aes_generic_impl,
#if defined(__x86_64)
	&aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
	&aes_aesni_impl,
#endif
};

/* Indicate that implementation selection has been completed */
static boolean_t aes_impl_initialized = B_FALSE;

/* Select aes implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX-1)

#define	AES_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_aes_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t aes_supp_impl_cnt = 0;
static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)];

/*
 * Returns the AES operations for encrypt/decrypt/key setup.  When a
 * SIMD implementation is not allowed in the current context, fall back
 * to the generic implementation.
 */
const aes_impl_ops_t *
aes_impl_get_ops(void)
{
	if (!kfpu_allowed())
		return (&aes_generic_impl);

	const aes_impl_ops_t *ops = NULL;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(aes_impl_initialized);
		ops = &aes_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through supported implementations */
		ASSERT(aes_impl_initialized);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt;
		ops = aes_supp_impl[idx];
		break;
	default:
		ASSERT3U(impl, <, aes_supp_impl_cnt);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(aes_all_impl))
			ops = aes_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}

/*
 * Initialize all supported implementations.
 */
void
aes_impl_init(void)
{
	aes_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported implementations into aes_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) {
		curr_impl = (aes_impl_ops_t *)aes_all_impl[i];

		if (curr_impl->is_supported())
			aes_supp_impl[c++] = (aes_impl_ops_t *)curr_impl;
	}
	aes_supp_impl_cnt = c;

	/*
	 * Set the fastest implementation given the assumption that the
	 * hardware accelerated version is the fastest.
	 */
#if defined(__x86_64)
#if defined(HAVE_AES)
	if (aes_aesni_impl.is_supported()) {
		memcpy(&aes_fastest_impl, &aes_aesni_impl,
		    sizeof (aes_fastest_impl));
	} else
#endif
	{
		memcpy(&aes_fastest_impl, &aes_x86_64_impl,
		    sizeof (aes_fastest_impl));
	}
#else
	memcpy(&aes_fastest_impl, &aes_generic_impl,
	    sizeof (aes_fastest_impl));
#endif

	strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX);

	/* Finish initialization */
	atomic_swap_32(&icp_aes_impl, user_sel_impl);
	aes_impl_initialized = B_TRUE;
}

static const struct {
	const char *name;
	uint32_t sel;
} aes_impl_opts[] = {
		{ "cycle",	IMPL_CYCLE },
		{ "fastest",	IMPL_FASTEST },
};

/*
 * Set the desired AES implementation.
 *
 * If we are called before init(), the user preference will be saved in
 * user_sel_impl and applied in a later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, icp_aes_impl
 * is updated directly.
 *
 * @val		Name of the AES implementation to use
 */
int
aes_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[AES_IMPL_NAME_MAX];
	uint32_t impl = AES_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, AES_IMPL_NAME_MAX);
	if (i == 0 || i >= AES_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, AES_IMPL_NAME_MAX);
	while (i > 0 && isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		if (strcmp(req_name, aes_impl_opts[i].name) == 0) {
			impl = aes_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* check all supported impl if init() was already called */
	if (err != 0 && aes_impl_initialized) {
		/* check all supported implementations */
		for (i = 0; i < aes_supp_impl_cnt; i++) {
			if (strcmp(req_name, aes_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (aes_impl_initialized)
			atomic_swap_32(&icp_aes_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
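
/*
 * Illustrative sketch (not part of the original code): selecting an
 * implementation by name.  The mandatory options "fastest" and "cycle"
 * are always accepted; concrete implementation names such as "generic"
 * are only accepted once aes_impl_init() has run, otherwise -EINVAL is
 * returned.  On Linux the module parameter defined below (icp_aes_impl)
 * routes through this same function.  The function name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
aes_impl_select_example(void)
{
	/* Always valid: defer to the pre-selected fastest implementation. */
	VERIFY0(aes_impl_set("fastest"));

	/* Only valid after aes_impl_init(); fails with -EINVAL before it. */
	if (aes_impl_set("generic") != 0) {
		/* handle the error as appropriate for the caller */
	}
}
#endif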

#if defined(_KERNEL) && defined(__linux__)

static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
	return (aes_impl_set(val));
}

static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
	int i, cnt = 0;
	char *fmt;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	ASSERT(aes_impl_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < aes_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_supp_impl[i]->name);
	}

	return (cnt);
}

module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation.");
#endif