/*
 * ---------------------------------------------------------------------------
 * Copyright (c) 1998-2007, Brian Gladman, Worcester, UK. All rights reserved.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software is allowed (with or without
 * changes) provided that:
 *
 *  1. source code distributions include the above copyright notice, this
 *	list of conditions and the following disclaimer;
 *
 *  2. binary distributions include the above copyright notice, this list
 *	of conditions and the following disclaimer in their documentation;
 *
 *  3. the name of the copyright holder is not used to endorse products
 *	built using this software without specific written permission.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 * Issue Date: 20/12/2007
 *
 * This file contains the compilation options for AES (Rijndael) and code
 * that is common across encryption, key scheduling and table generation.
 *
 * OPERATION
 *
 * These source code files implement the AES algorithm Rijndael designed by
 * Joan Daemen and Vincent Rijmen. This version is designed for the standard
 * block size of 16 bytes and for key sizes of 128, 192 and 256 bits (16, 24
 * and 32 bytes).
 *
 * This version is designed for flexibility and speed using operations on
 * 32-bit words rather than operations on bytes.  It can be compiled with
 * either big or little endian internal byte order but is faster when the
 * native byte order for the processor is used.
 *
 * THE CIPHER INTERFACE
 *
 * The cipher interface is implemented as an array of bytes in which lower
 * AES bit sequence indexes map to higher numeric significance within bytes.
 */

/*
 * OpenSolaris changes
 * 1. Added __cplusplus and _AESOPT_H header guards
 * 2. Added header files sys/types.h and aes_impl.h
 * 3. Added defines for AES_ENCRYPT, AES_DECRYPT, AES_REV_DKS, and ASM_AMD64_C
 * 4. Moved defines for IS_BIG_ENDIAN, IS_LITTLE_ENDIAN, PLATFORM_BYTE_ORDER
 *    from brg_endian.h
 * 5. Undefined VIA_ACE_POSSIBLE and ASSUME_VIA_ACE_PRESENT
 * 6. Changed uint_8t and uint_32t to uint8_t and uint32_t
 * 7. Defined aes_sw32 as htonl() for byte swapping
 * 8. Cstyled and hdrchk code
 *
 */
#ifndef _AESOPT_H
#define	_AESOPT_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/zfs_context.h>
#include <aes/aes_impl.h>

/*  SUPPORT FEATURES */
#define	AES_ENCRYPT /* if support for encryption is needed */
#define	AES_DECRYPT /* if support for decryption is needed */

/*  PLATFORM-SPECIFIC FEATURES */
#define	IS_BIG_ENDIAN		4321 /* byte 0 is most significant (mc68k) */
#define	IS_LITTLE_ENDIAN	1234 /* byte 0 is least significant (i386) */
#define	PLATFORM_BYTE_ORDER	IS_LITTLE_ENDIAN
#define	AES_REV_DKS /* define to reverse decryption key schedule */


/*
 *  CONFIGURATION - THE USE OF DEFINES
 *	Later in this section there are a number of defines that control the
 *	operation of the code.  In each section, the purpose of each define is
 *	explained so that the relevant form can be included or excluded by
 *	setting either 1's or 0's respectively on the branches of the related
 *	#if clauses.  The following local defines should not be changed.
 */

#define	ENCRYPTION_IN_C	1
#define	DECRYPTION_IN_C	2
#define	ENC_KEYING_IN_C	4
#define	DEC_KEYING_IN_C	8

#define	NO_TABLES	0
#define	ONE_TABLE	1
#define	FOUR_TABLES	4
#define	NONE		0
#define	PARTIAL		1
#define	FULL		2

/*  --- START OF USER CONFIGURED OPTIONS --- */

/*
 *  1. BYTE ORDER WITHIN 32 BIT WORDS
 *
 *	The fundamental data processing units in Rijndael are 8-bit bytes. The
 *	input, output and key input are all enumerated arrays of bytes in which
 *	bytes are numbered starting at zero and increasing to one less than the
 *	number of bytes in the array in question. This enumeration is only used
 *	for naming bytes and does not imply any adjacency or order relationship
 *	from one byte to another. When these inputs and outputs are considered
 *	as bit sequences, bits 8*n to 8*n+7 of the bit sequence are mapped to
 *	byte[n] with bit 8n+i in the sequence mapped to bit 7-i within the byte.
 *	In this implementation bits are numbered from 0 to 7 starting at the
 *	numerically least significant end of each byte.  Bit n represents 2^n.
 *
 *	However, Rijndael can be implemented more efficiently using 32-bit
 *	words by packing bytes into words so that bytes 4*n to 4*n+3 are placed
 *	into word[n]. While in principle these bytes can be assembled into words
 *	in any positions, this implementation only supports the two formats in
 *	which bytes in adjacent positions within words also have adjacent byte
 *	numbers. This order is called big-endian if the lowest numbered bytes
 *	in words have the highest numeric significance and little-endian if the
 *	opposite applies.
 *
 *	This code can work in either order irrespective of the order used by the
 *	machine on which it runs. Normally the internal byte order will be set
 *	to the order of the processor on which the code is to be run, but this
 *	define can be used to reverse this in special situations.
 *
 *	WARNING: Assembler code versions rely on PLATFORM_BYTE_ORDER being set.
 *	This define will hence be redefined later (in section 4) if necessary.
 */

#if 1
#define	ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
#elif 0
#define	ALGORITHM_BYTE_ORDER IS_LITTLE_ENDIAN
#elif 0
#define	ALGORITHM_BYTE_ORDER IS_BIG_ENDIAN
#else
#error The algorithm byte order is not defined
#endif
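
/*
 * For illustration: if bytes b0..b3 with the values 0x01, 0x02, 0x03, 0x04
 * are packed into a word as described above, that word has the numeric value
 * 0x04030201 in the little-endian format (byte 0 least significant) and
 * 0x01020304 in the big-endian format (byte 0 most significant).
 */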

/*  2. VIA ACE SUPPORT */

#if defined(__GNUC__) && defined(__i386__) || \
	defined(_WIN32) && defined(_M_IX86) && \
	!(defined(_WIN64) || defined(_WIN32_WCE) || \
	defined(_MSC_VER) && (_MSC_VER <= 800))
#define	VIA_ACE_POSSIBLE
#endif

/*
 *  Define this option if support for the VIA ACE is required. This uses
 *  inline assembler instructions and is only implemented for the Microsoft,
 *  Intel and GCC compilers.  If VIA ACE is known to be present, then defining
 *  ASSUME_VIA_ACE_PRESENT will remove the ordinary encryption/decryption
 *  code.  If USE_VIA_ACE_IF_PRESENT is defined then VIA ACE will be used if
 *  it is detected (both present and enabled) but the normal AES code will
 *  also be present.
 *
 *  When VIA ACE is to be used, all AES encryption contexts MUST be 16 byte
 *  aligned; other input/output buffers do not need to be 16 byte aligned
 *  but there are very large performance gains if this can be arranged.
 *  VIA ACE also requires the decryption key schedule to be in reverse
 *  order (which later checks below ensure).
 */

/*  VIA ACE is not used here for OpenSolaris: */
#undef	VIA_ACE_POSSIBLE
#undef	ASSUME_VIA_ACE_PRESENT

#if 0 && defined(VIA_ACE_POSSIBLE) && !defined(USE_VIA_ACE_IF_PRESENT)
#define	USE_VIA_ACE_IF_PRESENT
#endif

#if 0 && defined(VIA_ACE_POSSIBLE) && !defined(ASSUME_VIA_ACE_PRESENT)
#define	ASSUME_VIA_ACE_PRESENT
#endif


/*
 *  3. ASSEMBLER SUPPORT
 *
 *	This define (which can be on the command line) enables the use of the
 *	assembler code routines for encryption, decryption and key scheduling
 *	as follows:
 *
 *	ASM_X86_V1C uses the assembler (aes_x86_v1.asm) with large tables for
 *		encryption and decryption but with key scheduling in C
 *	ASM_X86_V2  uses assembler (aes_x86_v2.asm) with compressed tables for
 *		encryption, decryption and key scheduling
 *	ASM_X86_V2C uses assembler (aes_x86_v2.asm) with compressed tables for
 *		encryption and decryption but with key scheduling in C
 *	ASM_AMD64_C uses assembler (aes_amd64.asm) with compressed tables for
 *		encryption and decryption but with key scheduling in C
 *
 *	Change one 'if 0' below to 'if 1' to select the version or define
 *	as a compilation option.
 */

#if 0 && !defined(ASM_X86_V1C)
#define	ASM_X86_V1C
#elif 0 && !defined(ASM_X86_V2)
#define	ASM_X86_V2
#elif 0 && !defined(ASM_X86_V2C)
#define	ASM_X86_V2C
#elif 1 && !defined(ASM_AMD64_C)
#define	ASM_AMD64_C
#endif

#if (defined(ASM_X86_V1C) || defined(ASM_X86_V2) || defined(ASM_X86_V2C)) && \
	!defined(_M_IX86) || defined(ASM_AMD64_C) && !defined(_M_X64) && \
	!defined(__amd64)
#error Assembler code is only available for x86 and AMD64 systems
#endif

/*
 *  4. FAST INPUT/OUTPUT OPERATIONS.
 *
 *	On some machines it is possible to improve speed by transferring the
 *	bytes in the input and output arrays to and from the internal 32-bit
 *	variables by addressing these arrays as if they are arrays of 32-bit
 *	words.  On some machines this will always be possible but there may
 *	be a large performance penalty if the byte arrays are not aligned on
 *	the normal word boundaries. On other machines this technique will
 *	lead to memory access errors when such 32-bit word accesses are not
 *	properly aligned. The option SAFE_IO avoids such problems but will
 *	often be slower on those machines that support misaligned access
 *	(especially so if care is taken to align the input and output byte
 *	arrays on 32-bit word boundaries). If SAFE_IO is not defined it is
 *	assumed that access to byte arrays as if they are arrays of 32-bit
 *	words will not cause problems when such accesses are misaligned.
 */
#if 1 && !defined(_MSC_VER)
#define	SAFE_IO
#endif

/*
 *  5. LOOP UNROLLING
 *
 *	The code for encryption and decryption cycles through a number of rounds
 *	that can be implemented either in a loop or by expanding the code into a
 *	long sequence of instructions, the latter producing a larger program but
 *	one that will often be much faster. The latter is called loop unrolling.
 *	There are also potential speed advantages in expanding two iterations in
 *	a loop with half the number of iterations, which is called partial loop
 *	unrolling.  The following options allow partial or full loop unrolling
 *	to be set independently for encryption and decryption
 */
#if 1
#define	ENC_UNROLL  FULL
#elif 0
#define	ENC_UNROLL  PARTIAL
#else
#define	ENC_UNROLL  NONE
#endif

#if 1
#define	DEC_UNROLL  FULL
#elif 0
#define	DEC_UNROLL  PARTIAL
#else
#define	DEC_UNROLL  NONE
#endif

#if 1
#define	ENC_KS_UNROLL
#endif

#if 1
#define	DEC_KS_UNROLL
#endif

/*
 *  6. FAST FINITE FIELD OPERATIONS
 *
 *	If this section is included, tables are used to provide faster finite
 *	field arithmetic.  This has no effect if FIXED_TABLES is defined.
 */
#if 1
#define	FF_TABLES
#endif

/*
 *  7. INTERNAL STATE VARIABLE FORMAT
 *
 *	The internal state of Rijndael is stored in a number of local 32-bit
 *	word variables which can be defined either as an array or as individual
 *	named variables. Include this section if you want to store these local
 *	variables in arrays. Otherwise individual local variables will be used.
 */
#if 1
#define	ARRAYS
#endif

/*
 *  8. FIXED OR DYNAMIC TABLES
 *
 *	When this section is included the tables used by the code are compiled
 *	statically into the binary file.  Otherwise the subroutine aes_init()
 *	must be called to compute them before the code is first used.
 */
#if 1 && !(defined(_MSC_VER) && (_MSC_VER <= 800))
#define	FIXED_TABLES
#endif

/*
 *  9. MASKING OR CASTING FROM LONGER VALUES TO BYTES
 *
 *	In some systems it is better to mask longer values to extract bytes
 *	rather than using a cast. This option allows this choice.
 */
#if 0
#define	to_byte(x)  ((uint8_t)(x))
#else
#define	to_byte(x)  ((x) & 0xff)
#endif

/*
 *  10. TABLE ALIGNMENT
 *
 *	On some systems speed will be improved by aligning the AES large lookup
 *	tables on particular boundaries. This define should be set to a power of
 *	two giving the desired alignment. It can be left undefined if alignment
 *	is not needed.  This option is specific to the Microsoft VC++ compiler -
 *	it seems to sometimes cause trouble for the VC++ version 6 compiler.
 */

#if 1 && defined(_MSC_VER) && (_MSC_VER >= 1300)
#define	TABLE_ALIGN 32
#endif

/*
 *  11.  REDUCE CODE AND TABLE SIZE
 *
 *	This replaces some expanded macros with function calls if ASM_X86_V2 or
 *	ASM_X86_V2C are defined.
 */

#if 1 && (defined(ASM_X86_V2) || defined(ASM_X86_V2C))
#define	REDUCE_CODE_SIZE
#endif

/*
 *  12. TABLE OPTIONS
 *
 *	This cipher proceeds by repeating a number of cycles known as rounds,
 *	which are implemented by a round function that can optionally be speeded
 *	up using tables.  The basic tables are 256 32-bit words, with either
 *	one or four tables being required for each round function depending on
 *	how much speed is required. Encryption and decryption round functions
 *	are different and the last encryption and decryption round functions are
 *	different again making four different round functions in all.
 *
 *	This means that:
 *	1. Normal encryption and decryption rounds can each use either 0, 1
 *		or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
 *	2. The last encryption and decryption rounds can also use either 0, 1
 *		or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
 *
 *	Include or exclude the appropriate definitions below to set the number
 *	of tables used by this implementation.
 */

#if 1   /* set tables for the normal encryption round */
#define	ENC_ROUND   FOUR_TABLES
#elif 0
#define	ENC_ROUND   ONE_TABLE
#else
#define	ENC_ROUND   NO_TABLES
#endif

#if 1   /* set tables for the last encryption round */
#define	LAST_ENC_ROUND  FOUR_TABLES
#elif 0
#define	LAST_ENC_ROUND  ONE_TABLE
#else
#define	LAST_ENC_ROUND  NO_TABLES
#endif

#if 1   /* set tables for the normal decryption round */
#define	DEC_ROUND   FOUR_TABLES
#elif 0
#define	DEC_ROUND   ONE_TABLE
#else
#define	DEC_ROUND   NO_TABLES
#endif

#if 1   /* set tables for the last decryption round */
#define	LAST_DEC_ROUND  FOUR_TABLES
#elif 0
#define	LAST_DEC_ROUND  ONE_TABLE
#else
#define	LAST_DEC_ROUND  NO_TABLES
#endif

/*
 *  The decryption key schedule can be speeded up with tables in the same
 *	way that the round functions can.  Include or exclude the following
 *	defines to set this requirement.
 */
#if 1
#define	KEY_SCHED   FOUR_TABLES
#elif 0
#define	KEY_SCHED   ONE_TABLE
#else
#define	KEY_SCHED   NO_TABLES
#endif

/*  ---- END OF USER CONFIGURED OPTIONS ---- */

/* VIA ACE support is only available for VC++ and GCC */

#if !defined(_MSC_VER) && !defined(__GNUC__)
#if defined(ASSUME_VIA_ACE_PRESENT)
#undef ASSUME_VIA_ACE_PRESENT
#endif
#if defined(USE_VIA_ACE_IF_PRESENT)
#undef USE_VIA_ACE_IF_PRESENT
#endif
#endif

#if defined(ASSUME_VIA_ACE_PRESENT) && !defined(USE_VIA_ACE_IF_PRESENT)
#define	USE_VIA_ACE_IF_PRESENT
#endif

#if defined(USE_VIA_ACE_IF_PRESENT) && !defined(AES_REV_DKS)
#define	AES_REV_DKS
#endif

/* Assembler support requires the use of platform byte order */

#if (defined(ASM_X86_V1C) || defined(ASM_X86_V2C) || defined(ASM_AMD64_C)) && \
	(ALGORITHM_BYTE_ORDER != PLATFORM_BYTE_ORDER)
#undef  ALGORITHM_BYTE_ORDER
#define	ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
#endif

/*
 * In this implementation the columns of the state array are each held in
 *	32-bit words. The state array can be held in various ways: in an array
 *	of words, in a number of individual word variables or in a number of
 *	processor registers. The following define maps a variable name x and
 *	a column number c to the way the state array variable is to be held.
 *	The first define below maps the state into an array x[c] whereas the
 *	second form maps the state into a number of individual variables x0,
 *	x1, etc.  Another form could map individual state columns to machine
 *	register names.
 */

#if defined(ARRAYS)
#define	s(x, c) x[c]
#else
#define	s(x, c) x##c
#endif
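
/*
 * For example, with ARRAYS defined s(b, 2) expands to b[2], while without it
 * the same reference expands to the individual variable b2.
 */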

/*
 *  This implementation provides subroutines for encryption, decryption
 *	and for setting the three key lengths (separately) for encryption
 *	and decryption. Since not all functions are needed, masks are set
 *	up here to determine which will be implemented in C
 */

#if !defined(AES_ENCRYPT)
#define	EFUNCS_IN_C   0
#elif defined(ASSUME_VIA_ACE_PRESENT) || defined(ASM_X86_V1C) || \
	defined(ASM_X86_V2C) || defined(ASM_AMD64_C)
#define	EFUNCS_IN_C   ENC_KEYING_IN_C
#elif !defined(ASM_X86_V2)
#define	EFUNCS_IN_C   (ENCRYPTION_IN_C | ENC_KEYING_IN_C)
#else
#define	EFUNCS_IN_C   0
#endif

#if !defined(AES_DECRYPT)
#define	DFUNCS_IN_C   0
#elif defined(ASSUME_VIA_ACE_PRESENT) || defined(ASM_X86_V1C) || \
	defined(ASM_X86_V2C) || defined(ASM_AMD64_C)
#define	DFUNCS_IN_C   DEC_KEYING_IN_C
#elif !defined(ASM_X86_V2)
#define	DFUNCS_IN_C   (DECRYPTION_IN_C | DEC_KEYING_IN_C)
#else
#define	DFUNCS_IN_C   0
#endif

#define	FUNCS_IN_C  (EFUNCS_IN_C | DFUNCS_IN_C)
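
/*
 * With the options selected above (AES_ENCRYPT, AES_DECRYPT and ASM_AMD64_C
 * all defined), EFUNCS_IN_C is ENC_KEYING_IN_C and DFUNCS_IN_C is
 * DEC_KEYING_IN_C, so FUNCS_IN_C reduces to
 * (ENC_KEYING_IN_C | DEC_KEYING_IN_C): only the key schedule routines are
 * compiled from C while the encryption and decryption rounds come from the
 * amd64 assembler version.
 */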

/* END OF CONFIGURATION OPTIONS */

/* Disable or report errors on some combinations of options */

#if ENC_ROUND == NO_TABLES && LAST_ENC_ROUND != NO_TABLES
#undef  LAST_ENC_ROUND
#define	LAST_ENC_ROUND  NO_TABLES
#elif ENC_ROUND == ONE_TABLE && LAST_ENC_ROUND == FOUR_TABLES
#undef  LAST_ENC_ROUND
#define	LAST_ENC_ROUND  ONE_TABLE
#endif

#if ENC_ROUND == NO_TABLES && ENC_UNROLL != NONE
#undef  ENC_UNROLL
#define	ENC_UNROLL  NONE
#endif

#if DEC_ROUND == NO_TABLES && LAST_DEC_ROUND != NO_TABLES
#undef  LAST_DEC_ROUND
#define	LAST_DEC_ROUND  NO_TABLES
#elif DEC_ROUND == ONE_TABLE && LAST_DEC_ROUND == FOUR_TABLES
#undef  LAST_DEC_ROUND
#define	LAST_DEC_ROUND  ONE_TABLE
#endif

#if DEC_ROUND == NO_TABLES && DEC_UNROLL != NONE
#undef  DEC_UNROLL
#define	DEC_UNROLL  NONE
#endif

#if (ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN)
#define	aes_sw32	htonl
#elif defined(bswap32)
#define	aes_sw32	bswap32
#elif defined(bswap_32)
#define	aes_sw32	bswap_32
#else
#define	brot(x, n)  (((uint32_t)(x) << (n)) | ((uint32_t)(x) >> (32 - (n))))
#define	aes_sw32(x) ((brot((x), 8) & 0x00ff00ff) | (brot((x), 24) & 0xff00ff00))
#endif
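
/*
 * Whichever definition is selected, aes_sw32 is intended to reverse the byte
 * order of a 32-bit word; for example aes_sw32(0x0a0b0c0d) yields 0x0d0c0b0a.
 */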


/*
 *	upr(x, n):  rotates bytes within words by n positions, moving bytes to
 *		higher index positions with wrap around into low positions
 *	ups(x, n):  moves bytes by n positions to higher index positions in
 *		words but without wrap around
 *	bval(x, n): extracts a byte from a word
 *
 *	WARNING:   The definitions given here are intended only for use with
 *		unsigned variables and with shift counts that are compile
 *		time constants
 */

#if (ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN)
#define	upr(x, n)	(((uint32_t)(x) << (8 * (n))) | \
			((uint32_t)(x) >> (32 - 8 * (n))))
#define	ups(x, n)	((uint32_t)(x) << (8 * (n)))
#define	bval(x, n)	to_byte((x) >> (8 * (n)))
#define	bytes2word(b0, b1, b2, b3)  \
		(((uint32_t)(b3) << 24) | ((uint32_t)(b2) << 16) | \
		((uint32_t)(b1) << 8) | (b0))
#endif

#if (ALGORITHM_BYTE_ORDER == IS_BIG_ENDIAN)
#define	upr(x, n)	(((uint32_t)(x) >> (8 * (n))) | \
			((uint32_t)(x) << (32 - 8 * (n))))
#define	ups(x, n)	((uint32_t)(x) >> (8 * (n)))
#define	bval(x, n)	to_byte((x) >> (24 - 8 * (n)))
#define	bytes2word(b0, b1, b2, b3)  \
		(((uint32_t)(b0) << 24) | ((uint32_t)(b1) << 16) | \
		((uint32_t)(b2) << 8) | (b3))
#endif
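
/*
 * For example, with the little-endian definitions above
 * bytes2word(0x01, 0x02, 0x03, 0x04) produces the word 0x04030201,
 * bval(0x04030201, 1) extracts 0x02, and upr(0x04030201, 1) rotates the
 * word to 0x03020104.
 */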

#if defined(SAFE_IO)
#define	word_in(x, c)	bytes2word(((const uint8_t *)(x) + 4 * c)[0], \
				((const uint8_t *)(x) + 4 * c)[1], \
				((const uint8_t *)(x) + 4 * c)[2], \
				((const uint8_t *)(x) + 4 * c)[3])
#define	word_out(x, c, v) { ((uint8_t *)(x) + 4 * c)[0] = bval(v, 0); \
			((uint8_t *)(x) + 4 * c)[1] = bval(v, 1); \
			((uint8_t *)(x) + 4 * c)[2] = bval(v, 2); \
			((uint8_t *)(x) + 4 * c)[3] = bval(v, 3); }
#elif (ALGORITHM_BYTE_ORDER == PLATFORM_BYTE_ORDER)
#define	word_in(x, c)	(*((uint32_t *)(x) + (c)))
#define	word_out(x, c, v) (*((uint32_t *)(x) + (c)) = (v))
#else
#define	word_in(x, c)	aes_sw32(*((uint32_t *)(x) + (c)))
#define	word_out(x, c, v) (*((uint32_t *)(x) + (c)) = aes_sw32(v))
#endif
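
/*
 * Typical use: a 16-byte block is loaded into four state words with
 * word_in(in, 0) .. word_in(in, 3) and results are written back with
 * word_out(out, c, v).  The SAFE_IO forms transfer a byte at a time so that
 * unaligned buffers are handled correctly, at some cost in speed on
 * processors that allow misaligned word access.
 */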

/* the finite field modular polynomial and elements */

#define	WPOLY   0x011b
#define	BPOLY	0x1b

/* multiply four bytes in GF(2^8) by 'x' {02} in parallel */

#define	m1  0x80808080
#define	m2  0x7f7f7f7f
#define	gf_mulx(x)  ((((x) & m2) << 1) ^ ((((x) & m1) >> 7) * BPOLY))
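
/*
 * For example, gf_mulx(0x00000057) is 0x000000ae ({57} times {02} = {ae}),
 * while gf_mulx(0x80808080) is 0x1b1b1b1b, since each byte with its top bit
 * set is reduced by the byte polynomial 0x1b (BPOLY).
 */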

/*
 * The following defines provide alternative definitions of gf_mulx that might
 * give improved performance if a fast 32-bit multiply is not available. Note
 * that a temporary variable u needs to be defined where gf_mulx is used.
 *
 * #define	gf_mulx(x) (u = (x) & m1, u |= (u >> 1), ((x) & m2) << 1) ^ \
 *			((u >> 3) | (u >> 6))
 * #define	m4  (0x01010101 * BPOLY)
 * #define	gf_mulx(x) (u = (x) & m1, ((x) & m2) << 1) ^ ((u - (u >> 7)) \
 *			& m4)
 */

/* Work out which tables are needed for the different options */

#if defined(ASM_X86_V1C)
#if defined(ENC_ROUND)
#undef  ENC_ROUND
#endif
#define	ENC_ROUND   FOUR_TABLES
#if defined(LAST_ENC_ROUND)
#undef  LAST_ENC_ROUND
#endif
#define	LAST_ENC_ROUND  FOUR_TABLES
#if defined(DEC_ROUND)
#undef  DEC_ROUND
#endif
#define	DEC_ROUND   FOUR_TABLES
#if defined(LAST_DEC_ROUND)
#undef  LAST_DEC_ROUND
#endif
#define	LAST_DEC_ROUND  FOUR_TABLES
#if defined(KEY_SCHED)
#undef  KEY_SCHED
#define	KEY_SCHED   FOUR_TABLES
#endif
#endif

#if (FUNCS_IN_C & ENCRYPTION_IN_C) || defined(ASM_X86_V1C)
#if ENC_ROUND == ONE_TABLE
#define	FT1_SET
#elif ENC_ROUND == FOUR_TABLES
#define	FT4_SET
#else
#define	SBX_SET
#endif
#if LAST_ENC_ROUND == ONE_TABLE
#define	FL1_SET
#elif LAST_ENC_ROUND == FOUR_TABLES
#define	FL4_SET
#elif !defined(SBX_SET)
#define	SBX_SET
#endif
#endif

#if (FUNCS_IN_C & DECRYPTION_IN_C) || defined(ASM_X86_V1C)
#if DEC_ROUND == ONE_TABLE
#define	IT1_SET
#elif DEC_ROUND == FOUR_TABLES
#define	IT4_SET
#else
#define	ISB_SET
#endif
#if LAST_DEC_ROUND == ONE_TABLE
#define	IL1_SET
#elif LAST_DEC_ROUND == FOUR_TABLES
#define	IL4_SET
#elif !defined(ISB_SET)
#define	ISB_SET
#endif
#endif


#if !(defined(REDUCE_CODE_SIZE) && (defined(ASM_X86_V2) || \
	defined(ASM_X86_V2C)))
#if ((FUNCS_IN_C & ENC_KEYING_IN_C) || (FUNCS_IN_C & DEC_KEYING_IN_C))
#if KEY_SCHED == ONE_TABLE
#if !defined(FL1_SET) && !defined(FL4_SET)
#define	LS1_SET
#endif
#elif KEY_SCHED == FOUR_TABLES
#if !defined(FL4_SET)
#define	LS4_SET
#endif
#elif !defined(SBX_SET)
#define	SBX_SET
#endif
#endif
#if (FUNCS_IN_C & DEC_KEYING_IN_C)
#if KEY_SCHED == ONE_TABLE
#define	IM1_SET
#elif KEY_SCHED == FOUR_TABLES
#define	IM4_SET
#elif !defined(SBX_SET)
#define	SBX_SET
#endif
#endif
#endif

/* generic definitions of Rijndael macros that use tables */

#define	no_table(x, box, vf, rf, c) bytes2word(\
	box[bval(vf(x, 0, c), rf(0, c))], \
	box[bval(vf(x, 1, c), rf(1, c))], \
	box[bval(vf(x, 2, c), rf(2, c))], \
	box[bval(vf(x, 3, c), rf(3, c))])

#define	one_table(x, op, tab, vf, rf, c) \
	(tab[bval(vf(x, 0, c), rf(0, c))] \
	^ op(tab[bval(vf(x, 1, c), rf(1, c))], 1) \
	^ op(tab[bval(vf(x, 2, c), rf(2, c))], 2) \
	^ op(tab[bval(vf(x, 3, c), rf(3, c))], 3))

#define	four_tables(x, tab, vf, rf, c) \
	(tab[0][bval(vf(x, 0, c), rf(0, c))] \
	^ tab[1][bval(vf(x, 1, c), rf(1, c))] \
	^ tab[2][bval(vf(x, 2, c), rf(2, c))] \
	^ tab[3][bval(vf(x, 3, c), rf(3, c))])
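
/*
 * For example, with four tables a round column is computed as
 * t[0][b0] ^ t[1][b1] ^ t[2][b2] ^ t[3][b3], where b0..b3 are the state
 * bytes selected by the vf and rf arguments; one_table obtains the same
 * result from a single 1KB table by rotating its entries with the op
 * argument (normally upr), and no_table falls back to plain S-box lookups
 * recombined with bytes2word.
 */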

#define	vf1(x, r, c)	(x)
#define	rf1(r, c)	(r)
#define	rf2(r, c)	((8+r-c)&3)

/*
 * Perform forward and inverse column mix operation on four bytes in long word
 * x in parallel. NOTE: x must be a simple variable, NOT an expression in
 * these macros.
 */

#if !(defined(REDUCE_CODE_SIZE) && (defined(ASM_X86_V2) || \
	defined(ASM_X86_V2C)))

#if defined(FM4_SET)	/* not currently used */
#define	fwd_mcol(x)	four_tables(x, t_use(f, m), vf1, rf1, 0)
#elif defined(FM1_SET)	/* not currently used */
#define	fwd_mcol(x)	one_table(x, upr, t_use(f, m), vf1, rf1, 0)
#else
#define	dec_fmvars	uint32_t g2
#define	fwd_mcol(x)	(g2 = gf_mulx(x), g2 ^ upr((x) ^ g2, 3) ^ \
				upr((x), 2) ^ upr((x), 1))
#endif

#if defined(IM4_SET)
#define	inv_mcol(x)	four_tables(x, t_use(i, m), vf1, rf1, 0)
#elif defined(IM1_SET)
#define	inv_mcol(x)	one_table(x, upr, t_use(i, m), vf1, rf1, 0)
#else
#define	dec_imvars	uint32_t g2, g4, g9
#define	inv_mcol(x)	(g2 = gf_mulx(x), g4 = gf_mulx(g2), g9 = \
				(x) ^ gf_mulx(g4), g4 ^= g9, \
				(x) ^ g2 ^ g4 ^ upr(g2 ^ g9, 3) ^ \
				upr(g4, 2) ^ upr(g9, 1))
#endif
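
/*
 * When no mix tables are configured, the gf_mulx based forms above compute
 * the AES MixColumns transform (coefficients {02} {03} {01} {01}) for
 * fwd_mcol and the InvMixColumns transform (coefficients {0e} {0b} {0d}
 * {09}) for inv_mcol directly in GF(2^8).
 */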

#if defined(FL4_SET)
#define	ls_box(x, c)	four_tables(x, t_use(f, l), vf1, rf2, c)
#elif defined(LS4_SET)
#define	ls_box(x, c)	four_tables(x, t_use(l, s), vf1, rf2, c)
#elif defined(FL1_SET)
#define	ls_box(x, c)	one_table(x, upr, t_use(f, l), vf1, rf2, c)
#elif defined(LS1_SET)
#define	ls_box(x, c)	one_table(x, upr, t_use(l, s), vf1, rf2, c)
#else
#define	ls_box(x, c)	no_table(x, t_use(s, box), vf1, rf2, c)
#endif

#endif

#if defined(ASM_X86_V1C) && defined(AES_DECRYPT) && !defined(ISB_SET)
#define	ISB_SET
#endif

#ifdef	__cplusplus
}
#endif

#endif	/* _AESOPT_H */