1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for Intel AES-NI instructions. This file contains glue
4  * code; the real AES implementation is in aesni-intel_asm.S.
5  *
6  * Copyright (C) 2008, Intel Corp.
7  *    Author: Huang Ying <ying.huang@intel.com>
8  *
9  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
10  * interface for 64-bit kernels.
11  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
12  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
13  *             Tadeusz Struk (tadeusz.struk@intel.com)
14  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
15  *    Copyright (c) 2010, Intel Corporation.
16  */
17 
18 #include <linux/hardirq.h>
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <linux/err.h>
22 #include <crypto/algapi.h>
23 #include <crypto/aes.h>
24 #include <crypto/ctr.h>
25 #include <crypto/b128ops.h>
26 #include <crypto/gcm.h>
27 #include <crypto/xts.h>
28 #include <asm/cpu_device_id.h>
29 #include <asm/simd.h>
30 #include <crypto/scatterwalk.h>
31 #include <crypto/internal/aead.h>
32 #include <crypto/internal/simd.h>
33 #include <crypto/internal/skcipher.h>
34 #include <linux/jump_label.h>
35 #include <linux/workqueue.h>
36 #include <linux/spinlock.h>
37 #include <linux/static_call.h>
38 
39 
40 #define AESNI_ALIGN	16
41 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
42 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
43 #define RFC4106_HASH_SUBKEY_SIZE 16
44 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
45 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
46 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
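/*
 * AESNI_ALIGN_EXTRA reserves enough extra room in the tfm context so that
 * the AES key schedule can be realigned to a 16-byte boundary at runtime
 * (see aes_ctx()), since the crypto API only guarantees CRYPTO_MINALIGN.
 */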
47 
48 /* This data is stored at the end of the crypto_tfm struct.
49  * It serves as per-"session" (per-transform) data storage.
50  * It must be 16-byte aligned.
51  */
52 struct aesni_rfc4106_gcm_ctx {
53 	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
54 	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
55 	u8 nonce[4];
56 };
57 
58 struct generic_gcmaes_ctx {
59 	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
60 	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
61 };
62 
63 struct aesni_xts_ctx {
64 	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
65 	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
66 };
67 
68 #define GCM_BLOCK_LEN 16
69 
70 struct gcm_context_data {
71 	/* init, update and finalize context data */
72 	u8 aad_hash[GCM_BLOCK_LEN];
73 	u64 aad_length;
74 	u64 in_length;
75 	u8 partial_block_enc_key[GCM_BLOCK_LEN];
76 	u8 orig_IV[GCM_BLOCK_LEN];
77 	u8 current_counter[GCM_BLOCK_LEN];
78 	u64 partial_block_len;
79 	u64 unused;
80 	u8 hash_keys[GCM_BLOCK_LEN * 16];
81 };
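/*
 * Scratch state passed by reference to the GCM assembly routines; it carries
 * the running GHASH, the lengths and the counter state across the init,
 * update and finalize calls declared below.
 */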
82 
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84 			     unsigned int key_len);
85 asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
86 asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
87 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
88 			      const u8 *in, unsigned int len);
89 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
90 			      const u8 *in, unsigned int len);
91 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
92 			      const u8 *in, unsigned int len, u8 *iv);
93 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
94 			      const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
96 				  const u8 *in, unsigned int len, u8 *iv);
97 asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
98 				  const u8 *in, unsigned int len, u8 *iv);
99 
100 #define AVX_GEN2_OPTSIZE 640
101 #define AVX_GEN4_OPTSIZE 4096
102 
103 asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
104 				  const u8 *in, unsigned int len, u8 *iv);
105 
106 asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
107 				  const u8 *in, unsigned int len, u8 *iv);
108 
109 #ifdef CONFIG_X86_64
110 
111 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
112 			      const u8 *in, unsigned int len, u8 *iv);
113 DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
114 
115 /* Scatter / Gather routines, with args similar to above */
116 asmlinkage void aesni_gcm_init(void *ctx,
117 			       struct gcm_context_data *gdata,
118 			       u8 *iv,
119 			       u8 *hash_subkey, const u8 *aad,
120 			       unsigned long aad_len);
121 asmlinkage void aesni_gcm_enc_update(void *ctx,
122 				     struct gcm_context_data *gdata, u8 *out,
123 				     const u8 *in, unsigned long plaintext_len);
124 asmlinkage void aesni_gcm_dec_update(void *ctx,
125 				     struct gcm_context_data *gdata, u8 *out,
126 				     const u8 *in,
127 				     unsigned long ciphertext_len);
128 asmlinkage void aesni_gcm_finalize(void *ctx,
129 				   struct gcm_context_data *gdata,
130 				   u8 *auth_tag, unsigned long auth_tag_len);
131 
132 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
133 		void *keys, u8 *out, unsigned int num_bytes);
134 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
135 		void *keys, u8 *out, unsigned int num_bytes);
136 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
137 		void *keys, u8 *out, unsigned int num_bytes);
138 /*
139  * asmlinkage void aesni_gcm_init_avx_gen2()
140  * gcm_data *my_ctx_data, context data
141  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
142  */
143 asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
144 					struct gcm_context_data *gdata,
145 					u8 *iv,
146 					u8 *hash_subkey,
147 					const u8 *aad,
148 					unsigned long aad_len);
149 
150 asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
151 				     struct gcm_context_data *gdata, u8 *out,
152 				     const u8 *in, unsigned long plaintext_len);
153 asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
154 				     struct gcm_context_data *gdata, u8 *out,
155 				     const u8 *in,
156 				     unsigned long ciphertext_len);
157 asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
158 				   struct gcm_context_data *gdata,
159 				   u8 *auth_tag, unsigned long auth_tag_len);
160 
161 /*
162  * asmlinkage void aesni_gcm_init_avx_gen4()
163  * gcm_data *my_ctx_data, context data
164  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
165  */
166 asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
167 					struct gcm_context_data *gdata,
168 					u8 *iv,
169 					u8 *hash_subkey,
170 					const u8 *aad,
171 					unsigned long aad_len);
172 
173 asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
174 				     struct gcm_context_data *gdata, u8 *out,
175 				     const u8 *in, unsigned long plaintext_len);
176 asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
177 				     struct gcm_context_data *gdata, u8 *out,
178 				     const u8 *in,
179 				     unsigned long ciphertext_len);
180 asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
181 				   struct gcm_context_data *gdata,
182 				   u8 *auth_tag, unsigned long auth_tag_len);
183 
184 static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
185 static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
186 
187 static inline struct
188 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
189 {
190 	unsigned long align = AESNI_ALIGN;
191 
192 	if (align <= crypto_tfm_ctx_alignment())
193 		align = 1;
194 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
195 }
196 
197 static inline struct
198 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
199 {
200 	unsigned long align = AESNI_ALIGN;
201 
202 	if (align <= crypto_tfm_ctx_alignment())
203 		align = 1;
204 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
205 }
206 #endif
207 
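/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for the tfm
 * context, which may be smaller than AESNI_ALIGN.  Round the raw context
 * pointer up so the assembly routines always see a 16-byte aligned
 * struct crypto_aes_ctx.
 */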
208 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
209 {
210 	unsigned long addr = (unsigned long)raw_ctx;
211 	unsigned long align = AESNI_ALIGN;
212 
213 	if (align <= crypto_tfm_ctx_alignment())
214 		align = 1;
215 	return (struct crypto_aes_ctx *)ALIGN(addr, align);
216 }
217 
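/*
 * Validate the key length and expand the key.  The AES-NI key schedule
 * routine is used when the FPU is available; otherwise fall back to the
 * generic C key expansion.
 */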
218 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
219 			      const u8 *in_key, unsigned int key_len)
220 {
221 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
222 	int err;
223 
224 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
225 	    key_len != AES_KEYSIZE_256)
226 		return -EINVAL;
227 
228 	if (!crypto_simd_usable())
229 		err = aes_expandkey(ctx, in_key, key_len);
230 	else {
231 		kernel_fpu_begin();
232 		err = aesni_set_key(ctx, in_key, key_len);
233 		kernel_fpu_end();
234 	}
235 
236 	return err;
237 }
238 
239 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
240 		       unsigned int key_len)
241 {
242 	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
243 }
244 
245 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
246 {
247 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
248 
249 	if (!crypto_simd_usable()) {
250 		aes_encrypt(ctx, dst, src);
251 	} else {
252 		kernel_fpu_begin();
253 		aesni_enc(ctx, dst, src);
254 		kernel_fpu_end();
255 	}
256 }
257 
258 static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
259 {
260 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
261 
262 	if (!crypto_simd_usable()) {
263 		aes_decrypt(ctx, dst, src);
264 	} else {
265 		kernel_fpu_begin();
266 		aesni_dec(ctx, dst, src);
267 		kernel_fpu_end();
268 	}
269 }
270 
271 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
272 			         unsigned int len)
273 {
274 	return aes_set_key_common(crypto_skcipher_tfm(tfm),
275 				  crypto_skcipher_ctx(tfm), key, len);
276 }
277 
278 static int ecb_encrypt(struct skcipher_request *req)
279 {
280 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
281 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
282 	struct skcipher_walk walk;
283 	unsigned int nbytes;
284 	int err;
285 
286 	err = skcipher_walk_virt(&walk, req, false);
287 
288 	while ((nbytes = walk.nbytes)) {
289 		kernel_fpu_begin();
290 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
291 			      nbytes & AES_BLOCK_MASK);
292 		kernel_fpu_end();
293 		nbytes &= AES_BLOCK_SIZE - 1;
294 		err = skcipher_walk_done(&walk, nbytes);
295 	}
296 
297 	return err;
298 }
299 
300 static int ecb_decrypt(struct skcipher_request *req)
301 {
302 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
303 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
304 	struct skcipher_walk walk;
305 	unsigned int nbytes;
306 	int err;
307 
308 	err = skcipher_walk_virt(&walk, req, false);
309 
310 	while ((nbytes = walk.nbytes)) {
311 		kernel_fpu_begin();
312 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
313 			      nbytes & AES_BLOCK_MASK);
314 		kernel_fpu_end();
315 		nbytes &= AES_BLOCK_SIZE - 1;
316 		err = skcipher_walk_done(&walk, nbytes);
317 	}
318 
319 	return err;
320 }
321 
322 static int cbc_encrypt(struct skcipher_request *req)
323 {
324 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
325 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
326 	struct skcipher_walk walk;
327 	unsigned int nbytes;
328 	int err;
329 
330 	err = skcipher_walk_virt(&walk, req, false);
331 
332 	while ((nbytes = walk.nbytes)) {
333 		kernel_fpu_begin();
334 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
335 			      nbytes & AES_BLOCK_MASK, walk.iv);
336 		kernel_fpu_end();
337 		nbytes &= AES_BLOCK_SIZE - 1;
338 		err = skcipher_walk_done(&walk, nbytes);
339 	}
340 
341 	return err;
342 }
343 
344 static int cbc_decrypt(struct skcipher_request *req)
345 {
346 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
347 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
348 	struct skcipher_walk walk;
349 	unsigned int nbytes;
350 	int err;
351 
352 	err = skcipher_walk_virt(&walk, req, false);
353 
354 	while ((nbytes = walk.nbytes)) {
355 		kernel_fpu_begin();
356 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
357 			      nbytes & AES_BLOCK_MASK, walk.iv);
358 		kernel_fpu_end();
359 		nbytes &= AES_BLOCK_SIZE - 1;
360 		err = skcipher_walk_done(&walk, nbytes);
361 	}
362 
363 	return err;
364 }
365 
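/*
 * CBC with ciphertext stealing (CTS): all but the last two (possibly
 * partial) blocks are processed with plain CBC via a subrequest, then the
 * remaining data is handed to the aesni_cts_cbc_enc()/dec() assembly
 * helpers, which perform the stealing step.
 */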
366 static int cts_cbc_encrypt(struct skcipher_request *req)
367 {
368 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
369 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
370 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
371 	struct scatterlist *src = req->src, *dst = req->dst;
372 	struct scatterlist sg_src[2], sg_dst[2];
373 	struct skcipher_request subreq;
374 	struct skcipher_walk walk;
375 	int err;
376 
377 	skcipher_request_set_tfm(&subreq, tfm);
378 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
379 				      NULL, NULL);
380 
381 	if (req->cryptlen <= AES_BLOCK_SIZE) {
382 		if (req->cryptlen < AES_BLOCK_SIZE)
383 			return -EINVAL;
384 		cbc_blocks = 1;
385 	}
386 
387 	if (cbc_blocks > 0) {
388 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
389 					   cbc_blocks * AES_BLOCK_SIZE,
390 					   req->iv);
391 
392 		err = cbc_encrypt(&subreq);
393 		if (err)
394 			return err;
395 
396 		if (req->cryptlen == AES_BLOCK_SIZE)
397 			return 0;
398 
399 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
400 		if (req->dst != req->src)
401 			dst = scatterwalk_ffwd(sg_dst, req->dst,
402 					       subreq.cryptlen);
403 	}
404 
405 	/* handle ciphertext stealing */
406 	skcipher_request_set_crypt(&subreq, src, dst,
407 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
408 				   req->iv);
409 
410 	err = skcipher_walk_virt(&walk, &subreq, false);
411 	if (err)
412 		return err;
413 
414 	kernel_fpu_begin();
415 	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
416 			  walk.nbytes, walk.iv);
417 	kernel_fpu_end();
418 
419 	return skcipher_walk_done(&walk, 0);
420 }
421 
422 static int cts_cbc_decrypt(struct skcipher_request *req)
423 {
424 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
425 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
426 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
427 	struct scatterlist *src = req->src, *dst = req->dst;
428 	struct scatterlist sg_src[2], sg_dst[2];
429 	struct skcipher_request subreq;
430 	struct skcipher_walk walk;
431 	int err;
432 
433 	skcipher_request_set_tfm(&subreq, tfm);
434 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
435 				      NULL, NULL);
436 
437 	if (req->cryptlen <= AES_BLOCK_SIZE) {
438 		if (req->cryptlen < AES_BLOCK_SIZE)
439 			return -EINVAL;
440 		cbc_blocks = 1;
441 	}
442 
443 	if (cbc_blocks > 0) {
444 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
445 					   cbc_blocks * AES_BLOCK_SIZE,
446 					   req->iv);
447 
448 		err = cbc_decrypt(&subreq);
449 		if (err)
450 			return err;
451 
452 		if (req->cryptlen == AES_BLOCK_SIZE)
453 			return 0;
454 
455 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
456 		if (req->dst != req->src)
457 			dst = scatterwalk_ffwd(sg_dst, req->dst,
458 					       subreq.cryptlen);
459 	}
460 
461 	/* handle ciphertext stealing */
462 	skcipher_request_set_crypt(&subreq, src, dst,
463 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
464 				   req->iv);
465 
466 	err = skcipher_walk_virt(&walk, &subreq, false);
467 	if (err)
468 		return err;
469 
470 	kernel_fpu_begin();
471 	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
472 			  walk.nbytes, walk.iv);
473 	kernel_fpu_end();
474 
475 	return skcipher_walk_done(&walk, 0);
476 }
477 
478 #ifdef CONFIG_X86_64
479 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
480 			      const u8 *in, unsigned int len, u8 *iv)
481 {
482 	/*
483 	 * Based on the key length, dispatch to the by8 AVX version of
484 	 * CTR mode encryption/decryption for improved performance.
485 	 * aes_set_key_common() ensures that the key length is one of
486 	 * {128, 192, 256} bits.
487 	 */
488 	if (ctx->key_length == AES_KEYSIZE_128)
489 		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
490 	else if (ctx->key_length == AES_KEYSIZE_192)
491 		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
492 	else
493 		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
494 }
495 
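/*
 * CTR mode: full blocks are encrypted by the assembly routine behind the
 * aesni_ctr_enc_tfm static call (the by8 AVX version when available, see
 * aesni_init()).  A trailing partial block is handled by encrypting the
 * counter block and XORing the keystream into the data.
 */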
496 static int ctr_crypt(struct skcipher_request *req)
497 {
498 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
499 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
500 	u8 keystream[AES_BLOCK_SIZE];
501 	struct skcipher_walk walk;
502 	unsigned int nbytes;
503 	int err;
504 
505 	err = skcipher_walk_virt(&walk, req, false);
506 
507 	while ((nbytes = walk.nbytes) > 0) {
508 		kernel_fpu_begin();
509 		if (nbytes & AES_BLOCK_MASK)
510 			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
511 						       walk.src.virt.addr,
512 						       nbytes & AES_BLOCK_MASK,
513 						       walk.iv);
514 		nbytes &= ~AES_BLOCK_MASK;
515 
516 		if (walk.nbytes == walk.total && nbytes > 0) {
517 			aesni_enc(ctx, keystream, walk.iv);
518 			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
519 				       walk.src.virt.addr + walk.nbytes - nbytes,
520 				       keystream, nbytes);
521 			crypto_inc(walk.iv, AES_BLOCK_SIZE);
522 			nbytes = 0;
523 		}
524 		kernel_fpu_end();
525 		err = skcipher_walk_done(&walk, nbytes);
526 	}
527 	return err;
528 }
529 
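/*
 * Derive the GHASH hash subkey H by encrypting an all-zeroes block with
 * the (freshly expanded) cipher key, as specified for GCM.
 */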
530 static int
531 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
532 {
533 	struct crypto_aes_ctx ctx;
534 	int ret;
535 
536 	ret = aes_expandkey(&ctx, key, key_len);
537 	if (ret)
538 		return ret;
539 
540 	/* Clear the data in the hash sub key container to zero.*/
541 	/* We want to cipher all zeros to create the hash sub key. */
542 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
543 
544 	aes_encrypt(&ctx, hash_subkey, hash_subkey);
545 
546 	memzero_explicit(&ctx, sizeof(ctx));
547 	return 0;
548 }
549 
550 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
551 				  unsigned int key_len)
552 {
553 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
554 
555 	if (key_len < 4)
556 		return -EINVAL;
557 
558 	/* Account for the 4-byte nonce at the end of the key. */
559 	key_len -= 4;
560 
561 	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
562 
563 	return aes_set_key_common(crypto_aead_tfm(aead),
564 				  &ctx->aes_key_expanded, key, key_len) ?:
565 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
566 }
567 
568 /* This is the Integrity Check Value (aka the authentication tag) length and can
569  * be 8, 12 or 16 bytes long. */
570 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
571 				       unsigned int authsize)
572 {
573 	switch (authsize) {
574 	case 8:
575 	case 12:
576 	case 16:
577 		break;
578 	default:
579 		return -EINVAL;
580 	}
581 
582 	return 0;
583 }
584 
585 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
586 				       unsigned int authsize)
587 {
588 	switch (authsize) {
589 	case 4:
590 	case 8:
591 	case 12:
592 	case 13:
593 	case 14:
594 	case 15:
595 	case 16:
596 		break;
597 	default:
598 		return -EINVAL;
599 	}
600 
601 	return 0;
602 }
603 
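/*
 * One-pass GCM over scatterlists: initialize the GHASH/counter state with
 * the (linearized) associated data, en/decrypt the payload chunk by chunk
 * during the skcipher walk, then finalize to produce the authentication
 * tag.  The AVX and AVX2 code paths are only used for payloads of at least
 * AVX_GEN2_OPTSIZE and AVX_GEN4_OPTSIZE bytes respectively, and only when
 * enabled by the static keys set up in aesni_init().
 */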
604 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
605 			      unsigned int assoclen, u8 *hash_subkey,
606 			      u8 *iv, void *aes_ctx, u8 *auth_tag,
607 			      unsigned long auth_tag_len)
608 {
609 	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
610 	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
611 	unsigned long left = req->cryptlen;
612 	struct scatter_walk assoc_sg_walk;
613 	struct skcipher_walk walk;
614 	bool do_avx, do_avx2;
615 	u8 *assocmem = NULL;
616 	u8 *assoc;
617 	int err;
618 
619 	if (!enc)
620 		left -= auth_tag_len;
621 
622 	do_avx = (left >= AVX_GEN2_OPTSIZE);
623 	do_avx2 = (left >= AVX_GEN4_OPTSIZE);
624 
625 	/* Linearize assoc, if not already linear */
626 	if (req->src->length >= assoclen && req->src->length) {
627 		scatterwalk_start(&assoc_sg_walk, req->src);
628 		assoc = scatterwalk_map(&assoc_sg_walk);
629 	} else {
630 		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
631 			      GFP_KERNEL : GFP_ATOMIC;
632 
633 		/* assoc can be any length, so must be on heap */
634 		assocmem = kmalloc(assoclen, flags);
635 		if (unlikely(!assocmem))
636 			return -ENOMEM;
637 		assoc = assocmem;
638 
639 		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
640 	}
641 
642 	kernel_fpu_begin();
643 	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
644 		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
645 					assoclen);
646 	else if (static_branch_likely(&gcm_use_avx) && do_avx)
647 		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
648 					assoclen);
649 	else
650 		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
651 	kernel_fpu_end();
652 
653 	if (!assocmem)
654 		scatterwalk_unmap(assoc);
655 	else
656 		kfree(assocmem);
657 
658 	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
659 		  : skcipher_walk_aead_decrypt(&walk, req, false);
660 
661 	while (walk.nbytes > 0) {
662 		kernel_fpu_begin();
663 		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
664 			if (enc)
665 				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
666 							      walk.dst.virt.addr,
667 							      walk.src.virt.addr,
668 							      walk.nbytes);
669 			else
670 				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
671 							      walk.dst.virt.addr,
672 							      walk.src.virt.addr,
673 							      walk.nbytes);
674 		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
675 			if (enc)
676 				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
677 							      walk.dst.virt.addr,
678 							      walk.src.virt.addr,
679 							      walk.nbytes);
680 			else
681 				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
682 							      walk.dst.virt.addr,
683 							      walk.src.virt.addr,
684 							      walk.nbytes);
685 		} else if (enc) {
686 			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
687 					     walk.src.virt.addr, walk.nbytes);
688 		} else {
689 			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
690 					     walk.src.virt.addr, walk.nbytes);
691 		}
692 		kernel_fpu_end();
693 
694 		err = skcipher_walk_done(&walk, 0);
695 	}
696 
697 	if (err)
698 		return err;
699 
700 	kernel_fpu_begin();
701 	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
702 		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
703 					    auth_tag_len);
704 	else if (static_branch_likely(&gcm_use_avx) && do_avx)
705 		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
706 					    auth_tag_len);
707 	else
708 		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
709 	kernel_fpu_end();
710 
711 	return 0;
712 }
713 
714 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
715 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
716 {
717 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
718 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
719 	u8 auth_tag[16];
720 	int err;
721 
722 	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
723 				 auth_tag, auth_tag_len);
724 	if (err)
725 		return err;
726 
727 	scatterwalk_map_and_copy(auth_tag, req->dst,
728 				 req->assoclen + req->cryptlen,
729 				 auth_tag_len, 1);
730 	return 0;
731 }
732 
733 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
734 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
735 {
736 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
737 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
738 	u8 auth_tag_msg[16];
739 	u8 auth_tag[16];
740 	int err;
741 
742 	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
743 				 auth_tag, auth_tag_len);
744 	if (err)
745 		return err;
746 
747 	/* Copy out original auth_tag */
748 	scatterwalk_map_and_copy(auth_tag_msg, req->src,
749 				 req->assoclen + req->cryptlen - auth_tag_len,
750 				 auth_tag_len, 0);
751 
752 	/* Compare generated tag with passed in tag. */
753 	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
754 		memzero_explicit(auth_tag, sizeof(auth_tag));
755 		return -EBADMSG;
756 	}
757 	return 0;
758 }
759 
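/*
 * RFC 4106 GCM: the 16-byte counter block is built from the 4-byte salt
 * stored at setkey time, the 8-byte explicit IV taken from the request,
 * and a 32-bit block counter initialized to 1.  The explicit IV is carried
 * in the AAD on the wire, so 8 bytes are subtracted from assoclen before
 * calling the generic GCM helpers.
 */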
760 static int helper_rfc4106_encrypt(struct aead_request *req)
761 {
762 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
763 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
764 	void *aes_ctx = &(ctx->aes_key_expanded);
765 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
766 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
767 	unsigned int i;
768 	__be32 counter = cpu_to_be32(1);
769 
770 	/* Assuming RFC 4106 with 64-bit extended sequence numbers, */
771 	/* the associated data (AAD) length must be equal to */
772 	/* 16 or 20 bytes. */
773 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
774 		return -EINVAL;
775 
776 	/* Build the IV: 4-byte nonce || 8-byte explicit IV || 32-bit counter */
777 	for (i = 0; i < 4; i++)
778 		*(iv+i) = ctx->nonce[i];
779 	for (i = 0; i < 8; i++)
780 		*(iv+4+i) = req->iv[i];
781 	*((__be32 *)(iv+12)) = counter;
782 
783 	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
784 			      aes_ctx);
785 }
786 
787 static int helper_rfc4106_decrypt(struct aead_request *req)
788 {
789 	__be32 counter = cpu_to_be32(1);
790 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
791 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
792 	void *aes_ctx = &(ctx->aes_key_expanded);
793 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
794 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
795 	unsigned int i;
796 
797 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
798 		return -EINVAL;
799 
800 	/* Assuming RFC 4106 with 64-bit extended sequence numbers, */
801 	/* the associated data (AAD) length must be equal to */
802 	/* 16 or 20 bytes. */
803 
804 	/* Build the IV: 4-byte nonce || 8-byte explicit IV || 32-bit counter */
805 	for (i = 0; i < 4; i++)
806 		*(iv+i) = ctx->nonce[i];
807 	for (i = 0; i < 8; i++)
808 		*(iv+4+i) = req->iv[i];
809 	*((__be32 *)(iv+12)) = counter;
810 
811 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
812 			      aes_ctx);
813 }
814 #endif
815 
816 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
817 			    unsigned int keylen)
818 {
819 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
820 	int err;
821 
822 	err = xts_verify_key(tfm, key, keylen);
823 	if (err)
824 		return err;
825 
826 	keylen /= 2;
827 
828 	/* first half of xts-key is for crypt */
829 	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
830 				 key, keylen);
831 	if (err)
832 		return err;
833 
834 	/* second half of xts-key is for tweak */
835 	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
836 				  key + keylen, keylen);
837 }
838 
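/*
 * XTS: the initial tweak is computed by encrypting the IV with the tweak
 * key, full blocks are then processed by the XTS assembly routines, and a
 * trailing partial block is handled with ciphertext stealing over the last
 * two blocks using a subrequest.
 */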
839 static int xts_crypt(struct skcipher_request *req, bool encrypt)
840 {
841 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
842 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
843 	int tail = req->cryptlen % AES_BLOCK_SIZE;
844 	struct skcipher_request subreq;
845 	struct skcipher_walk walk;
846 	int err;
847 
848 	if (req->cryptlen < AES_BLOCK_SIZE)
849 		return -EINVAL;
850 
851 	err = skcipher_walk_virt(&walk, req, false);
852 
853 	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
854 		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
855 
856 		skcipher_walk_abort(&walk);
857 
858 		skcipher_request_set_tfm(&subreq, tfm);
859 		skcipher_request_set_callback(&subreq,
860 					      skcipher_request_flags(req),
861 					      NULL, NULL);
862 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
863 					   blocks * AES_BLOCK_SIZE, req->iv);
864 		req = &subreq;
865 		err = skcipher_walk_virt(&walk, req, false);
866 	} else {
867 		tail = 0;
868 	}
869 
870 	kernel_fpu_begin();
871 
872 	/* calculate first value of T */
873 	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
874 
875 	while (walk.nbytes > 0) {
876 		int nbytes = walk.nbytes;
877 
878 		if (nbytes < walk.total)
879 			nbytes &= ~(AES_BLOCK_SIZE - 1);
880 
881 		if (encrypt)
882 			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
883 					  walk.dst.virt.addr, walk.src.virt.addr,
884 					  nbytes, walk.iv);
885 		else
886 			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
887 					  walk.dst.virt.addr, walk.src.virt.addr,
888 					  nbytes, walk.iv);
889 		kernel_fpu_end();
890 
891 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
892 
893 		if (walk.nbytes > 0)
894 			kernel_fpu_begin();
895 	}
896 
897 	if (unlikely(tail > 0 && !err)) {
898 		struct scatterlist sg_src[2], sg_dst[2];
899 		struct scatterlist *src, *dst;
900 
901 		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
902 		if (req->dst != req->src)
903 			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
904 
905 		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
906 					   req->iv);
907 
908 		err = skcipher_walk_virt(&walk, &subreq, false);
909 		if (err)
910 			return err;
911 
912 		kernel_fpu_begin();
913 		if (encrypt)
914 			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
915 					  walk.dst.virt.addr, walk.src.virt.addr,
916 					  walk.nbytes, walk.iv);
917 		else
918 			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
919 					  walk.dst.virt.addr, walk.src.virt.addr,
920 					  walk.nbytes, walk.iv);
921 		kernel_fpu_end();
922 
923 		err = skcipher_walk_done(&walk, 0);
924 	}
925 	return err;
926 }
927 
928 static int xts_encrypt(struct skcipher_request *req)
929 {
930 	return xts_crypt(req, true);
931 }
932 
933 static int xts_decrypt(struct skcipher_request *req)
934 {
935 	return xts_crypt(req, false);
936 }
937 
938 static struct crypto_alg aesni_cipher_alg = {
939 	.cra_name		= "aes",
940 	.cra_driver_name	= "aes-aesni",
941 	.cra_priority		= 300,
942 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
943 	.cra_blocksize		= AES_BLOCK_SIZE,
944 	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
945 	.cra_module		= THIS_MODULE,
946 	.cra_u	= {
947 		.cipher	= {
948 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
949 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
950 			.cia_setkey		= aes_set_key,
951 			.cia_encrypt		= aesni_encrypt,
952 			.cia_decrypt		= aesni_decrypt
953 		}
954 	}
955 };
956 
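/*
 * These "__"-prefixed algorithms are marked CRYPTO_ALG_INTERNAL and are
 * only reachable through the SIMD wrappers registered in aesni_init(),
 * which defer to cryptd when the FPU cannot be used in the current context.
 */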
957 static struct skcipher_alg aesni_skciphers[] = {
958 	{
959 		.base = {
960 			.cra_name		= "__ecb(aes)",
961 			.cra_driver_name	= "__ecb-aes-aesni",
962 			.cra_priority		= 400,
963 			.cra_flags		= CRYPTO_ALG_INTERNAL,
964 			.cra_blocksize		= AES_BLOCK_SIZE,
965 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
966 			.cra_module		= THIS_MODULE,
967 		},
968 		.min_keysize	= AES_MIN_KEY_SIZE,
969 		.max_keysize	= AES_MAX_KEY_SIZE,
970 		.setkey		= aesni_skcipher_setkey,
971 		.encrypt	= ecb_encrypt,
972 		.decrypt	= ecb_decrypt,
973 	}, {
974 		.base = {
975 			.cra_name		= "__cbc(aes)",
976 			.cra_driver_name	= "__cbc-aes-aesni",
977 			.cra_priority		= 400,
978 			.cra_flags		= CRYPTO_ALG_INTERNAL,
979 			.cra_blocksize		= AES_BLOCK_SIZE,
980 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
981 			.cra_module		= THIS_MODULE,
982 		},
983 		.min_keysize	= AES_MIN_KEY_SIZE,
984 		.max_keysize	= AES_MAX_KEY_SIZE,
985 		.ivsize		= AES_BLOCK_SIZE,
986 		.setkey		= aesni_skcipher_setkey,
987 		.encrypt	= cbc_encrypt,
988 		.decrypt	= cbc_decrypt,
989 	}, {
990 		.base = {
991 			.cra_name		= "__cts(cbc(aes))",
992 			.cra_driver_name	= "__cts-cbc-aes-aesni",
993 			.cra_priority		= 400,
994 			.cra_flags		= CRYPTO_ALG_INTERNAL,
995 			.cra_blocksize		= AES_BLOCK_SIZE,
996 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
997 			.cra_module		= THIS_MODULE,
998 		},
999 		.min_keysize	= AES_MIN_KEY_SIZE,
1000 		.max_keysize	= AES_MAX_KEY_SIZE,
1001 		.ivsize		= AES_BLOCK_SIZE,
1002 		.walksize	= 2 * AES_BLOCK_SIZE,
1003 		.setkey		= aesni_skcipher_setkey,
1004 		.encrypt	= cts_cbc_encrypt,
1005 		.decrypt	= cts_cbc_decrypt,
1006 #ifdef CONFIG_X86_64
1007 	}, {
1008 		.base = {
1009 			.cra_name		= "__ctr(aes)",
1010 			.cra_driver_name	= "__ctr-aes-aesni",
1011 			.cra_priority		= 400,
1012 			.cra_flags		= CRYPTO_ALG_INTERNAL,
1013 			.cra_blocksize		= 1,
1014 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
1015 			.cra_module		= THIS_MODULE,
1016 		},
1017 		.min_keysize	= AES_MIN_KEY_SIZE,
1018 		.max_keysize	= AES_MAX_KEY_SIZE,
1019 		.ivsize		= AES_BLOCK_SIZE,
1020 		.chunksize	= AES_BLOCK_SIZE,
1021 		.setkey		= aesni_skcipher_setkey,
1022 		.encrypt	= ctr_crypt,
1023 		.decrypt	= ctr_crypt,
1024 #endif
1025 	}, {
1026 		.base = {
1027 			.cra_name		= "__xts(aes)",
1028 			.cra_driver_name	= "__xts-aes-aesni",
1029 			.cra_priority		= 401,
1030 			.cra_flags		= CRYPTO_ALG_INTERNAL,
1031 			.cra_blocksize		= AES_BLOCK_SIZE,
1032 			.cra_ctxsize		= XTS_AES_CTX_SIZE,
1033 			.cra_module		= THIS_MODULE,
1034 		},
1035 		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1036 		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1037 		.ivsize		= AES_BLOCK_SIZE,
1038 		.walksize	= 2 * AES_BLOCK_SIZE,
1039 		.setkey		= xts_aesni_setkey,
1040 		.encrypt	= xts_encrypt,
1041 		.decrypt	= xts_decrypt,
1042 	}
1043 };
1044 
1045 static
1046 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1047 
1048 #ifdef CONFIG_X86_64
1049 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1050 				  unsigned int key_len)
1051 {
1052 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1053 
1054 	return aes_set_key_common(crypto_aead_tfm(aead),
1055 				  &ctx->aes_key_expanded, key, key_len) ?:
1056 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1057 }
1058 
1059 static int generic_gcmaes_encrypt(struct aead_request *req)
1060 {
1061 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1062 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1063 	void *aes_ctx = &(ctx->aes_key_expanded);
1064 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1065 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1066 	__be32 counter = cpu_to_be32(1);
1067 
1068 	memcpy(iv, req->iv, 12);
1069 	*((__be32 *)(iv+12)) = counter;
1070 
1071 	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1072 			      aes_ctx);
1073 }
1074 
1075 static int generic_gcmaes_decrypt(struct aead_request *req)
1076 {
1077 	__be32 counter = cpu_to_be32(1);
1078 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1079 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1080 	void *aes_ctx = &(ctx->aes_key_expanded);
1081 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1082 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1083 
1084 	memcpy(iv, req->iv, 12);
1085 	*((__be32 *)(iv+12)) = counter;
1086 
1087 	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1088 			      aes_ctx);
1089 }
1090 
1091 static struct aead_alg aesni_aeads[] = { {
1092 	.setkey			= common_rfc4106_set_key,
1093 	.setauthsize		= common_rfc4106_set_authsize,
1094 	.encrypt		= helper_rfc4106_encrypt,
1095 	.decrypt		= helper_rfc4106_decrypt,
1096 	.ivsize			= GCM_RFC4106_IV_SIZE,
1097 	.maxauthsize		= 16,
1098 	.base = {
1099 		.cra_name		= "__rfc4106(gcm(aes))",
1100 		.cra_driver_name	= "__rfc4106-gcm-aesni",
1101 		.cra_priority		= 400,
1102 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1103 		.cra_blocksize		= 1,
1104 		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
1105 		.cra_alignmask		= AESNI_ALIGN - 1,
1106 		.cra_module		= THIS_MODULE,
1107 	},
1108 }, {
1109 	.setkey			= generic_gcmaes_set_key,
1110 	.setauthsize		= generic_gcmaes_set_authsize,
1111 	.encrypt		= generic_gcmaes_encrypt,
1112 	.decrypt		= generic_gcmaes_decrypt,
1113 	.ivsize			= GCM_AES_IV_SIZE,
1114 	.maxauthsize		= 16,
1115 	.base = {
1116 		.cra_name		= "__gcm(aes)",
1117 		.cra_driver_name	= "__generic-gcm-aesni",
1118 		.cra_priority		= 400,
1119 		.cra_flags		= CRYPTO_ALG_INTERNAL,
1120 		.cra_blocksize		= 1,
1121 		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
1122 		.cra_alignmask		= AESNI_ALIGN - 1,
1123 		.cra_module		= THIS_MODULE,
1124 	},
1125 } };
1126 #else
1127 static struct aead_alg aesni_aeads[0];
1128 #endif
1129 
1130 static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1131 
1132 static const struct x86_cpu_id aesni_cpu_id[] = {
1133 	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1134 	{}
1135 };
1136 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1137 
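/*
 * Module init: select the GCM implementation (SSE, AVX or AVX2) via static
 * keys and the CTR implementation via a static call based on CPU features,
 * then register the bare cipher, the skcipher algorithms and the AEAD
 * algorithms through their SIMD wrappers.
 */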
1138 static int __init aesni_init(void)
1139 {
1140 	int err;
1141 
1142 	if (!x86_match_cpu(aesni_cpu_id))
1143 		return -ENODEV;
1144 #ifdef CONFIG_X86_64
1145 	if (boot_cpu_has(X86_FEATURE_AVX2)) {
1146 		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1147 		static_branch_enable(&gcm_use_avx);
1148 		static_branch_enable(&gcm_use_avx2);
1149 	} else
1150 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1151 		pr_info("AVX version of gcm_enc/dec engaged.\n");
1152 		static_branch_enable(&gcm_use_avx);
1153 	} else {
1154 		pr_info("SSE version of gcm_enc/dec engaged.\n");
1155 	}
1156 	if (boot_cpu_has(X86_FEATURE_AVX)) {
1157 		/* optimize performance of ctr mode encryption transform */
1158 		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
1159 		pr_info("AES CTR mode by8 optimization enabled\n");
1160 	}
1161 #endif
1162 
1163 	err = crypto_register_alg(&aesni_cipher_alg);
1164 	if (err)
1165 		return err;
1166 
1167 	err = simd_register_skciphers_compat(aesni_skciphers,
1168 					     ARRAY_SIZE(aesni_skciphers),
1169 					     aesni_simd_skciphers);
1170 	if (err)
1171 		goto unregister_cipher;
1172 
1173 	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1174 					 aesni_simd_aeads);
1175 	if (err)
1176 		goto unregister_skciphers;
1177 
1178 	return 0;
1179 
1180 unregister_skciphers:
1181 	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1182 				  aesni_simd_skciphers);
1183 unregister_cipher:
1184 	crypto_unregister_alg(&aesni_cipher_alg);
1185 	return err;
1186 }
1187 
1188 static void __exit aesni_exit(void)
1189 {
1190 	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1191 			      aesni_simd_aeads);
1192 	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1193 				  aesni_simd_skciphers);
1194 	crypto_unregister_alg(&aesni_cipher_alg);
1195 }
1196 
1197 late_initcall(aesni_init);
1198 module_exit(aesni_exit);
1199 
1200 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1201 MODULE_LICENSE("GPL");
1202 MODULE_ALIAS_CRYPTO("aes");
1203