/*-
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <crypto/aesni/aesni.h>

MALLOC_DECLARE(M_AESNI);

#ifdef DEBUG
static void
ps_len(const char *string, const uint8_t *data, int length)
{
	int i;

	printf("%-12s[0x", string);
	for (i = 0; i < length; i++) {
		if (i % AES_BLOCK_LEN == 0 && i > 0)
			printf("+");
		printf("%02x", data[i]);
	}
	printf("]\n");
}
#endif

void
aesni_encrypt_cbc(int rounds, const void *key_schedule, size_t len,
    const uint8_t *from, uint8_t *to, const uint8_t iv[AES_BLOCK_LEN])
{
	const uint8_t *ivp;
	size_t i;

#ifdef DEBUG
	ps_len("AES CBC encrypt iv:", iv, AES_BLOCK_LEN);
	ps_len("from:", from, len);
#endif

	len /= AES_BLOCK_LEN;
	ivp = iv;
	for (i = 0; i < len; i++) {
		aesni_enc(rounds - 1, key_schedule, from, to, ivp);
		ivp = to;
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
#ifdef DEBUG
	ps_len("to:", to - len * AES_BLOCK_LEN, len * AES_BLOCK_LEN);
#endif
}

void
aesni_encrypt_ecb(int rounds, const void *key_schedule, size_t len,
    const uint8_t from[AES_BLOCK_LEN], uint8_t to[AES_BLOCK_LEN])
{
	size_t i;

	len /= AES_BLOCK_LEN;
	for (i = 0; i < len; i++) {
		aesni_enc(rounds - 1, key_schedule, from, to, NULL);
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

void
aesni_decrypt_ecb(int rounds, const void *key_schedule, size_t len,
    const uint8_t from[AES_BLOCK_LEN], uint8_t to[AES_BLOCK_LEN])
{
	size_t i;

	len /= AES_BLOCK_LEN;
	for (i = 0; i < len; i++) {
		aesni_dec(rounds - 1, key_schedule, from, to, NULL);
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

#define	AES_XTS_BLOCKSIZE	16
#define	AES_XTS_IVSIZE		8
#define	AES_XTS_ALPHA		0x87	/* GF(2^128) generator polynomial */
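
/*
 * Notes on the XTS-AES tweak (IEEE P1619): every 16-byte block is
 * whitened with a per-block tweak before and after the block cipher,
 *
 *	C_j = E_k1(P_j ^ T_j) ^ T_j,
 *
 * where T_0 = E_k2(sector number) and T_{j+1} = T_j * alpha in
 * GF(2^128).  The reduction polynomial is x^128 + x^7 + x^2 + x + 1;
 * its low-order byte is the 0x87 constant above, so the multiplication
 * in aesni_crypt_xts_block() below amounts to a 128-bit left shift
 * followed by a conditional XOR of 0x87 into the least significant
 * byte on carry-out.
 */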

static void
aesni_crypt_xts_block(int rounds, const void *key_schedule, uint8_t *tweak,
    const uint8_t *from, uint8_t *to, int do_encrypt)
{
	uint8_t block[AES_XTS_BLOCKSIZE];
	u_int i, carry_in, carry_out;

	for (i = 0; i < AES_XTS_BLOCKSIZE; i++)
		block[i] = from[i] ^ tweak[i];

	if (do_encrypt)
		aesni_enc(rounds - 1, key_schedule, block, to, NULL);
	else
		aesni_dec(rounds - 1, key_schedule, block, to, NULL);

	for (i = 0; i < AES_XTS_BLOCKSIZE; i++)
		to[i] ^= tweak[i];

	/* Exponentiate tweak. */
	carry_in = 0;
	for (i = 0; i < AES_XTS_BLOCKSIZE; i++) {
		carry_out = tweak[i] & 0x80;
		tweak[i] = (tweak[i] << 1) | (carry_in ? 1 : 0);
		carry_in = carry_out;
	}
	if (carry_in)
		tweak[0] ^= AES_XTS_ALPHA;
	bzero(block, sizeof(block));
}

static void
aesni_crypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN], int do_encrypt)
{
	uint8_t tweak[AES_XTS_BLOCKSIZE];
	uint64_t blocknum;
	size_t i;

	/*
	 * Prepare tweak as E_k2(IV). IV is specified as LE representation
	 * of a 64-bit block number which we allow to be passed in directly.
	 */
	bcopy(iv, &blocknum, AES_XTS_IVSIZE);
	for (i = 0; i < AES_XTS_IVSIZE; i++) {
		tweak[i] = blocknum & 0xff;
		blocknum >>= 8;
	}
	/* Last 64 bits of IV are always zero. */
	bzero(tweak + AES_XTS_IVSIZE, AES_XTS_IVSIZE);
	aesni_enc(rounds - 1, tweak_schedule, tweak, tweak, NULL);

	len /= AES_XTS_BLOCKSIZE;
	for (i = 0; i < len; i++) {
		aesni_crypt_xts_block(rounds, data_schedule, tweak, from, to,
		    do_encrypt);
		from += AES_XTS_BLOCKSIZE;
		to += AES_XTS_BLOCKSIZE;
	}

	bzero(tweak, sizeof(tweak));
}

static void
aesni_encrypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN])
{

	aesni_crypt_xts(rounds, data_schedule, tweak_schedule, len, from, to,
	    iv, 1);
}

static void
aesni_decrypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN])
{

	aesni_crypt_xts(rounds, data_schedule, tweak_schedule, len, from, to,
	    iv, 0);
}

static int
aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
    int keylen)
{

	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		switch (keylen) {
		case 128:
			ses->rounds = AES128_ROUNDS;
			break;
		case 192:
			ses->rounds = AES192_ROUNDS;
			break;
		case 256:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_XTS:
		switch (keylen) {
		case 256:
			ses->rounds = AES128_ROUNDS;
			break;
		case 512:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}

	aesni_set_enckey(key, ses->enc_schedule, ses->rounds);
	aesni_set_deckey(ses->enc_schedule, ses->dec_schedule, ses->rounds);
	if (ses->algo == CRYPTO_AES_CBC)
		arc4rand(ses->iv, sizeof(ses->iv), 0);
	else /* if (ses->algo == CRYPTO_AES_XTS) */ {
		aesni_set_enckey(key + keylen / 16, ses->xts_schedule,
		    ses->rounds);
	}

	return (0);
}
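
/*
 * Note: keylen above is expressed in bits, as supplied by the opencrypto
 * framework.  For XTS the supplied key is the concatenation of the data
 * key k1 and the tweak key k2, so the 256- and 512-bit cases select
 * AES-128 and AES-256 respectively, and "key + keylen / 16" points at
 * the second (tweak) half of the key material.  The schedule setup and
 * the encrypt/decrypt paths touch the XMM register set, which is why
 * both callers below bracket the work with fpu_kern_enter() and
 * fpu_kern_leave().
 */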

int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{
	struct thread *td;
	int error;

	td = curthread;
	error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
	if (error == 0) {
		error = aesni_cipher_setup_common(ses, encini->cri_key,
		    encini->cri_klen);
		fpu_kern_leave(td, &ses->fpu_ctx);
	}
	return (error);
}

int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
	if (error != 0)
		goto out;

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0) {
			/*
			 * Leave the FPU context before bailing out, so the
			 * enter/leave calls stay paired on this error path.
			 */
			fpu_kern_leave(td, &ses->fpu_ctx);
			goto out;
		}
	}

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
	fpu_kern_leave(td, &ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
out:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}
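
/*
 * Example (illustrative only; not compiled): a CBC round trip over two
 * blocks using schedules prepared by aesni_cipher_setup_common(), run
 * inside an fpu_kern_enter()/fpu_kern_leave() section.  Lengths passed
 * to the helpers in this file must be a multiple of AES_BLOCK_LEN;
 * handling any trailing partial block is the caller's responsibility.
 *
 *	uint8_t buf[2 * AES_BLOCK_LEN], iv[AES_BLOCK_LEN];
 *
 *	arc4rand(iv, sizeof(iv), 0);
 *	aesni_encrypt_cbc(ses->rounds, ses->enc_schedule, sizeof(buf),
 *	    buf, buf, iv);
 *	aesni_decrypt_cbc(ses->rounds, ses->dec_schedule, sizeof(buf),
 *	    buf, iv);
 *
 * Unlike its encrypt counterpart, aesni_decrypt_cbc() operates strictly
 * in place and therefore takes no separate output pointer.
 */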