/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");

#define GHASH_BLOCK_SIZE	16
#define GHASH_DIGEST_SIZE	16
#define GCM_IV_SIZE		12

struct ghash_key {
	u64	a;
	u64	b;
	be128	k;
};

struct ghash_desc_ctx {
	u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
	u8 buf[GHASH_BLOCK_SIZE];
	u32 count;
};

struct gcm_aes_ctx {
	struct crypto_aes_ctx	aes_key;
	u64			h2[2];
	struct ghash_key	ghash_key;
};

asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
				       struct ghash_key const *k,
				       const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
				      struct ghash_key const *k,
				      const char *head);

static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
				  struct ghash_key const *k,
				  const char *head);

asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
				  const u8 src[], u64 const *k, u8 ctr[],
				  u32 const rk[], int rounds, u8 ks[]);

asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
				  const u8 src[], u64 const *k,
				  u8 ctr[], u32 const rk[], int rounds);

asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
					u32 const rk[], int rounds);

asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static int ghash_init(struct shash_desc *desc)
{
	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);

	*ctx = (struct ghash_desc_ctx){};
	return 0;
}

static void ghash_do_update(int blocks, u64 dg[], const char *src,
			    struct ghash_key *key, const char *head)
{
	if (likely(may_use_simd())) {
		kernel_neon_begin();
		pmull_ghash_update(blocks, dg, src, key, head);
		kernel_neon_end();
	} else {
		be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

		do {
			const u8 *in = src;

			if (head) {
				in = head;
				blocks++;
				head = NULL;
			} else {
				src += GHASH_BLOCK_SIZE;
			}

			crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
			gf128mul_lle(&dst, &key->k);
		} while (--blocks);

		dg[0] = be64_to_cpu(dst.b);
		dg[1] = be64_to_cpu(dst.a);
	}
}

/* avoid hogging the CPU for too long */
#define MAX_BLOCKS	(SZ_64K / GHASH_BLOCK_SIZE)

static int ghash_update(struct shash_desc *desc, const u8 *src,
			unsigned int len)
{
	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

	ctx->count += len;

	if ((partial + len) >= GHASH_BLOCK_SIZE) {
		struct ghash_key *key = crypto_shash_ctx(desc->tfm);
		int blocks;

		if (partial) {
			int p = GHASH_BLOCK_SIZE - partial;

			memcpy(ctx->buf + partial, src, p);
			src += p;
			len -= p;
		}

		blocks = len / GHASH_BLOCK_SIZE;
		len %= GHASH_BLOCK_SIZE;

		do {
			int chunk = min(blocks, MAX_BLOCKS);

			ghash_do_update(chunk, ctx->digest, src, key,
					partial ? ctx->buf : NULL);

			blocks -= chunk;
			src += chunk * GHASH_BLOCK_SIZE;
			partial = 0;
		} while (unlikely(blocks > 0));
	}
	if (len)
		memcpy(ctx->buf + partial, src, len);
	return 0;
}
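
/*
 * GHASH interprets the message as a polynomial over GF(2^128) and
 * evaluates it at the hash key H: for each 16-byte block B the running
 * digest X becomes (X ^ B) * H. ghash_update() above only deals with
 * buffering partial blocks; the field multiplications happen in
 * ghash_do_update(), and MAX_BLOCKS caps how much data is handed to a
 * single kernel_neon_begin()/kernel_neon_end() section.
 */
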
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

	if (partial) {
		struct ghash_key *key = crypto_shash_ctx(desc->tfm);

		memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

		ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
	}
	put_unaligned_be64(ctx->digest[1], dst);
	put_unaligned_be64(ctx->digest[0], dst + 8);

	*ctx = (struct ghash_desc_ctx){};
	return 0;
}

static int __ghash_setkey(struct ghash_key *key,
			  const u8 *inkey, unsigned int keylen)
{
	u64 a, b;

	/* needed for the fallback */
	memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

	/* perform multiplication by 'x' in GF(2^128) */
	b = get_unaligned_be64(inkey);
	a = get_unaligned_be64(inkey + 8);

	key->a = (a << 1) | (b >> 63);
	key->b = (b << 1) | (a >> 63);

	if (b >> 63)
		key->b ^= 0xc200000000000000UL;

	return 0;
}

static int ghash_setkey(struct crypto_shash *tfm,
			const u8 *inkey, unsigned int keylen)
{
	struct ghash_key *key = crypto_shash_ctx(tfm);

	if (keylen != GHASH_BLOCK_SIZE) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return __ghash_setkey(key, inkey, keylen);
}

static struct shash_alg ghash_alg = {
	.base.cra_name		= "ghash",
	.base.cra_driver_name	= "ghash-ce",
	.base.cra_priority	= 200,
	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ghash_key),
	.base.cra_module	= THIS_MODULE,

	.digestsize		= GHASH_DIGEST_SIZE,
	.init			= ghash_init,
	.update			= ghash_update,
	.final			= ghash_final,
	.setkey			= ghash_setkey,
	.descsize		= sizeof(struct ghash_desc_ctx),
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
		      unsigned int keylen)
{
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
	be128 h1, h2;
	u8 *key = (u8 *)&h1;
	int ret;

	ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
	if (ret) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
			    num_rounds(&ctx->aes_key));

	__ghash_setkey(&ctx->ghash_key, key, sizeof(be128));

	/* calculate H^2 (used for 2-way aggregation) */
	h2 = h1;
	gf128mul_lle(&h2, &h1);

	ctx->h2[0] = (be64_to_cpu(h2.b) << 1) | (be64_to_cpu(h2.a) >> 63);
	ctx->h2[1] = (be64_to_cpu(h2.a) << 1) | (be64_to_cpu(h2.b) >> 63);

	if (be64_to_cpu(h2.a) >> 63)
		ctx->h2[1] ^= 0xc200000000000000UL;

	return 0;
}
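
/*
 * Storing H^2 (like H itself, premultiplied by 'x' to match the
 * representation the PMULL code expects) lets the asm fold two blocks
 * per pass, since two GHASH steps compose as
 *
 *	((X ^ B1) * H ^ B2) * H  =  (X ^ B1) * H^2  ^  B2 * H
 *
 * i.e. one multiply by H^2 and one by H, with the reduction modulo the
 * field polynomial deferred across the pair.
 */
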
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12 ... 16:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
			   int *buf_count, struct gcm_aes_ctx *ctx)
{
	if (*buf_count > 0) {
		int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

		memcpy(&buf[*buf_count], src, buf_added);

		*buf_count += buf_added;
		src += buf_added;
		count -= buf_added;
	}

	if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
		int blocks = count / GHASH_BLOCK_SIZE;

		ghash_do_update(blocks, dg, src, &ctx->ghash_key,
				*buf_count ? buf : NULL);

		src += blocks * GHASH_BLOCK_SIZE;
		count %= GHASH_BLOCK_SIZE;
		*buf_count = 0;
	}

	if (count > 0) {
		memcpy(buf, src, count);
		*buf_count = count;
	}
}

static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
	u8 buf[GHASH_BLOCK_SIZE];
	struct scatter_walk walk;
	u32 len = req->assoclen;
	int buf_count = 0;

	scatterwalk_start(&walk, req->src);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);

		gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);

	if (buf_count) {
		memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
		ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
	}
}

static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
		      u64 dg[], u8 tag[], int cryptlen)
{
	u8 mac[AES_BLOCK_SIZE];
	u128 lengths;

	lengths.a = cpu_to_be64(req->assoclen * 8);
	lengths.b = cpu_to_be64(cryptlen * 8);

	ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL);

	put_unaligned_be64(dg[1], mac);
	put_unaligned_be64(dg[0], mac + 8);

	crypto_xor(tag, mac, AES_BLOCK_SIZE);
}

static int gcm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 iv[AES_BLOCK_SIZE];
	u8 ks[2 * AES_BLOCK_SIZE];
	u8 tag[AES_BLOCK_SIZE];
	u64 dg[2] = {};
	int nrounds = num_rounds(&ctx->aes_key);
	int err;

	if (req->assoclen)
		gcm_calculate_auth_mac(req, dg);

	memcpy(iv, req->iv, GCM_IV_SIZE);
	put_unaligned_be32(1, iv + GCM_IV_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
		u32 const *rk = NULL;

		kernel_neon_begin();
		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
		put_unaligned_be32(2, iv + GCM_IV_SIZE);
		pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
		put_unaligned_be32(3, iv + GCM_IV_SIZE);
		pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
		put_unaligned_be32(4, iv + GCM_IV_SIZE);

		do {
			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;

			if (rk)
				kernel_neon_begin();

			pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
					  walk.src.virt.addr, ctx->h2, iv,
					  rk, nrounds, ks);
			kernel_neon_end();

			err = skcipher_walk_done(&walk,
					walk.nbytes % (2 * AES_BLOCK_SIZE));

			rk = ctx->aes_key.key_enc;
		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
	} else {
		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
		put_unaligned_be32(2, iv + GCM_IV_SIZE);

		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
			u8 *dst = walk.dst.virt.addr;
			u8 *src = walk.src.virt.addr;

			do {
				__aes_arm64_encrypt(ctx->aes_key.key_enc,
						    ks, iv, nrounds);
				crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
				crypto_inc(iv, AES_BLOCK_SIZE);

				dst += AES_BLOCK_SIZE;
				src += AES_BLOCK_SIZE;
			} while (--blocks > 0);

			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
					walk.dst.virt.addr, &ctx->ghash_key,
					NULL);

			err = skcipher_walk_done(&walk,
						 walk.nbytes % AES_BLOCK_SIZE);
		}
		if (walk.nbytes)
			__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
					    nrounds);
	}

	/* handle the tail */
	if (walk.nbytes) {
		u8 buf[GHASH_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *dst = walk.dst.virt.addr;
		u8 *head = NULL;

		crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
			       walk.nbytes);

		if (walk.nbytes > GHASH_BLOCK_SIZE) {
			head = dst;
			dst += GHASH_BLOCK_SIZE;
			nbytes %= GHASH_BLOCK_SIZE;
		}

		memcpy(buf, dst, nbytes);
		memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
		ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);

		err = skcipher_walk_done(&walk, 0);
	}

	if (err)
		return err;

	gcm_final(req, ctx, dg, tag, req->cryptlen);

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
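
/*
 * Notes on gcm_encrypt(): the SIMD path releases and reacquires NEON
 * around each scatterwalk chunk so long requests remain preemptible.
 * Before the main loop it computes the tag block E(K, Y0) and primes
 * ks[] with two keystream blocks (counter values 2 and 3);
 * pmull_gcm_encrypt() then performs the CTR encryption and GHASH two
 * blocks at a time. Any trailing partial data is encrypted with the
 * leftover keystream in ks[] and folded into the digest via a
 * zero-padded buffer.
 */
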
static int gcm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 iv[2 * AES_BLOCK_SIZE];
	u8 tag[AES_BLOCK_SIZE];
	u8 buf[2 * GHASH_BLOCK_SIZE];
	u64 dg[2] = {};
	int nrounds = num_rounds(&ctx->aes_key);
	int err;

	if (req->assoclen)
		gcm_calculate_auth_mac(req, dg);

	memcpy(iv, req->iv, GCM_IV_SIZE);
	put_unaligned_be32(1, iv + GCM_IV_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, false);

	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
		u32 const *rk = NULL;

		kernel_neon_begin();
		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
		put_unaligned_be32(2, iv + GCM_IV_SIZE);

		do {
			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
			int rem = walk.total - blocks * AES_BLOCK_SIZE;

			if (rk)
				kernel_neon_begin();

			pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
					  walk.src.virt.addr, ctx->h2, iv,
					  rk, nrounds);

			/* check if this is the final iteration of the loop */
			if (rem < (2 * AES_BLOCK_SIZE)) {
				u8 *iv2 = iv + AES_BLOCK_SIZE;

				if (rem > AES_BLOCK_SIZE) {
					memcpy(iv2, iv, AES_BLOCK_SIZE);
					crypto_inc(iv2, AES_BLOCK_SIZE);
				}

				pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);

				if (rem > AES_BLOCK_SIZE)
					pmull_gcm_encrypt_block(iv2, iv2, NULL,
								nrounds);
			}

			kernel_neon_end();

			err = skcipher_walk_done(&walk,
					walk.nbytes % (2 * AES_BLOCK_SIZE));

			rk = ctx->aes_key.key_enc;
		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
	} else {
		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
		put_unaligned_be32(2, iv + GCM_IV_SIZE);

		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
			u8 *dst = walk.dst.virt.addr;
			u8 *src = walk.src.virt.addr;

			ghash_do_update(blocks, dg, walk.src.virt.addr,
					&ctx->ghash_key, NULL);

			do {
				__aes_arm64_encrypt(ctx->aes_key.key_enc,
						    buf, iv, nrounds);
				crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
				crypto_inc(iv, AES_BLOCK_SIZE);

				dst += AES_BLOCK_SIZE;
				src += AES_BLOCK_SIZE;
			} while (--blocks > 0);

			err = skcipher_walk_done(&walk,
						 walk.nbytes % AES_BLOCK_SIZE);
		}
		if (walk.nbytes)
			__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
					    nrounds);
	}

	/* handle the tail */
	if (walk.nbytes) {
		const u8 *src = walk.src.virt.addr;
		const u8 *head = NULL;
		unsigned int nbytes = walk.nbytes;

		if (walk.nbytes > GHASH_BLOCK_SIZE) {
			head = src;
			src += GHASH_BLOCK_SIZE;
			nbytes %= GHASH_BLOCK_SIZE;
		}

		memcpy(buf, src, nbytes);
		memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
		ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);

		crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
			       walk.nbytes);

		err = skcipher_walk_done(&walk, 0);
	}

	if (err)
		return err;

	gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(tag, buf, authsize))
		return -EBADMSG;
	return 0;
}
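
/*
 * gcm_decrypt() mirrors gcm_encrypt() with the order reversed: the
 * ciphertext is folded into the GHASH digest before it is decrypted.
 * On the final pass of the SIMD loop, one or two keystream blocks for
 * the tail are generated in iv/iv2 while NEON is still held. The
 * computed tag is compared against the transmitted one with
 * crypto_memneq(), which does not short-circuit, so a mismatch yields
 * -EBADMSG without leaking which bytes differed through timing.
 */
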
static struct aead_alg gcm_aes_alg = {
	.ivsize			= GCM_IV_SIZE,
	.chunksize		= 2 * AES_BLOCK_SIZE,
	.maxauthsize		= AES_BLOCK_SIZE,
	.setkey			= gcm_setkey,
	.setauthsize		= gcm_setauthsize,
	.encrypt		= gcm_encrypt,
	.decrypt		= gcm_decrypt,

	.base.cra_name		= "gcm(aes)",
	.base.cra_driver_name	= "gcm-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct gcm_aes_ctx),
	.base.cra_module	= THIS_MODULE,
};

static int __init ghash_ce_mod_init(void)
{
	int ret;

	if (!(elf_hwcap & HWCAP_ASIMD))
		return -ENODEV;

	if (elf_hwcap & HWCAP_PMULL)
		pmull_ghash_update = pmull_ghash_update_p64;
	else
		pmull_ghash_update = pmull_ghash_update_p8;

	ret = crypto_register_shash(&ghash_alg);
	if (ret)
		return ret;

	if (elf_hwcap & HWCAP_PMULL) {
		ret = crypto_register_aead(&gcm_aes_alg);
		if (ret)
			crypto_unregister_shash(&ghash_alg);
	}
	return ret;
}

static void __exit ghash_ce_mod_exit(void)
{
	crypto_unregister_shash(&ghash_alg);
	crypto_unregister_aead(&gcm_aes_alg);
}

static const struct cpu_feature ghash_cpu_feature[] = {
	{ cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);
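
/*
 * Usage sketch (illustrative, not part of this driver): kernel users
 * reach these implementations through the generic crypto API, e.g.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *
 * With cra_priority 300, this driver takes precedence over the generic
 * gcm(aes) template whenever it is registered, i.e. whenever the CPU
 * implements PMULL.
 */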