// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128-bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128-bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

/* Run one hardware operation; returns 0 on success, nonzero if the
 * engine did not signal completion before the timeout expired.
 */
static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
        u32 status;
        u32 counter = AES_OP_TIMEOUT;

        iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
        iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
        iowrite32(len, _iobase + AES_LENA_REG);

        /* Start the operation */
        iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
        } while (!(status & AES_INTRA_PENDING) && --counter);

        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
        return counter ? 0 : 1;
}

static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
                void *dst, u32 len, u8 *iv, int mode, int dir)
{
        u32 flags = 0;
        unsigned long iflags;
        int ret;

        /* If the source and destination are the same, then
         * we need to turn on the coherent flags, otherwise
         * we don't need to worry
         */

        flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

        if (dir == AES_DIR_ENCRYPT)
                flags |= AES_CTRL_ENCRYPT;

        /* Start the critical section */

        spin_lock_irqsave(&lock, iflags);

        if (mode == AES_MODE_CBC) {
                flags |= AES_CTRL_CBC;
                _writefield(AES_WRITEIV0_REG, iv);
        }

        flags |= AES_CTRL_WRKEY;
        _writefield(AES_WRITEKEY0_REG, tctx->key);

        ret = do_crypt(src, dst, len, flags);
        BUG_ON(ret);

        if (mode == AES_MODE_CBC)
                _readfield(AES_WRITEIV0_REG, iv);

        spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */

static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int len)
{
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        tctx->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(tctx->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
                /* not supported at all */
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        tctx->fallback.cip->base.crt_flags |=
                (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        tctx->keylen = len;

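        /*
         * Only 128-bit keys are handled by the AES engine itself; longer
         * keys are passed through to the software fallback skcipher
         * allocated in geode_init_skcipher().
         */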
        if (len == AES_KEYSIZE_128) {
                memcpy(tctx->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
                /* not supported at all */
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback
         */
        crypto_skcipher_clear_flags(tctx->fallback.skcipher,
                                    CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(tctx->fallback.skcipher,
                                  crypto_skcipher_get_flags(tfm) &
                                  CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
                return;
        }

        geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
                        AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
                return;
        }

        geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
                        AES_MODE_ECB, AES_DIR_DECRYPT);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        tctx->fallback.cip = crypto_alloc_cipher(name, 0,
                                                 CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(tctx->fallback.cip)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(tctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(tctx->fallback.cip);
}

static struct crypto_alg geode_alg = {
        .cra_name = "aes",
        .cra_driver_name = "geode-aes",
        .cra_priority = 300,
        .cra_alignmask = 15,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_init = fallback_init_cip,
        .cra_exit = fallback_exit_cip,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
        .cra_module = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = geode_setkey_cip,
                        .cia_encrypt = geode_encrypt,
                        .cia_decrypt = geode_decrypt
                }
        }
};

static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        tctx->fallback.skcipher =
                crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(tctx->fallback.skcipher)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(tctx->fallback.skcipher);
        }

        crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
                                    crypto_skcipher_reqsize(tctx->fallback.skcipher));
        return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(tctx->fallback.skcipher);
}

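/*
 * Common ECB/CBC request handler. Keys the engine cannot handle are
 * forwarded to the software fallback skcipher; otherwise the request is
 * walked and full AES blocks are fed to the hardware, with any partial
 * tail of a walk step left for the next iteration.
 */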
static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                *subreq = *req;
                skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
                if (dir == AES_DIR_DECRYPT)
                        return crypto_skcipher_decrypt(subreq);
                else
                        return crypto_skcipher_encrypt(subreq);
        }

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) != 0) {
                geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
                                round_down(nbytes, AES_BLOCK_SIZE),
                                walk.iv, mode, dir);
                err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
        }

        return err;
}

static int geode_cbc_encrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

static struct skcipher_alg geode_skcipher_algs[] = {
        {
                .base.cra_name = "cbc(aes)",
                .base.cra_driver_name = "cbc-aes-geode",
                .base.cra_priority = 400,
                .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = AES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
                .base.cra_alignmask = 15,
                .base.cra_module = THIS_MODULE,
                .init = geode_init_skcipher,
                .exit = geode_exit_skcipher,
                .setkey = geode_setkey_skcipher,
                .encrypt = geode_cbc_encrypt,
                .decrypt = geode_cbc_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
                .base.cra_name = "ecb(aes)",
                .base.cra_driver_name = "ecb-aes-geode",
                .base.cra_priority = 400,
                .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = AES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
                .base.cra_alignmask = 15,
                .base.cra_module = THIS_MODULE,
                .init = geode_init_skcipher,
                .exit = geode_exit_skcipher,
                .setkey = geode_setkey_skcipher,
                .encrypt = geode_ecb_encrypt,
                .decrypt = geode_ecb_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
};

static void geode_aes_remove(struct pci_dev *dev)
{
        crypto_unregister_alg(&geode_alg);
        crypto_unregister_skciphers(geode_skcipher_algs,
                                    ARRAY_SIZE(geode_skcipher_algs));

        pci_iounmap(dev, _iobase);
        _iobase = NULL;

        pci_release_regions(dev);
        pci_disable_device(dev);
}

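/*
 * Probe: enable the PCI function, map BAR 0 (the AES engine registers),
 * quiesce any pending interrupt state and register the "aes", "cbc(aes)"
 * and "ecb(aes)" implementations with the crypto API. Errors unwind in
 * reverse order of setup.
 */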
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(dev);
        if (ret)
                return ret;

        ret = pci_request_regions(dev, "geode-aes");
        if (ret)
                goto eenable;

        _iobase = pci_iomap(dev, 0, 0);

        if (_iobase == NULL) {
                ret = -ENOMEM;
                goto erequest;
        }

        spin_lock_init(&lock);

        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

        ret = crypto_register_alg(&geode_alg);
        if (ret)
                goto eiomap;

        ret = crypto_register_skciphers(geode_skcipher_algs,
                                        ARRAY_SIZE(geode_skcipher_algs));
        if (ret)
                goto ealg;

        dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
        return 0;

ealg:
        crypto_unregister_alg(&geode_alg);

eiomap:
        pci_iounmap(dev, _iobase);

erequest:
        pci_release_regions(dev);

eenable:
        pci_disable_device(dev);

        dev_err(&dev->dev, "GEODE AES initialization failed.\n");
        return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
        .name = "Geode LX AES",
        .id_table = geode_aes_tbl,
        .probe = geode_aes_probe,
        .remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
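/*
 * Minimal usage sketch (illustrative only, not part of the driver): once
 * this module is bound to the Geode LX AES device, the registered
 * "cbc(aes)" implementation can be used through the regular kernel
 * crypto API. "key", "buf" and "buflen" below are placeholders supplied
 * by the caller, buflen must be a multiple of AES_BLOCK_SIZE, and error
 * handling is elided for brevity.
 *
 *        struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *        struct skcipher_request *req;
 *        DECLARE_CRYPTO_WAIT(wait);
 *        struct scatterlist sg;
 *        u8 iv[AES_BLOCK_SIZE];
 *        int err;
 *
 *        if (IS_ERR(tfm))
 *                return PTR_ERR(tfm);
 *        crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *        req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *        get_random_bytes(iv, sizeof(iv));
 *        sg_init_one(&sg, buf, buflen);
 *        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                      crypto_req_done, &wait);
 *        skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *        skcipher_request_free(req);
 *        crypto_free_skcipher(tfm);
 */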