/*	$OpenBSD: glxsb.c,v 1.40 2021/10/24 10:26:22 patrick Exp $	*/

/*
 * Copyright (c) 2006 Tom Cosgrove <tom@openbsd.org>
 * Copyright (c) 2003, 2004 Theo de Raadt
 * Copyright (c) 2003 Jason Wright
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the security block on the AMD Geode LX processors
 * http://www.amd.com/files/connectivitysolutions/geode/geode_lx/33234d_lx_ds.pdf
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/timeout.h>

#include <machine/bus.h>
#include <machine/cpufunc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#ifdef CRYPTO
#include <crypto/cryptodev.h>
#include <crypto/aes.h>
#include <crypto/xform.h>
#include <crypto/cryptosoft.h>
#endif

#define SB_GLD_MSR_CAP		0x58002000	/* RO - Capabilities */
#define SB_GLD_MSR_CONFIG	0x58002001	/* RW - Master Config */
#define SB_GLD_MSR_SMI		0x58002002	/* RW - SMI */
#define SB_GLD_MSR_ERROR	0x58002003	/* RW - Error */
#define SB_GLD_MSR_PM		0x58002004	/* RW - Power Mgmt */
#define SB_GLD_MSR_DIAG		0x58002005	/* RW - Diagnostic */
#define SB_GLD_MSR_CTRL		0x58002006	/* RW - Security Block Cntrl */

/* For GLD_MSR_CTRL: */
#define SB_GMC_DIV0		0x0000		/* AES update divisor values */
#define SB_GMC_DIV1		0x0001
#define SB_GMC_DIV2		0x0002
#define SB_GMC_DIV3		0x0003
#define SB_GMC_DIV_MASK		0x0003
#define SB_GMC_SBI		0x0004		/* AES swap bits */
#define SB_GMC_SBY		0x0008		/* AES swap bytes */
#define SB_GMC_TW		0x0010		/* Time write (EEPROM) */
#define SB_GMC_T_SEL0		0x0000		/* RNG post-proc: none */
#define SB_GMC_T_SEL1		0x0100		/* RNG post-proc: LFSR */
#define SB_GMC_T_SEL2		0x0200		/* RNG post-proc: whitener */
#define SB_GMC_T_SEL3		0x0300		/* RNG LFSR+whitener */
#define SB_GMC_T_SEL_MASK	0x0300
#define SB_GMC_T_NE		0x0400		/* Noise (generator) Enable */
#define SB_GMC_T_TM		0x0800		/* RNG test mode */
						/*     (deterministic) */
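
/*
 * Note: the SB_GLD_MSR_* registers above live in MSR space and are
 * accessed with rdmsr()/wrmsr(); the SB_* registers below are
 * memory-mapped and reached through bus_space at offsets from the
 * BAR mapped in glxsb_attach().
 */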

/* Security Block configuration/control registers (offsets from base) */

#define SB_CTL_A		0x0000		/* RW - SB Control A */
#define SB_CTL_B		0x0004		/* RW - SB Control B */
#define SB_AES_INT		0x0008		/* RW - SB AES Interrupt */
#define SB_SOURCE_A		0x0010		/* RW - Source A */
#define SB_DEST_A		0x0014		/* RW - Destination A */
#define SB_LENGTH_A		0x0018		/* RW - Length A */
#define SB_SOURCE_B		0x0020		/* RW - Source B */
#define SB_DEST_B		0x0024		/* RW - Destination B */
#define SB_LENGTH_B		0x0028		/* RW - Length B */
#define SB_WKEY			0x0030		/* WO - Writable Key 0-3 */
#define SB_WKEY_0		0x0030		/* WO - Writable Key 0 */
#define SB_WKEY_1		0x0034		/* WO - Writable Key 1 */
#define SB_WKEY_2		0x0038		/* WO - Writable Key 2 */
#define SB_WKEY_3		0x003C		/* WO - Writable Key 3 */
#define SB_CBC_IV		0x0040		/* RW - CBC IV 0-3 */
#define SB_CBC_IV_0		0x0040		/* RW - CBC IV 0 */
#define SB_CBC_IV_1		0x0044		/* RW - CBC IV 1 */
#define SB_CBC_IV_2		0x0048		/* RW - CBC IV 2 */
#define SB_CBC_IV_3		0x004C		/* RW - CBC IV 3 */
#define SB_RANDOM_NUM		0x0050		/* RW - Random Number */
#define SB_RANDOM_NUM_STATUS	0x0054		/* RW - Random Number Status */
#define SB_EEPROM_COMM		0x0800		/* RW - EEPROM Command */
#define SB_EEPROM_ADDR		0x0804		/* RW - EEPROM Address */
#define SB_EEPROM_DATA		0x0808		/* RW - EEPROM Data */
#define SB_EEPROM_SEC_STATE	0x080C		/* RW - EEPROM Security State */

/* For SB_CTL_A and _B */
#define SB_CTL_ST		0x0001		/* Start operation (enc/dec) */
#define SB_CTL_ENC		0x0002		/* Encrypt (0 is decrypt) */
#define SB_CTL_DEC		0x0000		/* Decrypt */
#define SB_CTL_WK		0x0004		/* Use writable key (we set) */
#define SB_CTL_DC		0x0008		/* Destination coherent */
#define SB_CTL_SC		0x0010		/* Source coherent */
#define SB_CTL_CBC		0x0020		/* CBC (0 is ECB) */

/* For SB_AES_INT */
#define SB_AI_DISABLE_AES_A	0x00001		/* Disable AES A compl int */
#define SB_AI_ENABLE_AES_A	0x00000		/* Enable AES A compl int */
#define SB_AI_DISABLE_AES_B	0x00002		/* Disable AES B compl int */
#define SB_AI_ENABLE_AES_B	0x00000		/* Enable AES B compl int */
#define SB_AI_DISABLE_EEPROM	0x00004		/* Disable EEPROM op comp int */
#define SB_AI_ENABLE_EEPROM	0x00000		/* Enable EEPROM op compl int */
#define SB_AI_AES_A_COMPLETE	0x10000		/* AES A operation complete */
#define SB_AI_AES_B_COMPLETE	0x20000		/* AES B operation complete */
#define SB_AI_EEPROM_COMPLETE	0x40000		/* EEPROM operation complete */

#define SB_RNS_TRNG_VALID	0x0001		/* in SB_RANDOM_NUM_STATUS */

#define SB_MEM_SIZE		0x0810		/* Size of memory block */

#define SB_AES_ALIGN		0x0010		/* Source and dest buffers */
						/* must be 16-byte aligned */
#define SB_AES_BLOCK_SIZE	0x0010

/*
 * The Geode LX security block AES acceleration doesn't perform scatter-
 * gather: it just takes source and destination addresses.  Therefore the
 * plain- and ciphertexts need to be contiguous.  To this end, we allocate
 * a buffer for both, and accept the overhead of copying in and out.  If
 * the number of bytes in one operation is bigger than allowed for by the
 * buffer (buffer is twice the size of the max length, as it has both input
 * and output) then we have to perform multiple encryptions/decryptions.
 */
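
/*
 * A sketch of the resulting buffer layout (one contiguous allocation of
 * 2 * GLXSB_MAX_AES_LEN bytes, split in glxsb_crypto_encdec()):
 *
 *	dma_vaddr               dma_vaddr + xlen
 *	|<------- input ------>|<------- output ------>|
 *
 * where xlen = min(crd_len, GLXSB_MAX_AES_LEN); longer requests are
 * processed in GLXSB_MAX_AES_LEN chunks, carrying the CBC IV from one
 * chunk to the next.
 */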
#define GLXSB_MAX_AES_LEN	16384

#ifdef CRYPTO
struct glxsb_dma_map {
	bus_dmamap_t		dma_map;
	bus_dma_segment_t	dma_seg;
	int			dma_nsegs;
	int			dma_size;
	caddr_t			dma_vaddr;
	uint32_t		dma_paddr;
};
struct glxsb_session {
	uint32_t	ses_key[4];
	int		ses_klen;
	int		ses_used;
	struct swcr_data *ses_swd_auth;
	struct swcr_data *ses_swd_enc;
};
#endif /* CRYPTO */

struct glxsb_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct timeout		sc_to;

#ifdef CRYPTO
	bus_dma_tag_t		sc_dmat;
	struct glxsb_dma_map	sc_dma;
	int32_t			sc_cid;
	int			sc_nsessions;
	struct glxsb_session	*sc_sessions;
#endif /* CRYPTO */

	uint64_t		save_gld_msr;
};

int	glxsb_match(struct device *, void *, void *);
void	glxsb_attach(struct device *, struct device *, void *);
int	glxsb_activate(struct device *, int);
void	glxsb_rnd(void *);

struct cfattach glxsb_ca = {
	sizeof(struct glxsb_softc), glxsb_match, glxsb_attach, NULL,
	glxsb_activate
};

struct cfdriver glxsb_cd = {
	NULL, "glxsb", DV_DULL
};


#ifdef CRYPTO

/* The session index lives in the low 28 bits of the session ID */
#define GLXSB_SESSION(sid)		((sid) & 0x0fffffff)
#define	GLXSB_SID(crd,ses)		(((crd) << 28) | ((ses) & 0x0fffffff))

static struct glxsb_softc *glxsb_sc;

int	glxsb_crypto_setup(struct glxsb_softc *);
int	glxsb_crypto_newsession(uint32_t *, struct cryptoini *);
int	glxsb_crypto_process(struct cryptop *);
int	glxsb_crypto_freesession(uint64_t);
static __inline void glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t,
    uint32_t, void *, int, void *);

int	glxsb_dma_alloc(struct glxsb_softc *, int, struct glxsb_dma_map *);
void	glxsb_dma_pre_op(struct glxsb_softc *, struct glxsb_dma_map *);
void	glxsb_dma_post_op(struct glxsb_softc *, struct glxsb_dma_map *);
void	glxsb_dma_free(struct glxsb_softc *, struct glxsb_dma_map *);

#endif /* CRYPTO */


int
glxsb_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_AMD &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_GEODE_LX_CRYPTO)
		return (1);

	return (0);
}

void
glxsb_attach(struct device *parent, struct device *self, void *aux)
{
	struct glxsb_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	bus_addr_t membase;
	bus_size_t memsize;
	uint64_t msr;
#ifdef CRYPTO
	uint32_t intr;
#endif

	msr = rdmsr(SB_GLD_MSR_CAP);
	if ((msr & 0xFFFF00) != 0x130400) {
		printf(": unknown ID 0x%x\n", (int) ((msr & 0xFFFF00) >> 16));
		return;
	}

	/* printf(": revision %d", (int) (msr & 0xFF)); */

	/* Map in the security block configuration/control registers */
	if (pci_mapreg_map(pa, PCI_MAPREG_START,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_iot,
	    &sc->sc_ioh, &membase, &memsize, SB_MEM_SIZE)) {
		printf(": can't find mem space\n");
		return;
	}

	/*
	 * Configure the Security Block.
	 *
	 * We want to enable the noise generator (T_NE), and enable the
	 * linear feedback shift register and whitener post-processing
	 * (T_SEL = 3).  Also ensure that test mode (deterministic values)
	 * is disabled.
	 */
	msr = rdmsr(SB_GLD_MSR_CTRL);
	msr &= ~(SB_GMC_T_TM | SB_GMC_T_SEL_MASK);
	msr |= SB_GMC_T_NE | SB_GMC_T_SEL3;
#if 0
	msr |= SB_GMC_SBI | SB_GMC_SBY;		/* for AES, if necessary */
#endif
	wrmsr(SB_GLD_MSR_CTRL, msr);

	/* Install a periodic collector for the "true" (AMD's word) RNG */
	timeout_set(&sc->sc_to, glxsb_rnd, sc);
	glxsb_rnd(sc);
	printf(": RNG");

#ifdef CRYPTO
	/* We don't have an interrupt handler, so disable completion INTs */
	intr = SB_AI_DISABLE_AES_A | SB_AI_DISABLE_AES_B |
	    SB_AI_DISABLE_EEPROM | SB_AI_AES_A_COMPLETE |
	    SB_AI_AES_B_COMPLETE | SB_AI_EEPROM_COMPLETE;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_AES_INT, intr);

	sc->sc_dmat = pa->pa_dmat;

	if (glxsb_crypto_setup(sc))
		printf(" AES");
#endif

	printf("\n");
}

int
glxsb_activate(struct device *self, int act)
{
	struct glxsb_softc *sc = (struct glxsb_softc *)self;

	switch (act) {
	case DVACT_QUIESCE:
		/* XXX should wait for current crypto op to finish */
		break;
	case DVACT_SUSPEND:
		sc->save_gld_msr = rdmsr(SB_GLD_MSR_CTRL);
		break;
	case DVACT_RESUME:
		wrmsr(SB_GLD_MSR_CTRL, sc->save_gld_msr);
		break;
	}
	return (0);
}

void
glxsb_rnd(void *v)
{
	struct glxsb_softc *sc = v;
	uint32_t status, value;

	status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_RANDOM_NUM_STATUS);
	if (status & SB_RNS_TRNG_VALID) {
		value = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_RANDOM_NUM);
		enqueue_randomness(value);
	}

	timeout_add_msec(&sc->sc_to, 10);
}

#ifdef CRYPTO
int
glxsb_crypto_setup(struct glxsb_softc *sc)
{
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	/* Allocate a contiguous DMA-able buffer to work in */
	if (glxsb_dma_alloc(sc, GLXSB_MAX_AES_LEN * 2, &sc->sc_dma) != 0)
		return 0;

	bzero(algs, sizeof(algs));
	algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0)
		return 0;

	crypto_register(sc->sc_cid, algs, glxsb_crypto_newsession,
	    glxsb_crypto_freesession, glxsb_crypto_process);

	sc->sc_nsessions = 0;

	glxsb_sc = sc;

	return 1;
}

int
glxsb_crypto_newsession(uint32_t *sidp, struct cryptoini *cri)
{
	struct glxsb_softc *sc = glxsb_sc;
	struct glxsb_session *ses = NULL;
	const struct auth_hash *axf;
	const struct enc_xform *txf;
	struct cryptoini *c;
	struct swcr_data *swd;
	int sesn, i;

	if (sc == NULL || sidp == NULL || cri == NULL)
		return (EINVAL);

	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
		if (sc->sc_sessions[sesn].ses_used == 0) {
			ses = &sc->sc_sessions[sesn];
			break;
		}
	}

	if (ses == NULL) {
		sesn = sc->sc_nsessions;
		ses = mallocarray(sesn + 1, sizeof(*ses), M_DEVBUF,
		    M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		if (sesn != 0) {
			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
			explicit_bzero(sc->sc_sessions, sesn * sizeof(*ses));
			free(sc->sc_sessions, M_DEVBUF, sesn * sizeof(*ses));
		}
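		/*
		 * Commit the grown array: the old storage was copied,
		 * zeroed and freed above; sesn indexes the new slot.
		 */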
		sc->sc_sessions = ses;
		ses = &sc->sc_sessions[sesn];
		sc->sc_nsessions++;
	}

	bzero(ses, sizeof(*ses));
	ses->ses_used = 1;

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_AES_CBC:

			if (c->cri_klen != 128) {
				/* Fall back to software for other key sizes */
				swd = malloc(sizeof(struct swcr_data),
				    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
				if (swd == NULL) {
					glxsb_crypto_freesession(sesn);
					return (ENOMEM);
				}
				ses->ses_swd_enc = swd;
				txf = &enc_xform_aes;
				/* Set sw_exf first: freesession uses it */
				swd->sw_exf = txf;
				if (txf->ctxsize > 0) {
					swd->sw_kschedule =
					    malloc(txf->ctxsize,
						M_CRYPTO_DATA,
						M_NOWAIT|M_ZERO);
					if (swd->sw_kschedule == NULL) {
						glxsb_crypto_freesession(sesn);
						return (ENOMEM);
					}
				}
				if (txf->setkey(swd->sw_kschedule, c->cri_key,
				    c->cri_klen / 8) < 0) {
					glxsb_crypto_freesession(sesn);
					return (EINVAL);
				}
				break;
			}

			ses->ses_klen = c->cri_klen;

			/* Copy the key (Geode LX wants the primary key only) */
			bcopy(c->cri_key, ses->ses_key, sizeof(ses->ses_key));
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_128;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_192;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_256;
		authcommon:
			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
			    M_NOWAIT|M_ZERO);
			if (swd == NULL) {
				glxsb_crypto_freesession(sesn);
				return (ENOMEM);
			}
			ses->ses_swd_auth = swd;

			swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				glxsb_crypto_freesession(sesn);
				return (ENOMEM);
			}

			swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swd->sw_octx == NULL) {
				glxsb_crypto_freesession(sesn);
				return (ENOMEM);
			}

			/* Precompute the HMAC inner digest context */
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_IPAD_VAL;

			axf->Init(swd->sw_ictx);
			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - (c->cri_klen / 8));

			/* ... and the outer digest context */
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
				    HMAC_OPAD_VAL);

			axf->Init(swd->sw_octx);
			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_octx, hmac_opad_buffer,
			    axf->blocksize - (c->cri_klen / 8));

			/* Restore the caller's key */
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_OPAD_VAL;

			swd->sw_axf = axf;
			swd->sw_alg = c->cri_alg;

			break;
		default:
			glxsb_crypto_freesession(sesn);
			return (EINVAL);
		}
	}

	*sidp = GLXSB_SID(0, sesn);
	return (0);
}

int
glxsb_crypto_freesession(uint64_t tid)
{
	struct glxsb_softc *sc = glxsb_sc;
	struct swcr_data *swd;
	const struct auth_hash *axf;
	const struct enc_xform *txf;
	int sesn;
	uint32_t sid = ((uint32_t)tid) & 0xffffffff;

	if (sc == NULL)
		return (EINVAL);
	sesn = GLXSB_SESSION(sid);
	if (sesn >= sc->sc_nsessions)
		return (EINVAL);
	if ((swd = sc->sc_sessions[sesn].ses_swd_enc)) {
		txf = swd->sw_exf;

		if (swd->sw_kschedule) {
			explicit_bzero(swd->sw_kschedule, txf->ctxsize);
			free(swd->sw_kschedule, M_CRYPTO_DATA, txf->ctxsize);
		}
		free(swd, M_CRYPTO_DATA, sizeof(*swd));
	}
	if ((swd = sc->sc_sessions[sesn].ses_swd_auth)) {
		axf = swd->sw_axf;

		if (swd->sw_ictx) {
			explicit_bzero(swd->sw_ictx, axf->ctxsize);
			free(swd->sw_ictx, M_CRYPTO_DATA, axf->ctxsize);
		}
		if (swd->sw_octx) {
			explicit_bzero(swd->sw_octx, axf->ctxsize);
			free(swd->sw_octx, M_CRYPTO_DATA, axf->ctxsize);
		}
		free(swd, M_CRYPTO_DATA, sizeof(*swd));
	}
	explicit_bzero(&sc->sc_sessions[sesn], sizeof(sc->sc_sessions[sesn]));
	return (0);
}

/*
 * Must be called at splnet() or higher
 */
static __inline void
glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc,
    uint32_t pdst, void *key, int len, void *iv)
{
	uint32_t status;
	int i;

	if (len & 0xF) {
		printf("%s: len must be a multiple of 16 (not %d)\n",
		    sc->sc_dev.dv_xname, len);
		return;
	}

	/* Set the source */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_SOURCE_A, psrc);

	/* Set the destination address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_DEST_A, pdst);

	/* Set the data length */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_LENGTH_A, len);

	/* Set the IV */
	if (iv != NULL) {
		bus_space_write_region_4(sc->sc_iot, sc->sc_ioh,
		    SB_CBC_IV, iv, 4);
		control |= SB_CTL_CBC;
	}

	/* Set the key */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, SB_WKEY, key, 4);

	/* Ask the security block to do it */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_CTL_A,
	    control | SB_CTL_WK | SB_CTL_DC | SB_CTL_SC | SB_CTL_ST);

	/*
	 * Now wait until it is done.
	 *
	 * We do a busy wait.  Obviously the number of iterations of
	 * the loop required to perform the AES operation depends upon
	 * the number of bytes to process.
	 *
	 * On a 500 MHz Geode LX we see
	 *
	 *	length (bytes)	typical max iterations
	 *	    16		    12
	 *	    64		    22
	 *	   256		    59
	 *	  1024		   212
	 *	  8192		 1,537
	 *
	 * Since we have a maximum size of operation defined in
	 * GLXSB_MAX_AES_LEN, we use this constant to decide how long
	 * to wait.  Allow an order of magnitude longer than it should
	 * really take, just in case.
	 */
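	/*
	 * (Extrapolating the table above, a full GLXSB_MAX_AES_LEN
	 * operation should need on the order of 3,000 iterations;
	 * the bound of GLXSB_MAX_AES_LEN * 10 = 163,840 below leaves
	 * ample headroom.)
	 */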
	for (i = 0; i < GLXSB_MAX_AES_LEN * 10; i++) {
		status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_CTL_A);

		if ((status & SB_CTL_ST) == 0)		/* Done */
			return;
	}

	printf("%s: operation failed to complete\n", sc->sc_dev.dv_xname);
}

static int
glxsb_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf)
{
	int type;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	return (swcr_authcompute(crp, crd, sw, buf, type));
}

static int
glxsb_crypto_swenc(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf)
{
	int type;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	return (swcr_encdec(crd, sw, buf, type));
}

static int
glxsb_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct glxsb_session *ses, struct glxsb_softc *sc, caddr_t buf)
{
	char *op_src, *op_dst;
	uint32_t op_psrc, op_pdst;
	uint8_t op_iv[SB_AES_BLOCK_SIZE];
	int err = 0;
	int len, tlen, xlen;
	int offset;
	uint32_t control;

	if (crd == NULL || (crd->crd_len % SB_AES_BLOCK_SIZE) != 0) {
		err = EINVAL;
		goto out;
	}

	/* How much of our buffer will we need to use? */
	xlen = crd->crd_len > GLXSB_MAX_AES_LEN ?
	    GLXSB_MAX_AES_LEN : crd->crd_len;

	/*
	 * XXX Check if we can have input == output on Geode LX.
	 * XXX In the meantime, use two separate (adjacent) buffers.
	 */
	op_src = sc->sc_dma.dma_vaddr;
	op_dst = sc->sc_dma.dma_vaddr + xlen;

	op_psrc = sc->sc_dma.dma_paddr;
	op_pdst = sc->sc_dma.dma_paddr + xlen;

	if (crd->crd_flags & CRD_F_ENCRYPT) {
		control = SB_CTL_ENC;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
		else
			arc4random_buf(op_iv, sizeof(op_iv));

		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				err = m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, sizeof(op_iv), op_iv,
				    M_NOWAIT);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copyback((struct uio *)crp->crp_buf,
				    crd->crd_inject, sizeof(op_iv), op_iv);
			else
				bcopy(op_iv,
				    crp->crp_buf + crd->crd_inject,
				    sizeof(op_iv));
			if (err)
				goto out;
		}
	} else {
		control = SB_CTL_DEC;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
		else {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, sizeof(op_iv), op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_inject, sizeof(op_iv), op_iv);
			else
				bcopy(crp->crp_buf + crd->crd_inject,
				    op_iv, sizeof(op_iv));
		}
	}

	offset = 0;
	tlen = crd->crd_len;

	/* Process the data in GLXSB_MAX_AES_LEN chunks */
	while (tlen > 0) {
		len = (tlen > GLXSB_MAX_AES_LEN) ?
		    GLXSB_MAX_AES_LEN : tlen;

		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copydata((struct mbuf *)crp->crp_buf,
			    crd->crd_skip + offset, len, op_src);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copydata((struct uio *)crp->crp_buf,
			    crd->crd_skip + offset, len, op_src);
		else
			bcopy(crp->crp_buf + crd->crd_skip + offset, op_src,
			    len);

		glxsb_dma_pre_op(sc, &sc->sc_dma);

		glxsb_aes(sc, control, op_psrc, op_pdst, ses->ses_key,
		    len, op_iv);

		glxsb_dma_post_op(sc, &sc->sc_dma);

		if (crp->crp_flags & CRYPTO_F_IMBUF)
			err = m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_skip + offset, len, op_dst, M_NOWAIT);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    crd->crd_skip + offset, len, op_dst);
		else
			bcopy(op_dst, crp->crp_buf + crd->crd_skip + offset,
			    len);
		if (err)
			break;

		offset += len;
		tlen -= len;

		if (tlen > 0) {
			/*
			 * Copy out the last block for use as the next
			 * iteration's IV: the last ciphertext block is
			 * in op_dst when encrypting and in op_src when
			 * decrypting.
			 */
			if (crd->crd_flags & CRD_F_ENCRYPT)
				bcopy(op_dst + len - sizeof(op_iv), op_iv,
				    sizeof(op_iv));
			else
				bcopy(op_src + len - sizeof(op_iv), op_iv,
				    sizeof(op_iv));
		}
	}

	/* All AES processing has now been done. */
	explicit_bzero(sc->sc_dma.dma_vaddr, xlen * 2);

out:
	return (err);
}

int
glxsb_crypto_process(struct cryptop *crp)
{
	struct glxsb_softc *sc = glxsb_sc;
	struct glxsb_session *ses;
	struct cryptodesc *crd;
	int sesn, err = 0;
	int s, i;

	s = splnet();

	KASSERT(crp->crp_ndesc >= 1);

	sesn = GLXSB_SESSION(crp->crp_sid);
	if (sesn >= sc->sc_nsessions) {
		err = EINVAL;
		goto out;
	}
	ses = &sc->sc_sessions[sesn];
	if (ses->ses_used == 0) {
		err = EINVAL;
		goto out;
	}

	for (i = 0; i < crp->crp_ndesc; i++) {
		crd = &crp->crp_desc[i];
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
			if (ses->ses_swd_enc) {
				if ((err = glxsb_crypto_swenc(crp, crd,
				    ses->ses_swd_enc, crp->crp_buf)) != 0)
					goto out;
			} else if ((err = glxsb_crypto_encdec(crp, crd, ses,
			    sc, crp->crp_buf)) != 0)
				goto out;
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			if ((err = glxsb_crypto_swauth(crp, crd,
			    ses->ses_swd_auth, crp->crp_buf)) != 0)
				goto out;
			break;

		default:
			err = EINVAL;
			goto out;
		}
	}

out:
	splx(s);
	return (err);
}

int
glxsb_dma_alloc(struct glxsb_softc *sc, int size, struct glxsb_dma_map *dma)
{
	int rc;

	dma->dma_nsegs = 1;
	dma->dma_size = size;

	rc = bus_dmamap_create(sc->sc_dmat, size, dma->dma_nsegs, size,
	    0, BUS_DMA_NOWAIT, &dma->dma_map);
	if (rc != 0) {
		printf("%s: couldn't create DMA map for %d bytes (%d)\n",
		    sc->sc_dev.dv_xname, size, rc);

		goto fail0;
	}

	rc = bus_dmamem_alloc(sc->sc_dmat, size, SB_AES_ALIGN, 0,
	    &dma->dma_seg, dma->dma_nsegs, &dma->dma_nsegs, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: couldn't allocate DMA memory of %d bytes (%d)\n",
		    sc->sc_dev.dv_xname, size, rc);

		goto fail1;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, 1, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: couldn't map DMA memory for %d bytes (%d)\n",
		    sc->sc_dev.dv_xname, size, rc);

		goto fail2;
	}
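
	/* Load the map so the device gets the buffer's physical address. */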
	rc = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: couldn't load DMA memory for %d bytes (%d)\n",
		    sc->sc_dev.dv_xname, size, rc);

		goto fail3;
	}

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;

	return 0;

fail3:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail2:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nsegs);
fail1:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail0:
	return rc;
}

void
glxsb_dma_pre_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{
	bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
glxsb_dma_post_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{
	bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

void
glxsb_dma_free(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nsegs);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

#endif /* CRYPTO */