1 /* $NetBSD: ubsec.c,v 1.25 2010/11/13 13:52:09 uebayasi Exp $ */ 2 /* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */ 3 /* $OpenBSD: ubsec.c,v 1.127 2003/06/04 14:04:58 jason Exp $ */ 4 5 /* 6 * Copyright (c) 2000 Jason L. Wright (jason@thought.net) 7 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) 8 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 23 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * Effort sponsored in part by the Defense Advanced Research Projects 32 * Agency (DARPA) and Air Force Research Laboratory, Air Force 33 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 34 * 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.25 2010/11/13 13:52:09 uebayasi Exp $"); 39 40 #undef UBSEC_DEBUG 41 42 /* 43 * uBsec 5[56]01, bcm580xx, bcm582x hardware crypto accelerator 44 */ 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/proc.h> 49 #include <sys/endian.h> 50 #ifdef __NetBSD__ 51 #define letoh16 htole16 52 #define letoh32 htole32 53 #define UBSEC_NO_RNG /* until statistically tested */ 54 #endif 55 #include <sys/errno.h> 56 #include <sys/malloc.h> 57 #include <sys/kernel.h> 58 #include <sys/mbuf.h> 59 #include <sys/device.h> 60 #include <sys/queue.h> 61 62 #include <opencrypto/cryptodev.h> 63 #include <opencrypto/xform.h> 64 #ifdef __OpenBSD__ 65 #include <dev/rndvar.h> 66 #include <sys/md5k.h> 67 #else 68 #include <sys/rnd.h> 69 #include <sys/md5.h> 70 #endif 71 #include <sys/sha1.h> 72 73 #include <dev/pci/pcireg.h> 74 #include <dev/pci/pcivar.h> 75 #include <dev/pci/pcidevs.h> 76 77 #include <dev/pci/ubsecreg.h> 78 #include <dev/pci/ubsecvar.h> 79 80 /* 81 * Prototypes and count for the pci_device structure 82 */ 83 static int ubsec_probe(device_t, cfdata_t, void *); 84 static void ubsec_attach(device_t, device_t, void *); 85 static void ubsec_reset_board(struct ubsec_softc *); 86 static void ubsec_init_board(struct ubsec_softc *); 87 static void ubsec_init_pciregs(struct pci_attach_args *pa); 88 static void ubsec_cleanchip(struct ubsec_softc *); 89 static void ubsec_totalreset(struct ubsec_softc *); 90 static int ubsec_free_q(struct ubsec_softc*, struct ubsec_q *); 91 92 #ifdef __OpenBSD__ 93 struct cfattach ubsec_ca = { 94 sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach, 95 }; 96 97 struct cfdriver ubsec_cd = { 98 0, "ubsec", DV_DULL 99 }; 100 #else 101 CFATTACH_DECL(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach, 102 NULL, NULL); 103 extern struct cfdriver ubsec_cd; 104 #endif 105 106 /* patchable */ 107 #ifdef UBSEC_DEBUG 108 extern int ubsec_debug; 109 int ubsec_debug=1; 110 #endif 111 112 static int ubsec_intr(void *); 113 static int ubsec_newsession(void*, u_int32_t *, struct cryptoini *); 114 static int ubsec_freesession(void*, u_int64_t); 115 static int ubsec_process(void*, struct cryptop *, int hint); 116 static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); 117 static void ubsec_feed(struct ubsec_softc *); 118 static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); 119 static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); 120 static void ubsec_feed2(struct ubsec_softc *); 121 #ifndef UBSEC_NO_RNG 122 static void ubsec_rng(void *); 123 #endif /* UBSEC_NO_RNG */ 124 static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, 125 struct ubsec_dma_alloc *, int); 126 static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); 127 static int ubsec_dmamap_aligned(bus_dmamap_t); 128 129 static int ubsec_kprocess(void*, struct cryptkop *, int); 130 static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, 131 struct cryptkop *, int); 132 static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, 133 struct cryptkop *, int); 134 static int ubsec_kprocess_rsapriv(struct ubsec_softc *, 135 struct cryptkop *, int); 136 static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); 137 static int ubsec_ksigbits(struct crparam *); 138 static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 139 static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 140 141 #ifdef UBSEC_DEBUG 142 static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); 143 static void ubsec_dump_mcr(struct ubsec_mcr *); 144 static void ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *); 145 #endif 146 147 #define READ_REG(sc,r) \ 148 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) 149 150 #define WRITE_REG(sc,reg,val) \ 151 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) 152 153 #define SWAP32(x) (x) = htole32(ntohl((x))) 154 #ifndef HTOLE32 155 #define HTOLE32(x) (x) = htole32(x) 156 #endif 157 158 struct ubsec_stats ubsecstats; 159 160 /* 161 * ubsec_maxbatch controls the number of crypto ops to voluntarily 162 * collect into one submission to the hardware. This batching happens 163 * when ops are dispatched from the crypto subsystem with a hint that 164 * more are to follow immediately. These ops must also not be marked 165 * with a ``no delay'' flag. 166 */ 167 static int ubsec_maxbatch = 1; 168 #ifdef SYSCTL_INT 169 SYSCTL_INT(_kern, OID_AUTO, ubsec_maxbatch, CTLFLAG_RW, &ubsec_maxbatch, 170 0, "Broadcom driver: max ops to batch w/o interrupt"); 171 #endif 172 173 /* 174 * ubsec_maxaggr controls the number of crypto ops to submit to the 175 * hardware as a unit. This aggregation reduces the number of interrupts 176 * to the host at the expense of increased latency (for all but the last 177 * operation). For network traffic setting this to one yields the highest 178 * performance but at the expense of more interrupt processing. 179 */ 180 static int ubsec_maxaggr = 1; 181 #ifdef SYSCTL_INT 182 SYSCTL_INT(_kern, OID_AUTO, ubsec_maxaggr, CTLFLAG_RW, &ubsec_maxaggr, 183 0, "Broadcom driver: max ops to aggregate under one interrupt"); 184 #endif 185 186 static const struct ubsec_product { 187 pci_vendor_id_t ubsec_vendor; 188 pci_product_id_t ubsec_product; 189 int ubsec_flags; 190 int ubsec_statmask; 191 const char *ubsec_name; 192 } ubsec_products[] = { 193 { PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5501, 194 0, 195 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 196 "Bluesteel 5501" 197 }, 198 { PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5601, 199 UBS_FLAGS_KEY | UBS_FLAGS_RNG, 200 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 201 "Bluesteel 5601" 202 }, 203 204 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5801, 205 0, 206 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 207 "Broadcom BCM5801" 208 }, 209 210 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5802, 211 UBS_FLAGS_KEY | UBS_FLAGS_RNG, 212 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 213 "Broadcom BCM5802" 214 }, 215 216 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5805, 217 UBS_FLAGS_KEY | UBS_FLAGS_RNG, 218 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 219 "Broadcom BCM5805" 220 }, 221 222 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5820, 223 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 224 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 225 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 226 "Broadcom BCM5820" 227 }, 228 229 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5821, 230 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 231 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 232 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 233 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 234 "Broadcom BCM5821" 235 }, 236 { PCI_VENDOR_SUN, PCI_PRODUCT_SUN_SCA1K, 237 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 238 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 239 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 240 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 241 "Sun Crypto Accelerator 1000" 242 }, 243 { PCI_VENDOR_SUN, PCI_PRODUCT_SUN_5821, 244 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 245 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 246 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 247 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 248 "Broadcom BCM5821 (Sun)" 249 }, 250 251 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5822, 252 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 253 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 254 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 255 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 256 "Broadcom BCM5822" 257 }, 258 259 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5823, 260 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 261 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 262 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 263 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 264 "Broadcom BCM5823" 265 }, 266 267 { 0, 0, 268 0, 269 0, 270 NULL 271 } 272 }; 273 274 static const struct ubsec_product * 275 ubsec_lookup(const struct pci_attach_args *pa) 276 { 277 const struct ubsec_product *up; 278 279 for (up = ubsec_products; up->ubsec_name != NULL; up++) { 280 if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor && 281 PCI_PRODUCT(pa->pa_id) == up->ubsec_product) 282 return (up); 283 } 284 return (NULL); 285 } 286 287 static int 288 ubsec_probe(device_t parent, cfdata_t match, void *aux) 289 { 290 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 291 292 if (ubsec_lookup(pa) != NULL) 293 return (1); 294 295 return (0); 296 } 297 298 static void 299 ubsec_attach(device_t parent, device_t self, void *aux) 300 { 301 struct ubsec_softc *sc = device_private(self); 302 struct pci_attach_args *pa = aux; 303 const struct ubsec_product *up; 304 pci_chipset_tag_t pc = pa->pa_pc; 305 pci_intr_handle_t ih; 306 const char *intrstr = NULL; 307 struct ubsec_dma *dmap; 308 u_int32_t cmd, i; 309 310 up = ubsec_lookup(pa); 311 if (up == NULL) { 312 printf("\n"); 313 panic("ubsec_attach: impossible"); 314 } 315 316 aprint_naive(": Crypto processor\n"); 317 aprint_normal(": %s, rev. %d\n", up->ubsec_name, 318 PCI_REVISION(pa->pa_class)); 319 320 SIMPLEQ_INIT(&sc->sc_queue); 321 SIMPLEQ_INIT(&sc->sc_qchip); 322 SIMPLEQ_INIT(&sc->sc_queue2); 323 SIMPLEQ_INIT(&sc->sc_qchip2); 324 SIMPLEQ_INIT(&sc->sc_q2free); 325 326 sc->sc_flags = up->ubsec_flags; 327 sc->sc_statmask = up->ubsec_statmask; 328 329 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 330 cmd |= PCI_COMMAND_MASTER_ENABLE; 331 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); 332 333 if (pci_mapreg_map(pa, BS_BAR, PCI_MAPREG_TYPE_MEM, 0, 334 &sc->sc_st, &sc->sc_sh, NULL, NULL)) { 335 aprint_error_dev(&sc->sc_dv, "can't find mem space"); 336 return; 337 } 338 339 sc->sc_dmat = pa->pa_dmat; 340 341 if (pci_intr_map(pa, &ih)) { 342 aprint_error_dev(&sc->sc_dv, "couldn't map interrupt\n"); 343 return; 344 } 345 intrstr = pci_intr_string(pc, ih); 346 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc); 347 if (sc->sc_ih == NULL) { 348 aprint_error_dev(&sc->sc_dv, "couldn't establish interrupt"); 349 if (intrstr != NULL) 350 aprint_error(" at %s", intrstr); 351 aprint_error("\n"); 352 return; 353 } 354 aprint_normal_dev(&sc->sc_dv, "interrupting at %s\n", intrstr); 355 356 sc->sc_cid = crypto_get_driverid(0); 357 if (sc->sc_cid < 0) { 358 aprint_error_dev(&sc->sc_dv, "couldn't get crypto driver id\n"); 359 pci_intr_disestablish(pc, sc->sc_ih); 360 return; 361 } 362 363 SIMPLEQ_INIT(&sc->sc_freequeue); 364 dmap = sc->sc_dmaa; 365 for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { 366 struct ubsec_q *q; 367 368 q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), 369 M_DEVBUF, M_NOWAIT); 370 if (q == NULL) { 371 aprint_error_dev(&sc->sc_dv, "can't allocate queue buffers\n"); 372 break; 373 } 374 375 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), 376 &dmap->d_alloc, 0)) { 377 aprint_error_dev(&sc->sc_dv, "can't allocate dma buffers\n"); 378 free(q, M_DEVBUF); 379 break; 380 } 381 dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; 382 383 q->q_dma = dmap; 384 sc->sc_queuea[i] = q; 385 386 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 387 } 388 389 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, 390 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 391 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, 392 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 393 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0, 394 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 395 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0, 396 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 397 398 /* 399 * Reset Broadcom chip 400 */ 401 ubsec_reset_board(sc); 402 403 /* 404 * Init Broadcom specific PCI settings 405 */ 406 ubsec_init_pciregs(pa); 407 408 /* 409 * Init Broadcom chip 410 */ 411 ubsec_init_board(sc); 412 413 #ifndef UBSEC_NO_RNG 414 if (sc->sc_flags & UBS_FLAGS_RNG) { 415 sc->sc_statmask |= BS_STAT_MCR2_DONE; 416 417 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 418 &sc->sc_rng.rng_q.q_mcr, 0)) 419 goto skip_rng; 420 421 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), 422 &sc->sc_rng.rng_q.q_ctx, 0)) { 423 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 424 goto skip_rng; 425 } 426 427 if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * 428 UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { 429 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 430 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 431 goto skip_rng; 432 } 433 434 if (hz >= 100) 435 sc->sc_rnghz = hz / 100; 436 else 437 sc->sc_rnghz = 1; 438 #ifdef __OpenBSD__ 439 timeout_set(&sc->sc_rngto, ubsec_rng, sc); 440 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 441 #else 442 callout_init(&sc->sc_rngto, 0); 443 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 444 #endif 445 skip_rng: 446 if (sc->sc_rnghz) 447 aprint_normal_dev(&sc->sc_dv, "random number generator enabled\n"); 448 else 449 aprint_error_dev(&sc->sc_dv, "WARNING: random number generator " 450 "disabled\n"); 451 } 452 #endif /* UBSEC_NO_RNG */ 453 454 if (sc->sc_flags & UBS_FLAGS_KEY) { 455 sc->sc_statmask |= BS_STAT_MCR2_DONE; 456 457 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, 458 ubsec_kprocess, sc); 459 #if 0 460 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0, 461 ubsec_kprocess, sc); 462 #endif 463 } 464 } 465 466 /* 467 * UBSEC Interrupt routine 468 */ 469 static int 470 ubsec_intr(void *arg) 471 { 472 struct ubsec_softc *sc = arg; 473 volatile u_int32_t stat; 474 struct ubsec_q *q; 475 struct ubsec_dma *dmap; 476 int npkts = 0, i; 477 478 stat = READ_REG(sc, BS_STAT); 479 stat &= sc->sc_statmask; 480 if (stat == 0) { 481 return (0); 482 } 483 484 WRITE_REG(sc, BS_STAT, stat); /* IACK */ 485 486 /* 487 * Check to see if we have any packets waiting for us 488 */ 489 if ((stat & BS_STAT_MCR1_DONE)) { 490 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 491 q = SIMPLEQ_FIRST(&sc->sc_qchip); 492 dmap = q->q_dma; 493 494 if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) 495 break; 496 497 q = SIMPLEQ_FIRST(&sc->sc_qchip); 498 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); 499 500 npkts = q->q_nstacked_mcrs; 501 sc->sc_nqchip -= 1+npkts; 502 /* 503 * search for further sc_qchip ubsec_q's that share 504 * the same MCR, and complete them too, they must be 505 * at the top. 506 */ 507 for (i = 0; i < npkts; i++) { 508 if(q->q_stacked_mcr[i]) 509 ubsec_callback(sc, q->q_stacked_mcr[i]); 510 else 511 break; 512 } 513 ubsec_callback(sc, q); 514 } 515 516 /* 517 * Don't send any more packet to chip if there has been 518 * a DMAERR. 519 */ 520 if (!(stat & BS_STAT_DMAERR)) 521 ubsec_feed(sc); 522 } 523 524 /* 525 * Check to see if we have any key setups/rng's waiting for us 526 */ 527 if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && 528 (stat & BS_STAT_MCR2_DONE)) { 529 struct ubsec_q2 *q2; 530 struct ubsec_mcr *mcr; 531 532 while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { 533 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); 534 535 bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map, 536 0, q2->q_mcr.dma_map->dm_mapsize, 537 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 538 539 mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; 540 if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { 541 bus_dmamap_sync(sc->sc_dmat, 542 q2->q_mcr.dma_map, 0, 543 q2->q_mcr.dma_map->dm_mapsize, 544 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 545 break; 546 } 547 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); 548 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next); 549 ubsec_callback2(sc, q2); 550 /* 551 * Don't send any more packet to chip if there has been 552 * a DMAERR. 553 */ 554 if (!(stat & BS_STAT_DMAERR)) 555 ubsec_feed2(sc); 556 } 557 } 558 559 /* 560 * Check to see if we got any DMA Error 561 */ 562 if (stat & BS_STAT_DMAERR) { 563 #ifdef UBSEC_DEBUG 564 if (ubsec_debug) { 565 volatile u_int32_t a = READ_REG(sc, BS_ERR); 566 567 printf("%s: dmaerr %s@%08x\n", device_xname(&sc->sc_dv), 568 (a & BS_ERR_READ) ? "read" : "write", 569 a & BS_ERR_ADDR); 570 } 571 #endif /* UBSEC_DEBUG */ 572 ubsecstats.hst_dmaerr++; 573 ubsec_totalreset(sc); 574 ubsec_feed(sc); 575 } 576 577 if (sc->sc_needwakeup) { /* XXX check high watermark */ 578 int wkeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 579 #ifdef UBSEC_DEBUG 580 if (ubsec_debug) 581 printf("%s: wakeup crypto (%x)\n", device_xname(&sc->sc_dv), 582 sc->sc_needwakeup); 583 #endif /* UBSEC_DEBUG */ 584 sc->sc_needwakeup &= ~wkeup; 585 crypto_unblock(sc->sc_cid, wkeup); 586 } 587 return (1); 588 } 589 590 /* 591 * ubsec_feed() - aggregate and post requests to chip 592 * OpenBSD comments: 593 * It is assumed that the caller set splnet() 594 */ 595 static void 596 ubsec_feed(struct ubsec_softc *sc) 597 { 598 struct ubsec_q *q, *q2; 599 int npkts, i; 600 void *v; 601 u_int32_t stat; 602 #ifdef UBSEC_DEBUG 603 static int max; 604 #endif /* UBSEC_DEBUG */ 605 606 npkts = sc->sc_nqueue; 607 if (npkts > ubsecstats.hst_maxqueue) 608 ubsecstats.hst_maxqueue = npkts; 609 if (npkts < 2) 610 goto feed1; 611 612 /* 613 * Decide how many ops to combine in a single MCR. We cannot 614 * aggregate more than UBS_MAX_AGGR because this is the number 615 * of slots defined in the data structure. Otherwise we clamp 616 * based on the tunable parameter ubsec_maxaggr. Note that 617 * aggregation can happen in two ways: either by batching ops 618 * from above or because the h/w backs up and throttles us. 619 * Aggregating ops reduces the number of interrupts to the host 620 * but also (potentially) increases the latency for processing 621 * completed ops as we only get an interrupt when all aggregated 622 * ops have completed. 623 */ 624 if (npkts > UBS_MAX_AGGR) 625 npkts = UBS_MAX_AGGR; 626 if (npkts > ubsec_maxaggr) 627 npkts = ubsec_maxaggr; 628 if (npkts > ubsecstats.hst_maxbatch) 629 ubsecstats.hst_maxbatch = npkts; 630 if (npkts < 2) 631 goto feed1; 632 ubsecstats.hst_totbatch += npkts-1; 633 634 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { 635 if (stat & BS_STAT_DMAERR) { 636 ubsec_totalreset(sc); 637 ubsecstats.hst_dmaerr++; 638 } else { 639 ubsecstats.hst_mcr1full++; 640 } 641 return; 642 } 643 644 #ifdef UBSEC_DEBUG 645 if (ubsec_debug) 646 printf("merging %d records\n", npkts); 647 /* XXX temporary aggregation statistics reporting code */ 648 if (max < npkts) { 649 max = npkts; 650 printf("%s: new max aggregate %d\n", device_xname(&sc->sc_dv), max); 651 } 652 #endif /* UBSEC_DEBUG */ 653 654 q = SIMPLEQ_FIRST(&sc->sc_queue); 655 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next); 656 --sc->sc_nqueue; 657 658 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 659 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 660 if (q->q_dst_map != NULL) 661 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 662 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 663 664 q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ 665 666 for (i = 0; i < q->q_nstacked_mcrs; i++) { 667 q2 = SIMPLEQ_FIRST(&sc->sc_queue); 668 bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, 669 0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 670 if (q2->q_dst_map != NULL) 671 bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, 672 0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 673 q2= SIMPLEQ_FIRST(&sc->sc_queue); 674 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next); 675 --sc->sc_nqueue; 676 677 v = ((void *)&q2->q_dma->d_dma->d_mcr); 678 v = (char*)v + (sizeof(struct ubsec_mcr) - 679 sizeof(struct ubsec_mcr_add)); 680 memcpy(&q->q_dma->d_dma->d_mcradd[i], v, sizeof(struct ubsec_mcr_add)); 681 q->q_stacked_mcr[i] = q2; 682 } 683 q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); 684 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 685 sc->sc_nqchip += npkts; 686 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 687 ubsecstats.hst_maxqchip = sc->sc_nqchip; 688 bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map, 689 0, q->q_dma->d_alloc.dma_map->dm_mapsize, 690 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 691 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 692 offsetof(struct ubsec_dmachunk, d_mcr)); 693 return; 694 695 feed1: 696 while (!SIMPLEQ_EMPTY(&sc->sc_queue)) { 697 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { 698 if (stat & BS_STAT_DMAERR) { 699 ubsec_totalreset(sc); 700 ubsecstats.hst_dmaerr++; 701 } else { 702 ubsecstats.hst_mcr1full++; 703 } 704 break; 705 } 706 707 q = SIMPLEQ_FIRST(&sc->sc_queue); 708 709 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 710 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 711 if (q->q_dst_map != NULL) 712 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 713 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 714 bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map, 715 0, q->q_dma->d_alloc.dma_map->dm_mapsize, 716 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 717 718 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 719 offsetof(struct ubsec_dmachunk, d_mcr)); 720 #ifdef UBSEC_DEBUG 721 if (ubsec_debug) 722 printf("feed: q->chip %p %08x stat %08x\n", 723 q, (u_int32_t)q->q_dma->d_alloc.dma_paddr, 724 stat); 725 #endif /* UBSEC_DEBUG */ 726 q = SIMPLEQ_FIRST(&sc->sc_queue); 727 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next); 728 --sc->sc_nqueue; 729 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 730 sc->sc_nqchip++; 731 } 732 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 733 ubsecstats.hst_maxqchip = sc->sc_nqchip; 734 } 735 736 /* 737 * Allocate a new 'session' and return an encoded session id. 'sidp' 738 * contains our registration id, and should contain an encoded session 739 * id on successful allocation. 740 */ 741 static int 742 ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) 743 { 744 struct cryptoini *c, *encini = NULL, *macini = NULL; 745 struct ubsec_softc *sc; 746 struct ubsec_session *ses = NULL; 747 MD5_CTX md5ctx; 748 SHA1_CTX sha1ctx; 749 int i, sesn; 750 751 sc = arg; 752 KASSERT(sc != NULL /*, ("ubsec_newsession: null softc")*/); 753 754 if (sidp == NULL || cri == NULL || sc == NULL) 755 return (EINVAL); 756 757 for (c = cri; c != NULL; c = c->cri_next) { 758 if (c->cri_alg == CRYPTO_MD5_HMAC_96 || 759 c->cri_alg == CRYPTO_SHA1_HMAC_96) { 760 if (macini) 761 return (EINVAL); 762 macini = c; 763 } else if (c->cri_alg == CRYPTO_DES_CBC || 764 c->cri_alg == CRYPTO_3DES_CBC) { 765 if (encini) 766 return (EINVAL); 767 encini = c; 768 } else 769 return (EINVAL); 770 } 771 if (encini == NULL && macini == NULL) 772 return (EINVAL); 773 774 if (sc->sc_sessions == NULL) { 775 ses = sc->sc_sessions = (struct ubsec_session *)malloc( 776 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); 777 if (ses == NULL) 778 return (ENOMEM); 779 sesn = 0; 780 sc->sc_nsessions = 1; 781 } else { 782 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 783 if (sc->sc_sessions[sesn].ses_used == 0) { 784 ses = &sc->sc_sessions[sesn]; 785 break; 786 } 787 } 788 789 if (ses == NULL) { 790 sesn = sc->sc_nsessions; 791 ses = (struct ubsec_session *)malloc((sesn + 1) * 792 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); 793 if (ses == NULL) 794 return (ENOMEM); 795 memcpy(ses, sc->sc_sessions, sesn * 796 sizeof(struct ubsec_session)); 797 memset(sc->sc_sessions, 0, sesn * 798 sizeof(struct ubsec_session)); 799 free(sc->sc_sessions, M_DEVBUF); 800 sc->sc_sessions = ses; 801 ses = &sc->sc_sessions[sesn]; 802 sc->sc_nsessions++; 803 } 804 } 805 806 memset(ses, 0, sizeof(struct ubsec_session)); 807 ses->ses_used = 1; 808 if (encini) { 809 /* get an IV, network byte order */ 810 #ifdef __NetBSD__ 811 rnd_extract_data(ses->ses_iv, 812 sizeof(ses->ses_iv), RND_EXTRACT_ANY); 813 #else 814 get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv)); 815 #endif 816 817 /* Go ahead and compute key in ubsec's byte order */ 818 if (encini->cri_alg == CRYPTO_DES_CBC) { 819 memcpy(&ses->ses_deskey[0], encini->cri_key, 8); 820 memcpy(&ses->ses_deskey[2], encini->cri_key, 8); 821 memcpy(&ses->ses_deskey[4], encini->cri_key, 8); 822 } else 823 memcpy(ses->ses_deskey, encini->cri_key, 24); 824 825 SWAP32(ses->ses_deskey[0]); 826 SWAP32(ses->ses_deskey[1]); 827 SWAP32(ses->ses_deskey[2]); 828 SWAP32(ses->ses_deskey[3]); 829 SWAP32(ses->ses_deskey[4]); 830 SWAP32(ses->ses_deskey[5]); 831 } 832 833 if (macini) { 834 for (i = 0; i < macini->cri_klen / 8; i++) 835 macini->cri_key[i] ^= HMAC_IPAD_VAL; 836 837 if (macini->cri_alg == CRYPTO_MD5_HMAC_96) { 838 MD5Init(&md5ctx); 839 MD5Update(&md5ctx, macini->cri_key, 840 macini->cri_klen / 8); 841 MD5Update(&md5ctx, hmac_ipad_buffer, 842 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 843 memcpy(ses->ses_hminner, md5ctx.state, 844 sizeof(md5ctx.state)); 845 } else { 846 SHA1Init(&sha1ctx); 847 SHA1Update(&sha1ctx, macini->cri_key, 848 macini->cri_klen / 8); 849 SHA1Update(&sha1ctx, hmac_ipad_buffer, 850 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 851 memcpy(ses->ses_hminner, sha1ctx.state, 852 sizeof(sha1ctx.state)); 853 } 854 855 for (i = 0; i < macini->cri_klen / 8; i++) 856 macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 857 858 if (macini->cri_alg == CRYPTO_MD5_HMAC_96) { 859 MD5Init(&md5ctx); 860 MD5Update(&md5ctx, macini->cri_key, 861 macini->cri_klen / 8); 862 MD5Update(&md5ctx, hmac_opad_buffer, 863 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 864 memcpy(ses->ses_hmouter, md5ctx.state, 865 sizeof(md5ctx.state)); 866 } else { 867 SHA1Init(&sha1ctx); 868 SHA1Update(&sha1ctx, macini->cri_key, 869 macini->cri_klen / 8); 870 SHA1Update(&sha1ctx, hmac_opad_buffer, 871 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 872 memcpy(ses->ses_hmouter, sha1ctx.state, 873 sizeof(sha1ctx.state)); 874 } 875 876 for (i = 0; i < macini->cri_klen / 8; i++) 877 macini->cri_key[i] ^= HMAC_OPAD_VAL; 878 } 879 880 *sidp = UBSEC_SID(device_unit(&sc->sc_dv), sesn); 881 return (0); 882 } 883 884 /* 885 * Deallocate a session. 886 */ 887 static int 888 ubsec_freesession(void *arg, u_int64_t tid) 889 { 890 struct ubsec_softc *sc; 891 int session; 892 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; 893 894 sc = arg; 895 KASSERT(sc != NULL /*, ("ubsec_freesession: null softc")*/); 896 897 session = UBSEC_SESSION(sid); 898 if (session >= sc->sc_nsessions) 899 return (EINVAL); 900 901 memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session])); 902 return (0); 903 } 904 905 #ifdef __FreeBSD__ /* Ugly gratuitous changes to bus_dma */ 906 static void 907 ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) 908 { 909 struct ubsec_operand *op = arg; 910 911 KASSERT(nsegs <= UBS_MAX_SCATTER 912 /*, ("Too many DMA segments returned when mapping operand")*/); 913 #ifdef UBSEC_DEBUG 914 if (ubsec_debug) 915 printf("ubsec_op_cb: mapsize %u nsegs %d\n", 916 (u_int) mapsize, nsegs); 917 #endif 918 op->mapsize = mapsize; 919 op->nsegs = nsegs; 920 memcpy(op->segs, seg, nsegs * sizeof (seg[0])); 921 } 922 #endif 923 924 static int 925 ubsec_process(void *arg, struct cryptop *crp, int hint) 926 { 927 struct ubsec_q *q = NULL; 928 #ifdef __OpenBSD__ 929 int card; 930 #endif 931 int err = 0, i, j, s, nicealign; 932 struct ubsec_softc *sc; 933 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 934 int encoffset = 0, macoffset = 0, cpskip, cpoffset; 935 int sskip, dskip, stheend, dtheend; 936 int16_t coffset; 937 struct ubsec_session *ses; 938 struct ubsec_pktctx ctx; 939 struct ubsec_dma *dmap = NULL; 940 941 sc = arg; 942 KASSERT(sc != NULL /*, ("ubsec_process: null softc")*/); 943 944 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { 945 ubsecstats.hst_invalid++; 946 return (EINVAL); 947 } 948 if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { 949 ubsecstats.hst_badsession++; 950 return (EINVAL); 951 } 952 953 s = splnet(); 954 955 if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { 956 ubsecstats.hst_queuefull++; 957 sc->sc_needwakeup |= CRYPTO_SYMQ; 958 splx(s); 959 return(ERESTART); 960 } 961 962 q = SIMPLEQ_FIRST(&sc->sc_freequeue); 963 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next); 964 splx(s); 965 966 dmap = q->q_dma; /* Save dma pointer */ 967 memset(q, 0, sizeof(struct ubsec_q)); 968 memset(&ctx, 0, sizeof(ctx)); 969 970 q->q_sesn = UBSEC_SESSION(crp->crp_sid); 971 q->q_dma = dmap; 972 ses = &sc->sc_sessions[q->q_sesn]; 973 974 if (crp->crp_flags & CRYPTO_F_IMBUF) { 975 q->q_src_m = (struct mbuf *)crp->crp_buf; 976 q->q_dst_m = (struct mbuf *)crp->crp_buf; 977 } else if (crp->crp_flags & CRYPTO_F_IOV) { 978 q->q_src_io = (struct uio *)crp->crp_buf; 979 q->q_dst_io = (struct uio *)crp->crp_buf; 980 } else { 981 ubsecstats.hst_badflags++; 982 err = EINVAL; 983 goto errout; /* XXX we don't handle contiguous blocks! */ 984 } 985 986 memset(&dmap->d_dma->d_mcr, 0, sizeof(struct ubsec_mcr)); 987 988 dmap->d_dma->d_mcr.mcr_pkts = htole16(1); 989 dmap->d_dma->d_mcr.mcr_flags = 0; 990 q->q_crp = crp; 991 992 crd1 = crp->crp_desc; 993 if (crd1 == NULL) { 994 ubsecstats.hst_nodesc++; 995 err = EINVAL; 996 goto errout; 997 } 998 crd2 = crd1->crd_next; 999 1000 if (crd2 == NULL) { 1001 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 || 1002 crd1->crd_alg == CRYPTO_SHA1_HMAC_96) { 1003 maccrd = crd1; 1004 enccrd = NULL; 1005 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 1006 crd1->crd_alg == CRYPTO_3DES_CBC) { 1007 maccrd = NULL; 1008 enccrd = crd1; 1009 } else { 1010 ubsecstats.hst_badalg++; 1011 err = EINVAL; 1012 goto errout; 1013 } 1014 } else { 1015 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 || 1016 crd1->crd_alg == CRYPTO_SHA1_HMAC_96) && 1017 (crd2->crd_alg == CRYPTO_DES_CBC || 1018 crd2->crd_alg == CRYPTO_3DES_CBC) && 1019 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 1020 maccrd = crd1; 1021 enccrd = crd2; 1022 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 1023 crd1->crd_alg == CRYPTO_3DES_CBC) && 1024 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 || 1025 crd2->crd_alg == CRYPTO_SHA1_HMAC_96) && 1026 (crd1->crd_flags & CRD_F_ENCRYPT)) { 1027 enccrd = crd1; 1028 maccrd = crd2; 1029 } else { 1030 /* 1031 * We cannot order the ubsec as requested 1032 */ 1033 ubsecstats.hst_badalg++; 1034 err = EINVAL; 1035 goto errout; 1036 } 1037 } 1038 1039 if (enccrd) { 1040 encoffset = enccrd->crd_skip; 1041 ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); 1042 1043 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 1044 q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; 1045 1046 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1047 memcpy(ctx.pc_iv, enccrd->crd_iv, 8); 1048 else { 1049 ctx.pc_iv[0] = ses->ses_iv[0]; 1050 ctx.pc_iv[1] = ses->ses_iv[1]; 1051 } 1052 1053 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { 1054 if (crp->crp_flags & CRYPTO_F_IMBUF) 1055 m_copyback(q->q_src_m, 1056 enccrd->crd_inject, 1057 8, (void *)ctx.pc_iv); 1058 else if (crp->crp_flags & CRYPTO_F_IOV) 1059 cuio_copyback(q->q_src_io, 1060 enccrd->crd_inject, 1061 8, (void *)ctx.pc_iv); 1062 } 1063 } else { 1064 ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); 1065 1066 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1067 memcpy(ctx.pc_iv, enccrd->crd_iv, 8); 1068 else if (crp->crp_flags & CRYPTO_F_IMBUF) 1069 m_copydata(q->q_src_m, enccrd->crd_inject, 1070 8, (void *)ctx.pc_iv); 1071 else if (crp->crp_flags & CRYPTO_F_IOV) 1072 cuio_copydata(q->q_src_io, 1073 enccrd->crd_inject, 8, 1074 (void *)ctx.pc_iv); 1075 } 1076 1077 ctx.pc_deskey[0] = ses->ses_deskey[0]; 1078 ctx.pc_deskey[1] = ses->ses_deskey[1]; 1079 ctx.pc_deskey[2] = ses->ses_deskey[2]; 1080 ctx.pc_deskey[3] = ses->ses_deskey[3]; 1081 ctx.pc_deskey[4] = ses->ses_deskey[4]; 1082 ctx.pc_deskey[5] = ses->ses_deskey[5]; 1083 SWAP32(ctx.pc_iv[0]); 1084 SWAP32(ctx.pc_iv[1]); 1085 } 1086 1087 if (maccrd) { 1088 macoffset = maccrd->crd_skip; 1089 1090 if (maccrd->crd_alg == CRYPTO_MD5_HMAC_96) 1091 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); 1092 else 1093 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); 1094 1095 for (i = 0; i < 5; i++) { 1096 ctx.pc_hminner[i] = ses->ses_hminner[i]; 1097 ctx.pc_hmouter[i] = ses->ses_hmouter[i]; 1098 1099 HTOLE32(ctx.pc_hminner[i]); 1100 HTOLE32(ctx.pc_hmouter[i]); 1101 } 1102 } 1103 1104 if (enccrd && maccrd) { 1105 /* 1106 * ubsec cannot handle packets where the end of encryption 1107 * and authentication are not the same, or where the 1108 * encrypted part begins before the authenticated part. 1109 */ 1110 if ((encoffset + enccrd->crd_len) != 1111 (macoffset + maccrd->crd_len)) { 1112 ubsecstats.hst_lenmismatch++; 1113 err = EINVAL; 1114 goto errout; 1115 } 1116 if (enccrd->crd_skip < maccrd->crd_skip) { 1117 ubsecstats.hst_skipmismatch++; 1118 err = EINVAL; 1119 goto errout; 1120 } 1121 sskip = maccrd->crd_skip; 1122 cpskip = dskip = enccrd->crd_skip; 1123 stheend = maccrd->crd_len; 1124 dtheend = enccrd->crd_len; 1125 coffset = enccrd->crd_skip - maccrd->crd_skip; 1126 cpoffset = cpskip + dtheend; 1127 #ifdef UBSEC_DEBUG 1128 if (ubsec_debug) { 1129 printf("mac: skip %d, len %d, inject %d\n", 1130 maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); 1131 printf("enc: skip %d, len %d, inject %d\n", 1132 enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); 1133 printf("src: skip %d, len %d\n", sskip, stheend); 1134 printf("dst: skip %d, len %d\n", dskip, dtheend); 1135 printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", 1136 coffset, stheend, cpskip, cpoffset); 1137 } 1138 #endif 1139 } else { 1140 cpskip = dskip = sskip = macoffset + encoffset; 1141 dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; 1142 cpoffset = cpskip + dtheend; 1143 coffset = 0; 1144 } 1145 ctx.pc_offset = htole16(coffset >> 2); 1146 1147 /* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */ 1148 if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER, 1149 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) { 1150 err = ENOMEM; 1151 goto errout; 1152 } 1153 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1154 if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, 1155 q->q_src_m, BUS_DMA_NOWAIT) != 0) { 1156 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1157 q->q_src_map = NULL; 1158 ubsecstats.hst_noload++; 1159 err = ENOMEM; 1160 goto errout; 1161 } 1162 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1163 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, 1164 q->q_src_io, BUS_DMA_NOWAIT) != 0) { 1165 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1166 q->q_src_map = NULL; 1167 ubsecstats.hst_noload++; 1168 err = ENOMEM; 1169 goto errout; 1170 } 1171 } 1172 nicealign = ubsec_dmamap_aligned(q->q_src_map); 1173 1174 dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); 1175 1176 #ifdef UBSEC_DEBUG 1177 if (ubsec_debug) 1178 printf("src skip: %d nicealign: %u\n", sskip, nicealign); 1179 #endif 1180 for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) { 1181 struct ubsec_pktbuf *pb; 1182 bus_size_t packl = q->q_src_map->dm_segs[i].ds_len; 1183 bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr; 1184 1185 if (sskip >= packl) { 1186 sskip -= packl; 1187 continue; 1188 } 1189 1190 packl -= sskip; 1191 packp += sskip; 1192 sskip = 0; 1193 1194 if (packl > 0xfffc) { 1195 err = EIO; 1196 goto errout; 1197 } 1198 1199 if (j == 0) 1200 pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; 1201 else 1202 pb = &dmap->d_dma->d_sbuf[j - 1]; 1203 1204 pb->pb_addr = htole32(packp); 1205 1206 if (stheend) { 1207 if (packl > stheend) { 1208 pb->pb_len = htole32(stheend); 1209 stheend = 0; 1210 } else { 1211 pb->pb_len = htole32(packl); 1212 stheend -= packl; 1213 } 1214 } else 1215 pb->pb_len = htole32(packl); 1216 1217 if ((i + 1) == q->q_src_map->dm_nsegs) 1218 pb->pb_next = 0; 1219 else 1220 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1221 offsetof(struct ubsec_dmachunk, d_sbuf[j])); 1222 j++; 1223 } 1224 1225 if (enccrd == NULL && maccrd != NULL) { 1226 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; 1227 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; 1228 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + 1229 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1230 #ifdef UBSEC_DEBUG 1231 if (ubsec_debug) 1232 printf("opkt: %x %x %x\n", 1233 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, 1234 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, 1235 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); 1236 1237 #endif 1238 } else { 1239 if (crp->crp_flags & CRYPTO_F_IOV) { 1240 if (!nicealign) { 1241 ubsecstats.hst_iovmisaligned++; 1242 err = EINVAL; 1243 goto errout; 1244 } 1245 /* XXX: ``what the heck's that'' 0xfff0? */ 1246 if (bus_dmamap_create(sc->sc_dmat, 0xfff0, 1247 UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, 1248 &q->q_dst_map) != 0) { 1249 ubsecstats.hst_nomap++; 1250 err = ENOMEM; 1251 goto errout; 1252 } 1253 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, 1254 q->q_dst_io, BUS_DMA_NOWAIT) != 0) { 1255 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1256 q->q_dst_map = NULL; 1257 ubsecstats.hst_noload++; 1258 err = ENOMEM; 1259 goto errout; 1260 } 1261 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1262 if (nicealign) { 1263 q->q_dst_m = q->q_src_m; 1264 q->q_dst_map = q->q_src_map; 1265 } else { 1266 int totlen, len; 1267 struct mbuf *m, *top, **mp; 1268 1269 ubsecstats.hst_unaligned++; 1270 totlen = q->q_src_map->dm_mapsize; 1271 if (q->q_src_m->m_flags & M_PKTHDR) { 1272 len = MHLEN; 1273 MGETHDR(m, M_DONTWAIT, MT_DATA); 1274 /*XXX FIXME: m_dup_pkthdr */ 1275 if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) { 1276 m_free(m); 1277 m = NULL; 1278 } 1279 } else { 1280 len = MLEN; 1281 MGET(m, M_DONTWAIT, MT_DATA); 1282 } 1283 if (m == NULL) { 1284 ubsecstats.hst_nombuf++; 1285 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1286 goto errout; 1287 } 1288 if (len == MHLEN) 1289 /*XXX was M_DUP_PKTHDR*/ 1290 M_COPY_PKTHDR(m, q->q_src_m); 1291 if (totlen >= MINCLSIZE) { 1292 MCLGET(m, M_DONTWAIT); 1293 if ((m->m_flags & M_EXT) == 0) { 1294 m_free(m); 1295 ubsecstats.hst_nomcl++; 1296 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1297 goto errout; 1298 } 1299 len = MCLBYTES; 1300 } 1301 m->m_len = len; 1302 top = NULL; 1303 mp = ⊤ 1304 1305 while (totlen > 0) { 1306 if (top) { 1307 MGET(m, M_DONTWAIT, MT_DATA); 1308 if (m == NULL) { 1309 m_freem(top); 1310 ubsecstats.hst_nombuf++; 1311 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1312 goto errout; 1313 } 1314 len = MLEN; 1315 } 1316 if (top && totlen >= MINCLSIZE) { 1317 MCLGET(m, M_DONTWAIT); 1318 if ((m->m_flags & M_EXT) == 0) { 1319 *mp = m; 1320 m_freem(top); 1321 ubsecstats.hst_nomcl++; 1322 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1323 goto errout; 1324 } 1325 len = MCLBYTES; 1326 } 1327 m->m_len = len = min(totlen, len); 1328 totlen -= len; 1329 *mp = m; 1330 mp = &m->m_next; 1331 } 1332 q->q_dst_m = top; 1333 ubsec_mcopy(q->q_src_m, q->q_dst_m, 1334 cpskip, cpoffset); 1335 /* XXX again, what the heck is that 0xfff0? */ 1336 if (bus_dmamap_create(sc->sc_dmat, 0xfff0, 1337 UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, 1338 &q->q_dst_map) != 0) { 1339 ubsecstats.hst_nomap++; 1340 err = ENOMEM; 1341 goto errout; 1342 } 1343 if (bus_dmamap_load_mbuf(sc->sc_dmat, 1344 q->q_dst_map, q->q_dst_m, 1345 BUS_DMA_NOWAIT) != 0) { 1346 bus_dmamap_destroy(sc->sc_dmat, 1347 q->q_dst_map); 1348 q->q_dst_map = NULL; 1349 ubsecstats.hst_noload++; 1350 err = ENOMEM; 1351 goto errout; 1352 } 1353 } 1354 } else { 1355 ubsecstats.hst_badflags++; 1356 err = EINVAL; 1357 goto errout; 1358 } 1359 1360 #ifdef UBSEC_DEBUG 1361 if (ubsec_debug) 1362 printf("dst skip: %d\n", dskip); 1363 #endif 1364 for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) { 1365 struct ubsec_pktbuf *pb; 1366 bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len; 1367 bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr; 1368 1369 if (dskip >= packl) { 1370 dskip -= packl; 1371 continue; 1372 } 1373 1374 packl -= dskip; 1375 packp += dskip; 1376 dskip = 0; 1377 1378 if (packl > 0xfffc) { 1379 err = EIO; 1380 goto errout; 1381 } 1382 1383 if (j == 0) 1384 pb = &dmap->d_dma->d_mcr.mcr_opktbuf; 1385 else 1386 pb = &dmap->d_dma->d_dbuf[j - 1]; 1387 1388 pb->pb_addr = htole32(packp); 1389 1390 if (dtheend) { 1391 if (packl > dtheend) { 1392 pb->pb_len = htole32(dtheend); 1393 dtheend = 0; 1394 } else { 1395 pb->pb_len = htole32(packl); 1396 dtheend -= packl; 1397 } 1398 } else 1399 pb->pb_len = htole32(packl); 1400 1401 if ((i + 1) == q->q_dst_map->dm_nsegs) { 1402 if (maccrd) 1403 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1404 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1405 else 1406 pb->pb_next = 0; 1407 } else 1408 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1409 offsetof(struct ubsec_dmachunk, d_dbuf[j])); 1410 j++; 1411 } 1412 } 1413 1414 dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + 1415 offsetof(struct ubsec_dmachunk, d_ctx)); 1416 1417 if (sc->sc_flags & UBS_FLAGS_LONGCTX) { 1418 struct ubsec_pktctx_long *ctxl; 1419 1420 ctxl = (struct ubsec_pktctx_long *)((char *)dmap->d_alloc.dma_vaddr + 1421 offsetof(struct ubsec_dmachunk, d_ctx)); 1422 1423 /* transform small context into long context */ 1424 ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); 1425 ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); 1426 ctxl->pc_flags = ctx.pc_flags; 1427 ctxl->pc_offset = ctx.pc_offset; 1428 for (i = 0; i < 6; i++) 1429 ctxl->pc_deskey[i] = ctx.pc_deskey[i]; 1430 for (i = 0; i < 5; i++) 1431 ctxl->pc_hminner[i] = ctx.pc_hminner[i]; 1432 for (i = 0; i < 5; i++) 1433 ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; 1434 ctxl->pc_iv[0] = ctx.pc_iv[0]; 1435 ctxl->pc_iv[1] = ctx.pc_iv[1]; 1436 } else 1437 memcpy((char *)dmap->d_alloc.dma_vaddr + 1438 offsetof(struct ubsec_dmachunk, d_ctx), &ctx, 1439 sizeof(struct ubsec_pktctx)); 1440 1441 s = splnet(); 1442 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); 1443 sc->sc_nqueue++; 1444 ubsecstats.hst_ipackets++; 1445 ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize; 1446 if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch) 1447 ubsec_feed(sc); 1448 splx(s); 1449 return (0); 1450 1451 errout: 1452 if (q != NULL) { 1453 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1454 m_freem(q->q_dst_m); 1455 1456 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1457 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1458 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1459 } 1460 if (q->q_src_map != NULL) { 1461 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1462 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1463 } 1464 1465 s = splnet(); 1466 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1467 splx(s); 1468 } 1469 #if 0 /* jonathan says: this openbsd code seems to be subsumed elsewhere */ 1470 if (err == EINVAL) 1471 ubsecstats.hst_invalid++; 1472 else 1473 ubsecstats.hst_nomem++; 1474 #endif 1475 if (err != ERESTART) { 1476 crp->crp_etype = err; 1477 crypto_done(crp); 1478 } else { 1479 sc->sc_needwakeup |= CRYPTO_SYMQ; 1480 } 1481 return (err); 1482 } 1483 1484 static void 1485 ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) 1486 { 1487 struct cryptop *crp = (struct cryptop *)q->q_crp; 1488 struct cryptodesc *crd; 1489 struct ubsec_dma *dmap = q->q_dma; 1490 1491 ubsecstats.hst_opackets++; 1492 ubsecstats.hst_obytes += dmap->d_alloc.dma_size; 1493 1494 bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0, 1495 dmap->d_alloc.dma_map->dm_mapsize, 1496 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1497 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1498 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 1499 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1500 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1501 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1502 } 1503 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 1504 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1505 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1506 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1507 1508 if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { 1509 m_freem(q->q_src_m); 1510 crp->crp_buf = (void *)q->q_dst_m; 1511 } 1512 1513 /* copy out IV for future use */ 1514 if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { 1515 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1516 if (crd->crd_alg != CRYPTO_DES_CBC && 1517 crd->crd_alg != CRYPTO_3DES_CBC) 1518 continue; 1519 if (crp->crp_flags & CRYPTO_F_IMBUF) 1520 m_copydata((struct mbuf *)crp->crp_buf, 1521 crd->crd_skip + crd->crd_len - 8, 8, 1522 (void *)sc->sc_sessions[q->q_sesn].ses_iv); 1523 else if (crp->crp_flags & CRYPTO_F_IOV) { 1524 cuio_copydata((struct uio *)crp->crp_buf, 1525 crd->crd_skip + crd->crd_len - 8, 8, 1526 (void *)sc->sc_sessions[q->q_sesn].ses_iv); 1527 } 1528 break; 1529 } 1530 } 1531 1532 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1533 if (crd->crd_alg != CRYPTO_MD5_HMAC_96 && 1534 crd->crd_alg != CRYPTO_SHA1_HMAC_96) 1535 continue; 1536 if (crp->crp_flags & CRYPTO_F_IMBUF) 1537 m_copyback((struct mbuf *)crp->crp_buf, 1538 crd->crd_inject, 12, 1539 (void *)dmap->d_dma->d_macbuf); 1540 else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) 1541 bcopy((void *)dmap->d_dma->d_macbuf, 1542 crp->crp_mac, 12); 1543 break; 1544 } 1545 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1546 crypto_done(crp); 1547 } 1548 1549 static void 1550 ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) 1551 { 1552 int i, j, dlen, slen; 1553 char *dptr, *sptr; 1554 1555 j = 0; 1556 sptr = srcm->m_data; 1557 slen = srcm->m_len; 1558 dptr = dstm->m_data; 1559 dlen = dstm->m_len; 1560 1561 while (1) { 1562 for (i = 0; i < min(slen, dlen); i++) { 1563 if (j < hoffset || j >= toffset) 1564 *dptr++ = *sptr++; 1565 slen--; 1566 dlen--; 1567 j++; 1568 } 1569 if (slen == 0) { 1570 srcm = srcm->m_next; 1571 if (srcm == NULL) 1572 return; 1573 sptr = srcm->m_data; 1574 slen = srcm->m_len; 1575 } 1576 if (dlen == 0) { 1577 dstm = dstm->m_next; 1578 if (dstm == NULL) 1579 return; 1580 dptr = dstm->m_data; 1581 dlen = dstm->m_len; 1582 } 1583 } 1584 } 1585 1586 /* 1587 * feed the key generator, must be called at splnet() or higher. 1588 */ 1589 static void 1590 ubsec_feed2(struct ubsec_softc *sc) 1591 { 1592 struct ubsec_q2 *q; 1593 1594 while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { 1595 if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) 1596 break; 1597 q = SIMPLEQ_FIRST(&sc->sc_queue2); 1598 1599 bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0, 1600 q->q_mcr.dma_map->dm_mapsize, 1601 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1602 bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1603 q->q_ctx.dma_map->dm_mapsize, 1604 BUS_DMASYNC_PREWRITE); 1605 1606 WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); 1607 q = SIMPLEQ_FIRST(&sc->sc_queue2); 1608 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, /*q,*/ q_next); 1609 --sc->sc_nqueue2; 1610 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); 1611 } 1612 } 1613 1614 /* 1615 * Callback for handling random numbers 1616 */ 1617 static void 1618 ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) 1619 { 1620 struct cryptkop *krp; 1621 struct ubsec_ctx_keyop *ctx; 1622 1623 ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; 1624 bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1625 q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1626 1627 switch (q->q_type) { 1628 #ifndef UBSEC_NO_RNG 1629 case UBS_CTXOP_RNGSHA1: 1630 case UBS_CTXOP_RNGBYPASS: { 1631 struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; 1632 u_int32_t *p; 1633 int i; 1634 1635 bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, 1636 rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1637 p = (u_int32_t *)rng->rng_buf.dma_vaddr; 1638 #ifndef __NetBSD__ 1639 for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++) 1640 add_true_randomness(letoh32(*p)); 1641 rng->rng_used = 0; 1642 #else 1643 /* XXX NetBSD rnd subsystem too weak */ 1644 i = 0; (void)i; /* shut off gcc warnings */ 1645 #endif 1646 #ifdef __OpenBSD__ 1647 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 1648 #else 1649 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1650 #endif 1651 break; 1652 } 1653 #endif 1654 case UBS_CTXOP_MODEXP: { 1655 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 1656 u_int rlen, clen; 1657 1658 krp = me->me_krp; 1659 rlen = (me->me_modbits + 7) / 8; 1660 clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; 1661 1662 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 1663 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1664 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 1665 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1666 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 1667 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1668 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 1669 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1670 1671 if (clen < rlen) 1672 krp->krp_status = E2BIG; 1673 else { 1674 if (sc->sc_flags & UBS_FLAGS_HWNORM) { 1675 memset(krp->krp_param[krp->krp_iparams].crp_p, 0, 1676 (krp->krp_param[krp->krp_iparams].crp_nbits 1677 + 7) / 8); 1678 bcopy(me->me_C.dma_vaddr, 1679 krp->krp_param[krp->krp_iparams].crp_p, 1680 (me->me_modbits + 7) / 8); 1681 } else 1682 ubsec_kshift_l(me->me_shiftbits, 1683 me->me_C.dma_vaddr, me->me_normbits, 1684 krp->krp_param[krp->krp_iparams].crp_p, 1685 krp->krp_param[krp->krp_iparams].crp_nbits); 1686 } 1687 1688 crypto_kdone(krp); 1689 1690 /* bzero all potentially sensitive data */ 1691 memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 1692 memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 1693 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 1694 memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); 1695 1696 /* Can't free here, so put us on the free list. */ 1697 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); 1698 break; 1699 } 1700 case UBS_CTXOP_RSAPRIV: { 1701 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 1702 u_int len; 1703 1704 krp = rp->rpr_krp; 1705 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0, 1706 rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1707 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0, 1708 rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1709 1710 len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; 1711 bcopy(rp->rpr_msgout.dma_vaddr, 1712 krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); 1713 1714 crypto_kdone(krp); 1715 1716 memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size); 1717 memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size); 1718 memset(rp->rpr_q.q_ctx.dma_vaddr, 0, rp->rpr_q.q_ctx.dma_size); 1719 1720 /* Can't free here, so put us on the free list. */ 1721 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); 1722 break; 1723 } 1724 default: 1725 printf("%s: unknown ctx op: %x\n", device_xname(&sc->sc_dv), 1726 letoh16(ctx->ctx_op)); 1727 break; 1728 } 1729 } 1730 1731 #ifndef UBSEC_NO_RNG 1732 static void 1733 ubsec_rng(void *vsc) 1734 { 1735 struct ubsec_softc *sc = vsc; 1736 struct ubsec_q2_rng *rng = &sc->sc_rng; 1737 struct ubsec_mcr *mcr; 1738 struct ubsec_ctx_rngbypass *ctx; 1739 int s; 1740 1741 s = splnet(); 1742 if (rng->rng_used) { 1743 splx(s); 1744 return; 1745 } 1746 sc->sc_nqueue2++; 1747 if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) 1748 goto out; 1749 1750 mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; 1751 ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; 1752 1753 mcr->mcr_pkts = htole16(1); 1754 mcr->mcr_flags = 0; 1755 mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); 1756 mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; 1757 mcr->mcr_ipktbuf.pb_len = 0; 1758 mcr->mcr_reserved = mcr->mcr_pktlen = 0; 1759 mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); 1760 mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & 1761 UBS_PKTBUF_LEN); 1762 mcr->mcr_opktbuf.pb_next = 0; 1763 1764 ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); 1765 ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1); 1766 rng->rng_q.q_type = UBS_CTXOP_RNGSHA1; 1767 1768 bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, 1769 rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 1770 1771 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); 1772 rng->rng_used = 1; 1773 ubsec_feed2(sc); 1774 ubsecstats.hst_rng++; 1775 splx(s); 1776 1777 return; 1778 1779 out: 1780 /* 1781 * Something weird happened, generate our own call back. 1782 */ 1783 sc->sc_nqueue2--; 1784 splx(s); 1785 #ifdef __OpenBSD__ 1786 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 1787 #else 1788 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1789 #endif 1790 } 1791 #endif /* UBSEC_NO_RNG */ 1792 1793 static int 1794 ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size, 1795 struct ubsec_dma_alloc *dma,int mapflags) 1796 { 1797 int r; 1798 1799 if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, 1800 &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0) 1801 goto fail_0; 1802 1803 if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, 1804 size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0) 1805 goto fail_1; 1806 1807 if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1808 BUS_DMA_NOWAIT, &dma->dma_map)) != 0) 1809 goto fail_2; 1810 1811 if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, 1812 size, NULL, BUS_DMA_NOWAIT)) != 0) 1813 goto fail_3; 1814 1815 dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; 1816 dma->dma_size = size; 1817 return (0); 1818 1819 fail_3: 1820 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 1821 fail_2: 1822 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size); 1823 fail_1: 1824 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 1825 fail_0: 1826 dma->dma_map = NULL; 1827 return (r); 1828 } 1829 1830 static void 1831 ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) 1832 { 1833 bus_dmamap_unload(sc->sc_dmat, dma->dma_map); 1834 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size); 1835 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 1836 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 1837 } 1838 1839 /* 1840 * Resets the board. Values in the regesters are left as is 1841 * from the reset (i.e. initial values are assigned elsewhere). 1842 */ 1843 static void 1844 ubsec_reset_board(struct ubsec_softc *sc) 1845 { 1846 volatile u_int32_t ctrl; 1847 1848 ctrl = READ_REG(sc, BS_CTRL); 1849 ctrl |= BS_CTRL_RESET; 1850 WRITE_REG(sc, BS_CTRL, ctrl); 1851 1852 /* 1853 * Wait aprox. 30 PCI clocks = 900 ns = 0.9 us 1854 */ 1855 DELAY(10); 1856 } 1857 1858 /* 1859 * Init Broadcom registers 1860 */ 1861 static void 1862 ubsec_init_board(struct ubsec_softc *sc) 1863 { 1864 u_int32_t ctrl; 1865 1866 ctrl = READ_REG(sc, BS_CTRL); 1867 ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); 1868 ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; 1869 1870 /* 1871 * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)). 1872 * anyone got hw docs? 1873 */ 1874 if (sc->sc_flags & UBS_FLAGS_KEY) 1875 ctrl |= BS_CTRL_MCR2INT; 1876 else 1877 ctrl &= ~BS_CTRL_MCR2INT; 1878 1879 if (sc->sc_flags & UBS_FLAGS_HWNORM) 1880 ctrl &= ~BS_CTRL_SWNORM; 1881 1882 WRITE_REG(sc, BS_CTRL, ctrl); 1883 } 1884 1885 /* 1886 * Init Broadcom PCI registers 1887 */ 1888 static void 1889 ubsec_init_pciregs(struct pci_attach_args *pa) 1890 { 1891 pci_chipset_tag_t pc = pa->pa_pc; 1892 u_int32_t misc; 1893 1894 /* 1895 * This will set the cache line size to 1, this will 1896 * force the BCM58xx chip just to do burst read/writes. 1897 * Cache line read/writes are to slow 1898 */ 1899 misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG); 1900 misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT)) 1901 | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT); 1902 pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc); 1903 } 1904 1905 /* 1906 * Clean up after a chip crash. 1907 * It is assumed that the caller in splnet() 1908 */ 1909 static void 1910 ubsec_cleanchip(struct ubsec_softc *sc) 1911 { 1912 struct ubsec_q *q; 1913 1914 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 1915 q = SIMPLEQ_FIRST(&sc->sc_qchip); 1916 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); 1917 ubsec_free_q(sc, q); 1918 } 1919 sc->sc_nqchip = 0; 1920 } 1921 1922 /* 1923 * free a ubsec_q 1924 * It is assumed that the caller is within splnet() 1925 */ 1926 static int 1927 ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) 1928 { 1929 struct ubsec_q *q2; 1930 struct cryptop *crp; 1931 int npkts; 1932 int i; 1933 1934 npkts = q->q_nstacked_mcrs; 1935 1936 for (i = 0; i < npkts; i++) { 1937 if(q->q_stacked_mcr[i]) { 1938 q2 = q->q_stacked_mcr[i]; 1939 1940 if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m)) 1941 m_freem(q2->q_dst_m); 1942 1943 crp = (struct cryptop *)q2->q_crp; 1944 1945 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); 1946 1947 crp->crp_etype = EFAULT; 1948 crypto_done(crp); 1949 } else { 1950 break; 1951 } 1952 } 1953 1954 /* 1955 * Free header MCR 1956 */ 1957 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1958 m_freem(q->q_dst_m); 1959 1960 crp = (struct cryptop *)q->q_crp; 1961 1962 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1963 1964 crp->crp_etype = EFAULT; 1965 crypto_done(crp); 1966 return(0); 1967 } 1968 1969 /* 1970 * Routine to reset the chip and clean up. 1971 * It is assumed that the caller is in splnet() 1972 */ 1973 static void 1974 ubsec_totalreset(struct ubsec_softc *sc) 1975 { 1976 ubsec_reset_board(sc); 1977 ubsec_init_board(sc); 1978 ubsec_cleanchip(sc); 1979 } 1980 1981 static int 1982 ubsec_dmamap_aligned(bus_dmamap_t map) 1983 { 1984 int i; 1985 1986 for (i = 0; i < map->dm_nsegs; i++) { 1987 if (map->dm_segs[i].ds_addr & 3) 1988 return (0); 1989 if ((i != (map->dm_nsegs - 1)) && 1990 (map->dm_segs[i].ds_len & 3)) 1991 return (0); 1992 } 1993 return (1); 1994 } 1995 1996 #ifdef __OpenBSD__ 1997 struct ubsec_softc * 1998 ubsec_kfind(struct cryptkop *krp) 1999 { 2000 struct ubsec_softc *sc; 2001 int i; 2002 2003 for (i = 0; i < ubsec_cd.cd_ndevs; i++) { 2004 sc = ubsec_cd.cd_devs[i]; 2005 if (sc == NULL) 2006 continue; 2007 if (sc->sc_cid == krp->krp_hid) 2008 return (sc); 2009 } 2010 return (NULL); 2011 } 2012 #endif 2013 2014 static void 2015 ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) 2016 { 2017 switch (q->q_type) { 2018 case UBS_CTXOP_MODEXP: { 2019 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 2020 2021 ubsec_dma_free(sc, &me->me_q.q_mcr); 2022 ubsec_dma_free(sc, &me->me_q.q_ctx); 2023 ubsec_dma_free(sc, &me->me_M); 2024 ubsec_dma_free(sc, &me->me_E); 2025 ubsec_dma_free(sc, &me->me_C); 2026 ubsec_dma_free(sc, &me->me_epb); 2027 free(me, M_DEVBUF); 2028 break; 2029 } 2030 case UBS_CTXOP_RSAPRIV: { 2031 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 2032 2033 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2034 ubsec_dma_free(sc, &rp->rpr_q.q_ctx); 2035 ubsec_dma_free(sc, &rp->rpr_msgin); 2036 ubsec_dma_free(sc, &rp->rpr_msgout); 2037 free(rp, M_DEVBUF); 2038 break; 2039 } 2040 default: 2041 printf("%s: invalid kfree 0x%x\n", device_xname(&sc->sc_dv), 2042 q->q_type); 2043 break; 2044 } 2045 } 2046 2047 static int 2048 ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) 2049 { 2050 struct ubsec_softc *sc; 2051 int r; 2052 2053 if (krp == NULL || krp->krp_callback == NULL) 2054 return (EINVAL); 2055 #ifdef __OpenBSD__ 2056 if ((sc = ubsec_kfind(krp)) == NULL) 2057 return (EINVAL); 2058 #else 2059 sc = arg; 2060 KASSERT(sc != NULL /*, ("ubsec_kprocess: null softc")*/); 2061 #endif 2062 2063 while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { 2064 struct ubsec_q2 *q; 2065 2066 q = SIMPLEQ_FIRST(&sc->sc_q2free); 2067 SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next); 2068 ubsec_kfree(sc, q); 2069 } 2070 2071 switch (krp->krp_op) { 2072 case CRK_MOD_EXP: 2073 if (sc->sc_flags & UBS_FLAGS_HWNORM) 2074 r = ubsec_kprocess_modexp_hw(sc, krp, hint); 2075 else 2076 r = ubsec_kprocess_modexp_sw(sc, krp, hint); 2077 break; 2078 case CRK_MOD_EXP_CRT: 2079 r = ubsec_kprocess_rsapriv(sc, krp, hint); 2080 break; 2081 default: 2082 printf("%s: kprocess: invalid op 0x%x\n", 2083 device_xname(&sc->sc_dv), krp->krp_op); 2084 krp->krp_status = EOPNOTSUPP; 2085 crypto_kdone(krp); 2086 r = 0; 2087 } 2088 return (r); 2089 } 2090 2091 /* 2092 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) 2093 */ 2094 static int 2095 ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, 2096 int hint) 2097 { 2098 struct ubsec_q2_modexp *me; 2099 struct ubsec_mcr *mcr; 2100 struct ubsec_ctx_modexp *ctx; 2101 struct ubsec_pktbuf *epb; 2102 int s, err = 0; 2103 u_int nbits, normbits, mbits, shiftbits, ebits; 2104 2105 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2106 if (me == NULL) { 2107 err = ENOMEM; 2108 goto errout; 2109 } 2110 memset(me, 0, sizeof *me); 2111 me->me_krp = krp; 2112 me->me_q.q_type = UBS_CTXOP_MODEXP; 2113 2114 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2115 if (nbits <= 512) 2116 normbits = 512; 2117 else if (nbits <= 768) 2118 normbits = 768; 2119 else if (nbits <= 1024) 2120 normbits = 1024; 2121 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2122 normbits = 1536; 2123 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2124 normbits = 2048; 2125 else { 2126 err = E2BIG; 2127 goto errout; 2128 } 2129 2130 shiftbits = normbits - nbits; 2131 2132 me->me_modbits = nbits; 2133 me->me_shiftbits = shiftbits; 2134 me->me_normbits = normbits; 2135 2136 /* Sanity check: result bits must be >= true modulus bits. */ 2137 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2138 err = ERANGE; 2139 goto errout; 2140 } 2141 2142 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2143 &me->me_q.q_mcr, 0)) { 2144 err = ENOMEM; 2145 goto errout; 2146 } 2147 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2148 2149 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2150 &me->me_q.q_ctx, 0)) { 2151 err = ENOMEM; 2152 goto errout; 2153 } 2154 2155 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2156 if (mbits > nbits) { 2157 err = E2BIG; 2158 goto errout; 2159 } 2160 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2161 err = ENOMEM; 2162 goto errout; 2163 } 2164 ubsec_kshift_r(shiftbits, 2165 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, 2166 me->me_M.dma_vaddr, normbits); 2167 2168 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2169 err = ENOMEM; 2170 goto errout; 2171 } 2172 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2173 2174 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2175 if (ebits > nbits) { 2176 err = E2BIG; 2177 goto errout; 2178 } 2179 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2180 err = ENOMEM; 2181 goto errout; 2182 } 2183 ubsec_kshift_r(shiftbits, 2184 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, 2185 me->me_E.dma_vaddr, normbits); 2186 2187 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2188 &me->me_epb, 0)) { 2189 err = ENOMEM; 2190 goto errout; 2191 } 2192 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2193 epb->pb_addr = htole32(me->me_E.dma_paddr); 2194 epb->pb_next = 0; 2195 epb->pb_len = htole32(normbits / 8); 2196 2197 #ifdef UBSEC_DEBUG 2198 if (ubsec_debug) { 2199 printf("Epb "); 2200 ubsec_dump_pb(epb); 2201 } 2202 #endif 2203 2204 mcr->mcr_pkts = htole16(1); 2205 mcr->mcr_flags = 0; 2206 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2207 mcr->mcr_reserved = 0; 2208 mcr->mcr_pktlen = 0; 2209 2210 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2211 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2212 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2213 2214 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2215 mcr->mcr_opktbuf.pb_next = 0; 2216 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2217 2218 #ifdef DIAGNOSTIC 2219 /* Misaligned output buffer will hang the chip. */ 2220 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2221 panic("%s: modexp invalid addr 0x%x", 2222 device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_addr)); 2223 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2224 panic("%s: modexp invalid len 0x%x", 2225 device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_len)); 2226 #endif 2227 2228 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2229 memset(ctx, 0, sizeof(*ctx)); 2230 ubsec_kshift_r(shiftbits, 2231 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, 2232 ctx->me_N, normbits); 2233 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2234 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2235 ctx->me_E_len = htole16(nbits); 2236 ctx->me_N_len = htole16(nbits); 2237 2238 #ifdef UBSEC_DEBUG 2239 if (ubsec_debug) { 2240 ubsec_dump_mcr(mcr); 2241 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2242 } 2243 #endif 2244 2245 /* 2246 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2247 * everything else. 2248 */ 2249 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2250 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2251 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2252 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2253 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2254 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2255 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2256 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2257 2258 /* Enqueue and we're done... */ 2259 s = splnet(); 2260 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2261 ubsec_feed2(sc); 2262 ubsecstats.hst_modexp++; 2263 splx(s); 2264 2265 return (0); 2266 2267 errout: 2268 if (me != NULL) { 2269 if (me->me_q.q_mcr.dma_map != NULL) 2270 ubsec_dma_free(sc, &me->me_q.q_mcr); 2271 if (me->me_q.q_ctx.dma_map != NULL) { 2272 memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); 2273 ubsec_dma_free(sc, &me->me_q.q_ctx); 2274 } 2275 if (me->me_M.dma_map != NULL) { 2276 memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 2277 ubsec_dma_free(sc, &me->me_M); 2278 } 2279 if (me->me_E.dma_map != NULL) { 2280 memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 2281 ubsec_dma_free(sc, &me->me_E); 2282 } 2283 if (me->me_C.dma_map != NULL) { 2284 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2285 ubsec_dma_free(sc, &me->me_C); 2286 } 2287 if (me->me_epb.dma_map != NULL) 2288 ubsec_dma_free(sc, &me->me_epb); 2289 free(me, M_DEVBUF); 2290 } 2291 krp->krp_status = err; 2292 crypto_kdone(krp); 2293 return (0); 2294 } 2295 2296 /* 2297 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2298 */ 2299 static int 2300 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, 2301 int hint) 2302 { 2303 struct ubsec_q2_modexp *me; 2304 struct ubsec_mcr *mcr; 2305 struct ubsec_ctx_modexp *ctx; 2306 struct ubsec_pktbuf *epb; 2307 int s, err = 0; 2308 u_int nbits, normbits, mbits, shiftbits, ebits; 2309 2310 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2311 if (me == NULL) { 2312 err = ENOMEM; 2313 goto errout; 2314 } 2315 memset(me, 0, sizeof *me); 2316 me->me_krp = krp; 2317 me->me_q.q_type = UBS_CTXOP_MODEXP; 2318 2319 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2320 if (nbits <= 512) 2321 normbits = 512; 2322 else if (nbits <= 768) 2323 normbits = 768; 2324 else if (nbits <= 1024) 2325 normbits = 1024; 2326 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2327 normbits = 1536; 2328 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2329 normbits = 2048; 2330 else { 2331 err = E2BIG; 2332 goto errout; 2333 } 2334 2335 shiftbits = normbits - nbits; 2336 2337 /* XXX ??? */ 2338 me->me_modbits = nbits; 2339 me->me_shiftbits = shiftbits; 2340 me->me_normbits = normbits; 2341 2342 /* Sanity check: result bits must be >= true modulus bits. */ 2343 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2344 err = ERANGE; 2345 goto errout; 2346 } 2347 2348 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2349 &me->me_q.q_mcr, 0)) { 2350 err = ENOMEM; 2351 goto errout; 2352 } 2353 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2354 2355 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2356 &me->me_q.q_ctx, 0)) { 2357 err = ENOMEM; 2358 goto errout; 2359 } 2360 2361 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2362 if (mbits > nbits) { 2363 err = E2BIG; 2364 goto errout; 2365 } 2366 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2367 err = ENOMEM; 2368 goto errout; 2369 } 2370 memset(me->me_M.dma_vaddr, 0, normbits / 8); 2371 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, 2372 me->me_M.dma_vaddr, (mbits + 7) / 8); 2373 2374 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2375 err = ENOMEM; 2376 goto errout; 2377 } 2378 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2379 2380 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2381 if (ebits > nbits) { 2382 err = E2BIG; 2383 goto errout; 2384 } 2385 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2386 err = ENOMEM; 2387 goto errout; 2388 } 2389 memset(me->me_E.dma_vaddr, 0, normbits / 8); 2390 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, 2391 me->me_E.dma_vaddr, (ebits + 7) / 8); 2392 2393 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2394 &me->me_epb, 0)) { 2395 err = ENOMEM; 2396 goto errout; 2397 } 2398 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2399 epb->pb_addr = htole32(me->me_E.dma_paddr); 2400 epb->pb_next = 0; 2401 epb->pb_len = htole32((ebits + 7) / 8); 2402 2403 #ifdef UBSEC_DEBUG 2404 if (ubsec_debug) { 2405 printf("Epb "); 2406 ubsec_dump_pb(epb); 2407 } 2408 #endif 2409 2410 mcr->mcr_pkts = htole16(1); 2411 mcr->mcr_flags = 0; 2412 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2413 mcr->mcr_reserved = 0; 2414 mcr->mcr_pktlen = 0; 2415 2416 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2417 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2418 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2419 2420 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2421 mcr->mcr_opktbuf.pb_next = 0; 2422 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2423 2424 #ifdef DIAGNOSTIC 2425 /* Misaligned output buffer will hang the chip. */ 2426 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2427 panic("%s: modexp invalid addr 0x%x", 2428 device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_addr)); 2429 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2430 panic("%s: modexp invalid len 0x%x", 2431 device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_len)); 2432 #endif 2433 2434 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2435 memset(ctx, 0, sizeof(*ctx)); 2436 memcpy(ctx->me_N, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, 2437 (nbits + 7) / 8); 2438 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2439 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2440 ctx->me_E_len = htole16(ebits); 2441 ctx->me_N_len = htole16(nbits); 2442 2443 #ifdef UBSEC_DEBUG 2444 if (ubsec_debug) { 2445 ubsec_dump_mcr(mcr); 2446 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2447 } 2448 #endif 2449 2450 /* 2451 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2452 * everything else. 2453 */ 2454 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2455 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2456 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2457 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2458 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2459 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2460 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2461 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2462 2463 /* Enqueue and we're done... */ 2464 s = splnet(); 2465 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2466 ubsec_feed2(sc); 2467 splx(s); 2468 2469 return (0); 2470 2471 errout: 2472 if (me != NULL) { 2473 if (me->me_q.q_mcr.dma_map != NULL) 2474 ubsec_dma_free(sc, &me->me_q.q_mcr); 2475 if (me->me_q.q_ctx.dma_map != NULL) { 2476 memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); 2477 ubsec_dma_free(sc, &me->me_q.q_ctx); 2478 } 2479 if (me->me_M.dma_map != NULL) { 2480 memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 2481 ubsec_dma_free(sc, &me->me_M); 2482 } 2483 if (me->me_E.dma_map != NULL) { 2484 memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 2485 ubsec_dma_free(sc, &me->me_E); 2486 } 2487 if (me->me_C.dma_map != NULL) { 2488 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2489 ubsec_dma_free(sc, &me->me_C); 2490 } 2491 if (me->me_epb.dma_map != NULL) 2492 ubsec_dma_free(sc, &me->me_epb); 2493 free(me, M_DEVBUF); 2494 } 2495 krp->krp_status = err; 2496 crypto_kdone(krp); 2497 return (0); 2498 } 2499 2500 static int 2501 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, 2502 int hint) 2503 { 2504 struct ubsec_q2_rsapriv *rp = NULL; 2505 struct ubsec_mcr *mcr; 2506 struct ubsec_ctx_rsapriv *ctx; 2507 int s, err = 0; 2508 u_int padlen, msglen; 2509 2510 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); 2511 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); 2512 if (msglen > padlen) 2513 padlen = msglen; 2514 2515 if (padlen <= 256) 2516 padlen = 256; 2517 else if (padlen <= 384) 2518 padlen = 384; 2519 else if (padlen <= 512) 2520 padlen = 512; 2521 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) 2522 padlen = 768; 2523 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) 2524 padlen = 1024; 2525 else { 2526 err = E2BIG; 2527 goto errout; 2528 } 2529 2530 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { 2531 err = E2BIG; 2532 goto errout; 2533 } 2534 2535 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { 2536 err = E2BIG; 2537 goto errout; 2538 } 2539 2540 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { 2541 err = E2BIG; 2542 goto errout; 2543 } 2544 2545 rp = malloc(sizeof *rp, M_DEVBUF, M_NOWAIT|M_ZERO); 2546 if (rp == NULL) 2547 return (ENOMEM); 2548 rp->rpr_krp = krp; 2549 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 2550 2551 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2552 &rp->rpr_q.q_mcr, 0)) { 2553 err = ENOMEM; 2554 goto errout; 2555 } 2556 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; 2557 2558 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), 2559 &rp->rpr_q.q_ctx, 0)) { 2560 err = ENOMEM; 2561 goto errout; 2562 } 2563 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; 2564 memset(ctx, 0, sizeof *ctx); 2565 2566 /* Copy in p */ 2567 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, 2568 &ctx->rpr_buf[0 * (padlen / 8)], 2569 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); 2570 2571 /* Copy in q */ 2572 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, 2573 &ctx->rpr_buf[1 * (padlen / 8)], 2574 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); 2575 2576 /* Copy in dp */ 2577 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, 2578 &ctx->rpr_buf[2 * (padlen / 8)], 2579 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); 2580 2581 /* Copy in dq */ 2582 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, 2583 &ctx->rpr_buf[3 * (padlen / 8)], 2584 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); 2585 2586 /* Copy in pinv */ 2587 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, 2588 &ctx->rpr_buf[4 * (padlen / 8)], 2589 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); 2590 2591 msglen = padlen * 2; 2592 2593 /* Copy in input message (aligned buffer/length). */ 2594 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { 2595 /* Is this likely? */ 2596 err = E2BIG; 2597 goto errout; 2598 } 2599 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { 2600 err = ENOMEM; 2601 goto errout; 2602 } 2603 memset(rp->rpr_msgin.dma_vaddr, 0, (msglen + 7) / 8); 2604 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, 2605 rp->rpr_msgin.dma_vaddr, 2606 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); 2607 2608 /* Prepare space for output message (aligned buffer/length). */ 2609 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { 2610 /* Is this likely? */ 2611 err = E2BIG; 2612 goto errout; 2613 } 2614 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { 2615 err = ENOMEM; 2616 goto errout; 2617 } 2618 memset(rp->rpr_msgout.dma_vaddr, 0, (msglen + 7) / 8); 2619 2620 mcr->mcr_pkts = htole16(1); 2621 mcr->mcr_flags = 0; 2622 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); 2623 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); 2624 mcr->mcr_ipktbuf.pb_next = 0; 2625 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); 2626 mcr->mcr_reserved = 0; 2627 mcr->mcr_pktlen = htole16(msglen); 2628 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); 2629 mcr->mcr_opktbuf.pb_next = 0; 2630 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); 2631 2632 #ifdef DIAGNOSTIC 2633 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { 2634 panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)", 2635 device_xname(&sc->sc_dv), (u_long) rp->rpr_msgin.dma_paddr, 2636 (u_long) rp->rpr_msgin.dma_size); 2637 } 2638 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { 2639 panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)", 2640 device_xname(&sc->sc_dv), (u_long) rp->rpr_msgout.dma_paddr, 2641 (u_long) rp->rpr_msgout.dma_size); 2642 } 2643 #endif 2644 2645 ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); 2646 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); 2647 ctx->rpr_q_len = htole16(padlen); 2648 ctx->rpr_p_len = htole16(padlen); 2649 2650 /* 2651 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2652 * everything else. 2653 */ 2654 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 2655 0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2656 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 2657 0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2658 2659 /* Enqueue and we're done... */ 2660 s = splnet(); 2661 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); 2662 ubsec_feed2(sc); 2663 ubsecstats.hst_modexpcrt++; 2664 splx(s); 2665 return (0); 2666 2667 errout: 2668 if (rp != NULL) { 2669 if (rp->rpr_q.q_mcr.dma_map != NULL) 2670 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2671 if (rp->rpr_msgin.dma_map != NULL) { 2672 memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size); 2673 ubsec_dma_free(sc, &rp->rpr_msgin); 2674 } 2675 if (rp->rpr_msgout.dma_map != NULL) { 2676 memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size); 2677 ubsec_dma_free(sc, &rp->rpr_msgout); 2678 } 2679 free(rp, M_DEVBUF); 2680 } 2681 krp->krp_status = err; 2682 crypto_kdone(krp); 2683 return (0); 2684 } 2685 2686 #ifdef UBSEC_DEBUG 2687 static void 2688 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) 2689 { 2690 printf("addr 0x%x (0x%x) next 0x%x\n", 2691 pb->pb_addr, pb->pb_len, pb->pb_next); 2692 } 2693 2694 static void 2695 ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c) 2696 { 2697 printf("CTX (0x%x):\n", c->ctx_len); 2698 switch (letoh16(c->ctx_op)) { 2699 case UBS_CTXOP_RNGBYPASS: 2700 case UBS_CTXOP_RNGSHA1: 2701 break; 2702 case UBS_CTXOP_MODEXP: 2703 { 2704 struct ubsec_ctx_modexp *cx = (void *)c; 2705 int i, len; 2706 2707 printf(" Elen %u, Nlen %u\n", 2708 letoh16(cx->me_E_len), letoh16(cx->me_N_len)); 2709 len = (cx->me_N_len + 7)/8; 2710 for (i = 0; i < len; i++) 2711 printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]); 2712 printf("\n"); 2713 break; 2714 } 2715 default: 2716 printf("unknown context: %x\n", c->ctx_op); 2717 } 2718 printf("END CTX\n"); 2719 } 2720 2721 static void 2722 ubsec_dump_mcr(struct ubsec_mcr *mcr) 2723 { 2724 volatile struct ubsec_mcr_add *ma; 2725 int i; 2726 2727 printf("MCR:\n"); 2728 printf(" pkts: %u, flags 0x%x\n", 2729 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); 2730 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; 2731 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { 2732 printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, 2733 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), 2734 letoh16(ma->mcr_reserved)); 2735 printf(" %d: ipkt ", i); 2736 ubsec_dump_pb(&ma->mcr_ipktbuf); 2737 printf(" %d: opkt ", i); 2738 ubsec_dump_pb(&ma->mcr_opktbuf); 2739 ma++; 2740 } 2741 printf("END MCR\n"); 2742 } 2743 #endif /* UBSEC_DEBUG */ 2744 2745 /* 2746 * Return the number of significant bits of a big number. 2747 */ 2748 static int 2749 ubsec_ksigbits(struct crparam *cr) 2750 { 2751 u_int plen = (cr->crp_nbits + 7) / 8; 2752 int i, sig = plen * 8; 2753 u_int8_t c, *p = cr->crp_p; 2754 2755 for (i = plen - 1; i >= 0; i--) { 2756 c = p[i]; 2757 if (c != 0) { 2758 while ((c & 0x80) == 0) { 2759 sig--; 2760 c <<= 1; 2761 } 2762 break; 2763 } 2764 sig -= 8; 2765 } 2766 return (sig); 2767 } 2768 2769 static void 2770 ubsec_kshift_r(u_int shiftbits, u_int8_t *src, u_int srcbits, 2771 u_int8_t *dst, u_int dstbits) 2772 { 2773 u_int slen, dlen; 2774 int i, si, di, n; 2775 2776 slen = (srcbits + 7) / 8; 2777 dlen = (dstbits + 7) / 8; 2778 2779 for (i = 0; i < slen; i++) 2780 dst[i] = src[i]; 2781 for (i = 0; i < dlen - slen; i++) 2782 dst[slen + i] = 0; 2783 2784 n = shiftbits / 8; 2785 if (n != 0) { 2786 si = dlen - n - 1; 2787 di = dlen - 1; 2788 while (si >= 0) 2789 dst[di--] = dst[si--]; 2790 while (di >= 0) 2791 dst[di--] = 0; 2792 } 2793 2794 n = shiftbits % 8; 2795 if (n != 0) { 2796 for (i = dlen - 1; i > 0; i--) 2797 dst[i] = (dst[i] << n) | 2798 (dst[i - 1] >> (8 - n)); 2799 dst[0] = dst[0] << n; 2800 } 2801 } 2802 2803 static void 2804 ubsec_kshift_l(u_int shiftbits, u_int8_t *src, u_int srcbits, 2805 u_int8_t *dst, u_int dstbits) 2806 { 2807 int slen, dlen, i, n; 2808 2809 slen = (srcbits + 7) / 8; 2810 dlen = (dstbits + 7) / 8; 2811 2812 n = shiftbits / 8; 2813 for (i = 0; i < slen; i++) 2814 dst[i] = src[i + n]; 2815 for (i = 0; i < dlen - slen; i++) 2816 dst[slen + i] = 0; 2817 2818 n = shiftbits % 8; 2819 if (n != 0) { 2820 for (i = 0; i < (dlen - 1); i++) 2821 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2822 dst[dlen - 1] = dst[dlen - 1] >> n; 2823 } 2824 } 2825