1 /* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.12 2003/06/04 17:56:59 sam Exp $ */ 2 /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ 3 4 /* 5 * Copyright (c) 2000 Jason L. Wright (jason@thought.net) 6 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) 7 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) 8 * 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by Jason L. Wright 22 * 4. The name of the author may not be used to endorse or promote products 23 * derived from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 29 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 34 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 * 37 * Effort sponsored in part by the Defense Advanced Research Projects 38 * Agency (DARPA) and Air Force Research Laboratory, Air Force 39 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 40 * 41 */ 42 43 /* 44 * uBsec 5[56]01, 58xx hardware crypto accelerator 45 */ 46 47 #include "opt_ubsec.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/proc.h> 52 #include <sys/errno.h> 53 #include <sys/malloc.h> 54 #include <sys/kernel.h> 55 #include <sys/mbuf.h> 56 #include <sys/sysctl.h> 57 #include <sys/endian.h> 58 #include <sys/bus.h> 59 #include <sys/rman.h> 60 #include <sys/md5.h> 61 #include <sys/random.h> 62 #include <sys/thread2.h> 63 64 #include <vm/vm.h> 65 #include <vm/pmap.h> 66 67 #include <machine/clock.h> 68 69 #include <crypto/sha1.h> 70 #include <opencrypto/cryptodev.h> 71 #include <opencrypto/cryptosoft.h> 72 73 #include "cryptodev_if.h" 74 75 #include <bus/pci/pcivar.h> 76 #include <bus/pci/pcireg.h> 77 78 /* grr, #defines for gratuitous incompatibility in queue.h */ 79 #define SIMPLEQ_HEAD STAILQ_HEAD 80 #define SIMPLEQ_ENTRY STAILQ_ENTRY 81 #define SIMPLEQ_INIT STAILQ_INIT 82 #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL 83 #define SIMPLEQ_EMPTY STAILQ_EMPTY 84 #define SIMPLEQ_FIRST STAILQ_FIRST 85 #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD 86 #define SIMPLEQ_FOREACH STAILQ_FOREACH 87 /* ditto for endian.h */ 88 #define letoh16(x) le16toh(x) 89 #define letoh32(x) le32toh(x) 90 91 #ifdef 
UBSEC_RNDTEST
#include "../rndtest/rndtest.h"
#endif
#include "ubsecreg.h"
#include "ubsecvar.h"

/*
 * Prototypes and count for the pci_device structure
 */
static	int ubsec_probe(device_t);
static	int ubsec_attach(device_t);
static	int ubsec_detach(device_t);
static	int ubsec_suspend(device_t);
static	int ubsec_resume(device_t);
static	void ubsec_shutdown(device_t);
static	void ubsec_intr(void *);
/* opencrypto entry points (registered via cryptodev_if below) */
static	int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int ubsec_freesession(device_t, u_int64_t);
static	int ubsec_process(device_t, struct cryptop *, int);
static	void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static	void ubsec_feed(struct ubsec_softc *);
static	void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static	void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static	int ubsec_feed2(struct ubsec_softc *);
static	void ubsec_rng(void *);
/* DMA memory helpers for the descriptor/context areas */
static	int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
			     struct ubsec_dma_alloc *, int);
#define	ubsec_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static	int ubsec_dmamap_aligned(struct ubsec_operand *op);

/* board bring-up / reset helpers */
static	void ubsec_reset_board(struct ubsec_softc *sc);
static	void ubsec_init_board(struct ubsec_softc *sc);
static	void ubsec_init_pciregs(device_t dev);
static	void ubsec_totalreset(struct ubsec_softc *sc);

static	int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q);

/* asymmetric-key (modexp/RSA) operation paths */
static	int ubsec_kprocess(device_t, struct cryptkop *, int);
static	int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int);
static	int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int);
static	int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int);
static	void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static	int ubsec_ksigbits(struct crparam *);
static	void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static	void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);


static device_method_t ubsec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ubsec_probe),
	DEVMETHOD(device_attach,	ubsec_attach),
	DEVMETHOD(device_detach,	ubsec_detach),
	DEVMETHOD(device_suspend,	ubsec_suspend),
	DEVMETHOD(device_resume,	ubsec_resume),
	DEVMETHOD(device_shutdown,	ubsec_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ubsec_newsession),
	DEVMETHOD(cryptodev_freesession,ubsec_freesession),
	DEVMETHOD(cryptodev_process,	ubsec_process),
	DEVMETHOD(cryptodev_kprocess,	ubsec_kprocess),

	DEVMETHOD_END
};
static driver_t ubsec_driver = {
	"ubsec",
	ubsec_methods,
	sizeof (struct ubsec_softc)
};
static devclass_t ubsec_devclass;

DECLARE_DUMMY_MODULE(ubsec);
DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, NULL, NULL);
MODULE_DEPEND(ubsec, crypto, 1, 1, 1);
#ifdef UBSEC_RNDTEST
MODULE_DEPEND(ubsec, rndtest, 1, 1, 1);
#endif

SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters");

#ifdef UBSEC_DEBUG
static	void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static	void ubsec_dump_mcr(struct ubsec_mcr *);
static	void ubsec_dump_ctx2(struct ubsec_ctx_keyop *);

static	int ubsec_debug = 0;
SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug,
	    0, "control debugging msgs");
#endif

/* 32-bit register access through the memory-mapped BAR (see BS_BAR). */
#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) 192 193 #define SWAP32(x) (x) = htole32(ntohl((x))) 194 #define HTOLE32(x) (x) = htole32(x) 195 196 197 struct ubsec_stats ubsecstats; 198 SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats, 199 ubsec_stats, "driver statistics"); 200 201 static int 202 ubsec_probe(device_t dev) 203 { 204 if (pci_get_vendor(dev) == PCI_VENDOR_SUN && 205 (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 || 206 pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K)) 207 return (0); 208 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && 209 (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 || 210 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)) 211 return (0); 212 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 213 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 || 214 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || 215 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 || 216 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 || 217 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || 218 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || 219 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 || 220 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825 221 )) 222 return (0); 223 return (ENXIO); 224 } 225 226 static const char* 227 ubsec_partname(struct ubsec_softc *sc) 228 { 229 /* XXX sprintf numbers when not decoded */ 230 switch (pci_get_vendor(sc->sc_dev)) { 231 case PCI_VENDOR_BROADCOM: 232 switch (pci_get_device(sc->sc_dev)) { 233 case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801"; 234 case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802"; 235 case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805"; 236 case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820"; 237 case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821"; 238 case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822"; 239 case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823"; 240 case PCI_PRODUCT_BROADCOM_5825: return "Broadcom 5825"; 241 } 242 return "Broadcom 
unknown-part"; 243 case PCI_VENDOR_BLUESTEEL: 244 switch (pci_get_device(sc->sc_dev)) { 245 case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601"; 246 } 247 return "Bluesteel unknown-part"; 248 case PCI_VENDOR_SUN: 249 switch (pci_get_device(sc->sc_dev)) { 250 case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821"; 251 case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K"; 252 } 253 return "Sun unknown-part"; 254 } 255 return "Unknown-vendor unknown-part"; 256 } 257 258 static void 259 default_harvest(struct rndtest_state *rsp __unused, void *buf, u_int count) 260 { 261 add_buffer_randomness_src(buf, count, RAND_SRC_UBSEC); 262 } 263 264 static int 265 ubsec_attach(device_t dev) 266 { 267 struct ubsec_softc *sc = device_get_softc(dev); 268 struct ubsec_dma *dmap; 269 u_int32_t cmd, i; 270 int rid; 271 272 KASSERT(sc != NULL, ("ubsec_attach: null software carrier!")); 273 bzero(sc, sizeof (*sc)); 274 sc->sc_dev = dev; 275 276 SIMPLEQ_INIT(&sc->sc_queue); 277 SIMPLEQ_INIT(&sc->sc_qchip); 278 SIMPLEQ_INIT(&sc->sc_queue2); 279 SIMPLEQ_INIT(&sc->sc_qchip2); 280 SIMPLEQ_INIT(&sc->sc_q2free); 281 282 /* XXX handle power management */ 283 284 sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR; 285 286 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && 287 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601) 288 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; 289 290 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 291 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || 292 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805)) 293 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; 294 295 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 296 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820) 297 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | 298 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; 299 300 if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 301 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || 302 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || 303 
pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 || 304 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825)) || 305 (pci_get_vendor(dev) == PCI_VENDOR_SUN && 306 (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K || 307 pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) { 308 /* NB: the 5821/5822 defines some additional status bits */ 309 sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY | 310 BS_STAT_MCR2_ALLEMPTY; 311 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | 312 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; 313 } 314 315 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 316 cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; 317 pci_write_config(dev, PCIR_COMMAND, cmd, 4); 318 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 319 320 if (!(cmd & PCIM_CMD_MEMEN)) { 321 device_printf(dev, "failed to enable memory mapping\n"); 322 goto bad; 323 } 324 325 if (!(cmd & PCIM_CMD_BUSMASTEREN)) { 326 device_printf(dev, "failed to enable bus mastering\n"); 327 goto bad; 328 } 329 330 /* 331 * Setup memory-mapping of PCI registers. 332 */ 333 rid = BS_BAR; 334 sc->sc_sr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 335 0, ~0, 1, RF_ACTIVE); 336 if (sc->sc_sr == NULL) { 337 device_printf(dev, "cannot map register space\n"); 338 goto bad; 339 } 340 sc->sc_st = rman_get_bustag(sc->sc_sr); 341 sc->sc_sh = rman_get_bushandle(sc->sc_sr); 342 343 /* 344 * Arrange interrupt line. 345 */ 346 rid = 0; 347 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 348 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); 349 if (sc->sc_irq == NULL) { 350 device_printf(dev, "could not map interrupt\n"); 351 goto bad1; 352 } 353 /* 354 * NB: Network code assumes we are blocked with splimp() 355 * so make sure the IRQ is mapped appropriately. 
356 */ 357 if (bus_setup_intr(dev, sc->sc_irq, 0, 358 ubsec_intr, sc, 359 &sc->sc_ih, NULL)) { 360 device_printf(dev, "could not establish interrupt\n"); 361 goto bad2; 362 } 363 364 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); 365 if (sc->sc_cid < 0) { 366 device_printf(dev, "could not get crypto driver id\n"); 367 goto bad3; 368 } 369 370 /* 371 * Setup DMA descriptor area. 372 */ 373 if (bus_dma_tag_create(NULL, /* parent */ 374 1, 0, /* alignment, bounds */ 375 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 376 BUS_SPACE_MAXADDR, /* highaddr */ 377 NULL, NULL, /* filter, filterarg */ 378 0x3ffff, /* maxsize */ 379 UBS_MAX_SCATTER, /* nsegments */ 380 0xffff, /* maxsegsize */ 381 BUS_DMA_ALLOCNOW, /* flags */ 382 &sc->sc_dmat)) { 383 device_printf(dev, "cannot allocate DMA tag\n"); 384 goto bad4; 385 } 386 SIMPLEQ_INIT(&sc->sc_freequeue); 387 dmap = sc->sc_dmaa; 388 for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { 389 struct ubsec_q *q; 390 391 q = kmalloc(sizeof(struct ubsec_q), M_DEVBUF, M_WAITOK); 392 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), 393 &dmap->d_alloc, 0)) { 394 device_printf(dev, "cannot allocate dma buffers\n"); 395 kfree(q, M_DEVBUF); 396 break; 397 } 398 dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; 399 400 q->q_dma = dmap; 401 sc->sc_queuea[i] = q; 402 403 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 404 } 405 406 device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc)); 407 408 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); 409 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); 410 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); 411 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); 412 413 /* 414 * Reset Broadcom chip 415 */ 416 ubsec_reset_board(sc); 417 418 /* 419 * Init Broadcom specific PCI settings 420 */ 421 ubsec_init_pciregs(dev); 422 423 /* 424 * Init Broadcom chip 425 */ 426 ubsec_init_board(sc); 427 428 #ifndef UBSEC_NO_RNG 429 if (sc->sc_flags & UBS_FLAGS_RNG) { 430 
sc->sc_statmask |= BS_STAT_MCR2_DONE; 431 #ifdef UBSEC_RNDTEST 432 sc->sc_rndtest = rndtest_attach(dev); 433 if (sc->sc_rndtest) 434 sc->sc_harvest = rndtest_harvest; 435 else 436 sc->sc_harvest = default_harvest; 437 #else 438 sc->sc_harvest = default_harvest; 439 #endif 440 441 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 442 &sc->sc_rng.rng_q.q_mcr, 0)) 443 goto skip_rng; 444 445 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), 446 &sc->sc_rng.rng_q.q_ctx, 0)) { 447 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 448 goto skip_rng; 449 } 450 451 if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * 452 UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { 453 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 454 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 455 goto skip_rng; 456 } 457 458 if (hz >= 100) 459 sc->sc_rnghz = hz / 100; 460 else 461 sc->sc_rnghz = 1; 462 callout_init(&sc->sc_rngto); 463 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 464 skip_rng: 465 ; 466 } 467 #endif /* UBSEC_NO_RNG */ 468 469 if (sc->sc_flags & UBS_FLAGS_KEY) { 470 sc->sc_statmask |= BS_STAT_MCR2_DONE; 471 472 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); 473 #if 0 474 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); 475 #endif 476 } 477 return (0); 478 bad4: 479 crypto_unregister_all(sc->sc_cid); 480 bad3: 481 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 482 bad2: 483 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 484 bad1: 485 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 486 bad: 487 return (ENXIO); 488 } 489 490 /* 491 * Detach a device that successfully probed. 
 */
static int
ubsec_detach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_detach: null software carrier"));

	/* XXX wait/abort active ops */

	crit_enter();

	/* Stop the RNG polling callout before tearing anything down. */
	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef UBSEC_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Release every pre-allocated request and its DMA chunk. */
	while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		struct ubsec_q *q;

		q = SIMPLEQ_FIRST(&sc->sc_freequeue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
		ubsec_dma_free(sc, &q->q_dma->d_alloc);
		kfree(q, M_DEVBUF);
	}
#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		/* Free the RNG MCR, bypass context and output buffer. */
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
		ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
	}
#endif /* UBSEC_NO_RNG */

	/* Tear down interrupt, DMA tag and register mapping last. */
	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_dmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	crit_exit();

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ubsec_shutdown(device_t dev)
{
#ifdef notyet
	ubsec_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.
 */
static int
ubsec_suspend(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_suspend: null software carrier"));
#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

/* Device resume routine; mirror of ubsec_suspend. */
static int
ubsec_resume(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_resume: null software carrier"));
#ifdef notyet
	/* XXX retore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * UBSEC Interrupt routine
 *
 * Reaps completed symmetric ops from MCR1 and (on key/RNG capable
 * parts) completed key/RNG ops from MCR2, handles DMA errors with a
 * full reset, and unblocks opencrypto if we previously pushed back.
 */
static void
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0) {
		/* not our interrupt (shared line) */
		return;
	}

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			/* chip sets UBS_MCR_DONE when the MCR completes */
			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
				break;

			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1+npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too, they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if(q->q_stacked_mcr[i]) {
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				} else {
					break;
				}
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packet to chip if there has been
		 * a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			ubsec_dma_sync(&q2->q_mcr,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
				/* not done yet; hand it back to the chip */
				ubsec_dma_sync(&q2->q_mcr,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packet to chip if there has been
			 * a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			kprintf("dmaerr %s@%08x\n",
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			device_printf(sc->sc_dev, "wakeup crypto (%x)\n",
				sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;

	/*
	 * Decide how many ops to combine in a single MCR.  We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure.  Note that
	 * aggregation only happens if ops are marked batch'able.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
	if (sc->sc_nqueue == 0)
		return;
	if (sc->sc_nqueue > 1) {
		npkts = 0;
		SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) {
			npkts++;
			if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0)
				break;
		}
	} else
		npkts = 1;
	/*
	 * Check device status before going any further.
	 */
	if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else
			ubsecstats.hst_mcr1full++;
		return;
	}
	if (sc->sc_nqueue > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = sc->sc_nqueue;
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts < 2)				/* special case 1 op */
		goto feed1;

	ubsecstats.hst_totbatch += npkts-1;
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		kprintf("merging %d records\n", npkts);
#endif /* UBSEC_DEBUG */

	/* The first queued op owns the MCR; the rest stack onto it. */
	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    BUS_DMASYNC_PREREAD);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
		--sc->sc_nqueue;

		/* copy the per-packet tail of q2's MCR into slot i of q's */
		v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add));
		bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	ubsec_dma_sync(&q->q_dma->d_alloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* writing the MCR's bus address to BS_MCR1 kicks the chip */
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

feed1:
	q = SIMPLEQ_FIRST(&sc->sc_queue);

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&q->q_dma->d_alloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		kprintf("feed1: q->chip %p %08x stat %08x\n",
		    q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr),
		    stat);
#endif /* UBSEC_DEBUG */
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
	--sc->sc_nqueue;
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip++;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	return;
}

/*
 * Install the DES/3DES key into the session in the chip's byte order.
 * For single DES the 8-byte key is replicated into all three key slots
 * (3DES with K1==K2==K3 is equivalent to DES).
 */
static void
ubsec_setup_enckey(struct ubsec_session *ses, int algo, caddr_t key)
{

	/* Go ahead and compute key in ubsec's byte order */
	if (algo == CRYPTO_DES_CBC) {
		bcopy(key, &ses->ses_deskey[0], 8);
		bcopy(key, &ses->ses_deskey[2], 8);
		bcopy(key, &ses->ses_deskey[4], 8);
	} else
		bcopy(key, ses->ses_deskey, 24);

	SWAP32(ses->ses_deskey[0]);
	SWAP32(ses->ses_deskey[1]);
SWAP32(ses->ses_deskey[2]);
	SWAP32(ses->ses_deskey[3]);
	SWAP32(ses->ses_deskey[4]);
	SWAP32(ses->ses_deskey[5]);
}

/*
 * Precompute the HMAC inner and outer hash states for the session so
 * the chip only has to continue the hashes per packet.  The key is
 * XORed with ipad/opad in place and restored to its original bytes
 * before returning.
 *
 * NOTE(review): assumes klen <= MD5/SHA1_HMAC_BLOCK_LEN (64); a longer
 * key would make the "BLOCK_LEN - klen" pad length negative — callers
 * appear to pass cri_klen/8 of standard HMAC keys, but confirm.
 */
static void
ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
{
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	/* inner = H(key ^ ipad || ...) */
	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(&md5ctx.A, ses->ses_hminner, sizeof(md5ctx.A) * 4);
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
	}

	/* flip the pad from ipad to opad in one pass */
	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	/* outer = H(key ^ opad || ...) */
	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(&md5ctx.A, ses->ses_hmouter, sizeof(md5ctx.A) * 4);
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
	}

	/* restore the caller's key bytes */
	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_session *ses = NULL;
	int sesn;
#if 0
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;
#endif

	KASSERT(sc != NULL, ("ubsec_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/*
	 * Accept at most one MAC (MD5/SHA1 HMAC) and one cipher
	 * (DES/3DES CBC) descriptor; anything else is unsupported.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	/* Find a free session slot, growing the array if all are used. */
	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = kmalloc(sizeof(struct ubsec_session),
		    M_DEVBUF, M_INTWAIT);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			/* grow by one; old array is zeroed before free */
			sesn = sc->sc_nsessions;
			ses = kmalloc((sesn + 1) * sizeof(struct ubsec_session),
			    M_DEVBUF, M_INTWAIT);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			kfree(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* random IV used when the caller does not supply one */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));
		if (encini->cri_key != NULL) {
			ubsec_setup_enckey(ses, encini->cri_alg,
			    encini->cri_key);
		}
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			/* default to the full digest length */
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (macini->cri_key != NULL) {
			ubsec_setup_mackey(ses, macini->cri_alg,
			    macini->cri_key, macini->cri_klen/8);
		}
	}

	*sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
ubsec_freesession(device_t dev, u_int64_t tid)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	int session;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	KASSERT(sc != NULL, ("ubsec_freesession: null softc"));
	if (sc == NULL)
		return (EINVAL);

	session = UBSEC_SESSION(sid);
	if (session >= sc->sc_nsessions)
		return (EINVAL);

	/* zeroing clears ses_used, returning the slot to the free pool */
	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

/*
 * bus_dmamap_load callback: record the scatter/gather segment list
 * for an operand so ubsec_process can build packet buffers from it.
 */
static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER,
		("Too many DMA segments returned when mapping operand"));
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		kprintf("ubsec_op_cb: mapsize %u nsegs %d error %d\n",
			(u_int) mapsize, nsegs, error);
#endif
	if (error != 0)
		return;

	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/* Symmetric operation dispatch (continues past this view). */
static int
ubsec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	struct ubsec_q *q = NULL;
	int err = 0, i, j, nicealign;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_pktctx ctx;
	struct ubsec_dma *dmap = NULL;

	if (crp == NULL || crp->crp_callback == NULL ||
sc == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		ubsecstats.hst_badsession++;
		return (EINVAL);
	}

	crit_enter();

	/* Pull a pre-allocated request off the free queue; back-pressure
	 * the framework (ERESTART + wakeup flag) when none are left. */
	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		crit_exit();
		return (ERESTART);
	}
	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
	crit_exit();

	dmap = q->q_dma;		/* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&ctx, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/* Sort the (at most two) descriptors into enccrd/maccrd.  The chip
	 * only supports MAC-then-decrypt or encrypt-then-MAC ordering. */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			ubsec_setup_enckey(ses, enccrd->crd_alg,
			    enccrd->crd_key);
		}

		encoffset = enccrd->crd_skip;
		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Outbound: remember to save the last block as the
			 * next IV (done in ubsec_callback). */
			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else {
				ctx.pc_iv[0] = ses->ses_iv[0];
				ctx.pc_iv[1] = ses->ses_iv[1];
			}

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
			}
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
			}
		}

		ctx.pc_deskey[0] = ses->ses_deskey[0];
		ctx.pc_deskey[1] = ses->ses_deskey[1];
		ctx.pc_deskey[2] = ses->ses_deskey[2];
		ctx.pc_deskey[3] = ses->ses_deskey[3];
		ctx.pc_deskey[4] = ses->ses_deskey[4];
		ctx.pc_deskey[5] = ses->ses_deskey[5];
		SWAP32(ctx.pc_iv[0]);
		SWAP32(ctx.pc_iv[1]);
	}

	if (maccrd) {
		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			ubsec_setup_mackey(ses, maccrd->crd_alg,
			    maccrd->crd_key, maccrd->crd_klen / 8);
		}

		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			kprintf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
			kprintf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
			kprintf("src: skip %d, len %d\n", sskip, stheend);
			kprintf("dst: skip %d, len %d\n", dskip, dtheend);
			kprintf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);

	/* Map the source buffer for DMA. */
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) {
		ubsecstats.hst_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(&q->q_src);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		kprintf("src skip: %d nicealign: %u\n", sskip, nicealign);
#endif
	/* Build the input scatter list, skipping sskip bytes and covering
	 * stheend bytes; each chip pktbuf holds at most 0xfffc bytes. */
	for (i = j = 0; i < q->q_src_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_segs[i].ds_len;
		bus_addr_t packp = q->q_src_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/* MAC-only: output goes straight to the MAC buffer. */
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			kprintf("opkt: %x %x %x\n",
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				ubsecstats.hst_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
			    &q->q_dst_map)) {
				ubsecstats.hst_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				ubsecstats.hst_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst = q->q_src;
			} else {
				/* Misaligned mbuf chain: allocate a fresh,
				 * aligned chain and copy the data across. */
				int totlen, len;
				struct mbuf *m, *top, **mp;

				ubsecstats.hst_unaligned++;
				totlen = q->q_src_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_NOWAIT, MT_DATA);
					if (m && !m_dup_pkthdr(m, q->q_src_m, M_NOWAIT)) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_NOWAIT, MT_DATA);
				}
				if (m == NULL) {
					ubsecstats.hst_nombuf++;
					err = sc->sc_nqueue ? ERESTART : ENOMEM;
					goto errout;
				}
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_NOWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						ubsecstats.hst_nomcl++;
						err = sc->sc_nqueue ? ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_NOWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							ubsecstats.hst_nombuf++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_NOWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							ubsecstats.hst_nomcl++;
							err = sc->sc_nqueue ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				if (bus_dmamap_create(sc->sc_dmat,
				    BUS_DMA_NOWAIT, &q->q_dst_map) != 0) {
					ubsecstats.hst_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    ubsec_op_cb, &q->q_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					ubsecstats.hst_noload++;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			ubsecstats.hst_badflags++;
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			kprintf("dst skip: %d\n", dskip);
#endif
		/* Build the output scatter list (mirrors the input loop);
		 * the last buffer chains to the MAC buffer when needed. */
		for (i = j = 0; i < q->q_dst_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_long *ctxl;

		ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform small context into long context */
		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
		ctxl->pc_flags = ctx.pc_flags;
		ctxl->pc_offset = ctx.pc_offset;
		for (i = 0; i < 6; i++)
			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxl->pc_iv[0] = ctx.pc_iv[0];
		ctxl->pc_iv[1] = ctx.pc_iv[1];
	} else
		bcopy(&ctx, dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx),
		    sizeof(struct ubsec_pktctx));

	crit_enter();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_size;
	/* Kick the chip now unless the framework hints more work is coming
	 * and we have room to aggregate. */
	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR)
		ubsec_feed(sc);
	crit_exit();
	return (0);

errout:
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		crit_enter();
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		crit_exit();
	}
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

/*
 * Completion handler for one symmetric-crypto request: sync and tear
 * down the DMA maps, swap in the copy-path destination mbuf chain if
 * one was used, save the outbound IV for chaining, copy the computed
 * MAC back into the request, and recycle the queue entry.
 */
static void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

	ubsec_dma_sync(&dmap->d_alloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);

	/* If the unaligned-mbuf copy path allocated a new chain, hand it
	 * back to the caller and free the original. */
	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (caddr_t)q->q_dst_m;
	}

	/* copy out IV for future use */
	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			/* Last ciphertext block becomes the session IV. */
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - 8, 8,
			    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
			break;
		}
	}

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		if (crd->crd_alg != CRYPTO_MD5_HMAC &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC)
			continue;
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    sc->sc_sessions[q->q_sesn].ses_mlen,
		    (caddr_t)dmap->d_dma->d_macbuf);
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

/*
 * Copy bytes [hoffset, toffset) from one mbuf chain to another,
 * walking both chains in lockstep.
 */
static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen,
slen;
	caddr_t dptr, sptr;

	j = 0;
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		for (i = 0; i < min(slen, dlen); i++) {
			/* Only bytes inside [hoffset, toffset) are copied;
			 * outside that window both cursors still advance. */
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}

/*
 * feed the key generator, must be called at splimp() or higher.
 * Moves queued MCR2 requests to the chip until its queue fills.
 */
static int
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		ubsec_dma_sync(&q->q_mcr,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
	return (0);
}

/*
 * Callback for handling random numbers and key operations completed
 * on the MCR2 queue; dispatches on the queued operation type.
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;

		/* Hand the harvested entropy to the rndtest/entropy sink. */
		ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD);
		(*sc->sc_harvest)(sc->sc_rndtest,
			rng->rng_buf.dma_vaddr,
			UBSEC_RNG_BUFSIZ*sizeof (u_int32_t));
		rng->rng_used = 0;
		/* Re-arm the RNG poll timer for the next harvest. */
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
		break;
	}
#endif
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		rlen = (me->me_modbits + 7) / 8;	/* result length */
		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;

		ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD);
		ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE);

		if (clen < rlen)
			krp->krp_status = E2BIG;
		else {
			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
				/* Hardware normalization: result is already
				 * aligned, just copy it out. */
				bzero(krp->krp_param[krp->krp_iparams].crp_p,
				    (krp->krp_param[krp->krp_iparams].crp_nbits
					+ 7) / 8);
				bcopy(me->me_C.dma_vaddr,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    (me->me_modbits + 7) / 8);
			} else
				/* Software normalization: shift the result
				 * back down by the pre-shift amount. */
				ubsec_kshift_l(me->me_shiftbits,
				    me->me_C.dma_vaddr, me->me_normbits,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    krp->krp_param[krp->krp_iparams].crp_nbits);
		}

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list.
 */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
		u_int len;

		krp = rp->rpr_krp;
		ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD);

		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
		bcopy(rp->rpr_msgout.dma_vaddr,
		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);

		crypto_kdone(krp);

		/* Scrub the message buffers and context before recycling. */
		bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
		bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
		bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
		break;
	}
	default:
		device_printf(sc->sc_dev, "unknown ctx op: %x\n",
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
/*
 * Periodic RNG harvest: build an RNG-bypass MCR that dumps
 * UBSEC_RNG_BUFSIZ words of hardware randomness into rng_buf and
 * queue it on MCR2.  Re-entered via callout; no-op while a harvest
 * is already outstanding.
 */
static void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;

	crit_enter();
	if (rng->rng_used) {
		crit_exit();
		return;
	}
	sc->sc_nqueue2++;
	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	/* One packet, no input buffer; output is the RNG buffer. */
	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
	    UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next =
0; 1801 1802 ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); 1803 ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS); 1804 rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS; 1805 1806 ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD); 1807 1808 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); 1809 rng->rng_used = 1; 1810 ubsec_feed2(sc); 1811 ubsecstats.hst_rng++; 1812 crit_exit(); 1813 1814 return; 1815 1816 out: 1817 /* 1818 * Something weird happened, generate our own call back. 1819 */ 1820 sc->sc_nqueue2--; 1821 crit_exit(); 1822 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1823 } 1824 #endif /* UBSEC_NO_RNG */ 1825 1826 static void 1827 ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1828 { 1829 bus_addr_t *paddr = (bus_addr_t*) arg; 1830 *paddr = segs->ds_addr; 1831 } 1832 1833 static int 1834 ubsec_dma_malloc( 1835 struct ubsec_softc *sc, 1836 bus_size_t size, 1837 struct ubsec_dma_alloc *dma, 1838 int mapflags 1839 ) 1840 { 1841 int r; 1842 1843 /* XXX could specify sc_dmat as parent but that just adds overhead */ 1844 r = bus_dma_tag_create(NULL, /* parent */ 1845 1, 0, /* alignment, bounds */ 1846 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1847 BUS_SPACE_MAXADDR, /* highaddr */ 1848 NULL, NULL, /* filter, filterarg */ 1849 size, /* maxsize */ 1850 1, /* nsegments */ 1851 size, /* maxsegsize */ 1852 BUS_DMA_ALLOCNOW, /* flags */ 1853 &dma->dma_tag); 1854 if (r != 0) { 1855 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1856 "bus_dma_tag_create failed; error %u\n", r); 1857 goto fail_0; 1858 } 1859 1860 r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map); 1861 if (r != 0) { 1862 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1863 "bus_dmamap_create failed; error %u\n", r); 1864 goto fail_1; 1865 } 1866 1867 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 1868 BUS_DMA_NOWAIT, &dma->dma_map); 1869 if (r != 0) { 1870 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1871 "bus_dmammem_alloc 
"failed; size %ju, error %u\n",
			(intmax_t)size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ubsec_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

	/* Unwind in reverse order of acquisition. */
fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

/*
 * Release a buffer previously set up by ubsec_dma_malloc().
 */
static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx.
30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers: little-endian mode, MCR1 interrupts
 * always on, MCR2 interrupts only when key/RNG ops are supported,
 * and hardware normalization when the part can do it.
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(device_t dev)
{
#if 0
	/* Retained (disabled) OpenBSD retry/timeout tuning. */
	u_int32_t misc;

	misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
	misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
	    | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
	misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
	    | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT);
	pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);
#endif

	/*
	 * This will set the cache line size to 1, this will
	 * force the BCM58xx chip just to do burst read/writes.
	 * Cache line read/writes are too slow.
	 */
	pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	/* Fail every request the chip still holds. */
	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}

/*
 * free a ubsec_q: complete every stacked request and the header MCR
 * itself with EFAULT, returning the entries to the free queue.
 * It is assumed that the caller is within splimp().
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if(q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
ubsec_totalreset(struct ubsec_softc *sc)
{
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}

/*
 * Return 1 when every DMA segment is 4-byte aligned (address for all
 * segments, length for all but the last), 0 otherwise.
 */
static int
ubsec_dmamap_aligned(struct ubsec_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) &&
		    (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Free a completed MCR2 (key-op) request and all of its DMA buffers.
 */
static void
ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	switch (q->q_type) {
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;

		ubsec_dma_free(sc, &me->me_q.q_mcr);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
		ubsec_dma_free(sc, &me->me_M);
		ubsec_dma_free(sc, &me->me_E);
		ubsec_dma_free(sc, &me->me_C);
		ubsec_dma_free(sc, &me->me_epb);
		kfree(me, M_DEVBUF);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;

		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		ubsec_dma_free(sc, &rp->rpr_msgin);
		ubsec_dma_free(sc, &rp->rpr_msgout);
		kfree(rp, M_DEVBUF);
		break;
	}
	default:
		device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type);
		break;
	}
}

/*
 * Asymmetric-crypto dispatch: drain the deferred-free list (entries
 * queued from interrupt context), then route the request by opcode.
 */
static int
ubsec_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	int r;

	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);

	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		struct ubsec_q2 *q;

		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next);
		ubsec_kfree(sc, q);
	}

	switch (krp->krp_op) {
	case CRK_MOD_EXP:
		if (sc->sc_flags & UBS_FLAGS_HWNORM)
			r =
ubsec_kprocess_modexp_hw(sc, krp, hint);
		else
			r = ubsec_kprocess_modexp_sw(sc, krp, hint);
		break;
	case CRK_MOD_EXP_CRT:
		return (ubsec_kprocess_rsapriv(sc, krp, hint));
	default:
		device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n",
		    krp->krp_op);
		krp->krp_status = EOPNOTSUPP;
		crypto_kdone(krp);
		return (0);
	}
	return (0);		/* silence compiler */
}

/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
 *
 * Operands are shifted up to the chip's normalized width (512-2048
 * bits) before submission; the completion path (ubsec_callback2)
 * shifts the result back down.  Errors complete the request via
 * crypto_kdone() with krp_status set; the return value is always 0.
 */
static int
ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
{
	struct ubsec_q2_modexp *me;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_modexp *ctx;
	struct ubsec_pktbuf *epb;
	int err = 0;
	u_int nbits, normbits, mbits, shiftbits, ebits;

	me = kmalloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO);
	me->me_krp = krp;
	me->me_q.q_type = UBS_CTXOP_MODEXP;

	/* Round the modulus width up to a supported normalized width;
	 * 1536/2048 bits need the big-key capable parts. */
	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
	if (nbits <= 512)
		normbits = 512;
	else if (nbits <= 768)
		normbits = 768;
	else if (nbits <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;

	/* Sanity check: result bits must be >= true modulus bits.
 */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	/* Base M: must fit in the modulus width; pre-shift into place. */
	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
	    me->me_M.dma_vaddr, normbits);

	/* Result C: zeroed output buffer the chip writes into. */
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_C.dma_vaddr, me->me_C.dma_size);

	/* Exponent E: same size/shift treatment as M. */
	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
	    me->me_E.dma_vaddr, normbits);

	/* Second input pktbuf chains the exponent behind the base. */
	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32(normbits / 8);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		kprintf("Epb ");
		ubsec_dump_pb(epb);
	}
#endif

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	/*
	 * Fill in the key context: the normalized modulus N plus the four
	 * 16-bit header words (len/op/E_len/N_len), all little-endian.
	 */
	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	bzero(ctx, sizeof(*ctx));
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
	    ctx->me_N, normbits);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	/*
	 * NOTE(review): me_E_len is nbits (not ebits) here, unlike the
	 * hw-normalization path below; E was padded out to the normalized
	 * width above, so this looks intentional — confirm against the
	 * chip documentation.
	 */
	ctx->me_E_len = htole16(nbits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		ubsec_dump_mcr(mcr);
		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
	}
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);

	/* Enqueue and we're done... */
	crit_enter();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexp++;
	crit_exit();

	return (0);

errout:
	/* Wipe any buffer that may hold key material before freeing it. */
	if (me != NULL) {
		if (me->me_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_q.q_mcr);
		if (me->me_q.q_ctx.dma_map != NULL) {
			bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
			ubsec_dma_free(sc, &me->me_q.q_ctx);
		}
		if (me->me_M.dma_map != NULL) {
			bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
			ubsec_dma_free(sc, &me->me_M);
		}
		if (me->me_E.dma_map != NULL) {
			bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
			ubsec_dma_free(sc, &me->me_E);
		}
		if (me->me_C.dma_map != NULL) {
			bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
			ubsec_dma_free(sc, &me->me_C);
		}
		if (me->me_epb.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_epb);
		kfree(me, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}

/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization)
 *
 * Same as the sw path above, except the chip normalizes the operands
 * itself: M, E and N are copied in unshifted at their true lengths.
 */
static int
ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
{
	struct ubsec_q2_modexp *me;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_modexp *ctx;
	struct ubsec_pktbuf *epb;
	int err = 0;
	u_int nbits, normbits, mbits, shiftbits, ebits;

	me = kmalloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO);
	me->me_krp = krp;
	me->me_q.q_type = UBS_CTXOP_MODEXP;

	/* Operand-width selection, as in the sw path. */
	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
	if (nbits <= 512)
		normbits = 512;
	else if (nbits <= 768)
		normbits = 768;
	else if (nbits <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	/* XXX ??? */
	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;

	/* Sanity check: result bits must be >= true modulus bits. */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	/* Base M: zero-padded, unshifted copy. */
	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_M.dma_vaddr, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
	    me->me_M.dma_vaddr, (mbits + 7) / 8);

	/* Result C: cleared; the chip DMAs the answer here. */
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_C.dma_vaddr, me->me_C.dma_size);

	/* Exponent E: zero-padded, unshifted copy. */
	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_E.dma_vaddr, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
	    me->me_E.dma_vaddr, (ebits + 7) / 8);

	/* Chained packet buffer pointing the chip at E (true length here). */
	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32((ebits + 7) / 8);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		kprintf("Epb ");
		ubsec_dump_pb(epb);
	}
#endif

	/* One packet; the input chain is M then (via pb_next) E. */
	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	/* Key context: unshifted N and true E/N bit lengths. */
	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	bzero(ctx, sizeof(*ctx));
	bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N,
	    (nbits + 7) / 8);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(ebits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		ubsec_dump_mcr(mcr);
		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
	}
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);

	/* Enqueue and we're done... */
	crit_enter();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	crit_exit();

	return (0);

errout:
	/* Wipe any buffer that may hold key material before freeing it. */
	if (me != NULL) {
		if (me->me_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_q.q_mcr);
		if (me->me_q.q_ctx.dma_map != NULL) {
			bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
			ubsec_dma_free(sc, &me->me_q.q_ctx);
		}
		if (me->me_M.dma_map != NULL) {
			bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
			ubsec_dma_free(sc, &me->me_M);
		}
		if (me->me_E.dma_map != NULL) {
			bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
			ubsec_dma_free(sc, &me->me_E);
		}
		if (me->me_C.dma_map != NULL) {
			bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
			ubsec_dma_free(sc, &me->me_C);
		}
		if (me->me_epb.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_epb);
		kfree(me, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}

/*
 * Start a CRT RSA private-key operation.  The five key parameters
 * (p, q, dp, dq, pinv) are packed back-to-back into the context buffer
 * at padlen/8-byte intervals; the message occupies 2*padlen bits.
 */
static int
ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
{
	struct ubsec_q2_rsapriv *rp = NULL;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rsapriv *ctx;
	int err = 0;
	u_int padlen, msglen;

	/* Pad to the larger of p's and q's significant widths... */
	msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
	padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
	if (msglen > padlen)
		padlen = msglen;

	/* ...rounded up to a width the chip supports. */
	if (padlen <= 256)
		padlen = 256;
	else if (padlen <= 384)
		padlen = 384;
	else if (padlen <= 512)
		padlen = 512;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
		padlen = 768;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
		padlen = 1024;
	else {
		err = E2BIG;
		goto errout;
	}

	/* The remaining CRT parameters must fit the padded width too. */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	rp = kmalloc(sizeof *rp, M_DEVBUF, M_INTWAIT | M_ZERO);
	rp->rpr_krp = krp;
	rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &rp->rpr_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
	    &rp->rpr_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
	bzero(ctx, sizeof *ctx);

	/* Copy in p */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
	    &ctx->rpr_buf[0 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);

	/* Copy in q */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
	    &ctx->rpr_buf[1 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);

	/* Copy in dp */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
	    &ctx->rpr_buf[2 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);

	/* Copy in dq */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
	    &ctx->rpr_buf[3 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);

	/* Copy in pinv */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
	    &ctx->rpr_buf[4 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);

	/* msglen is reused: from here on it is the message width in bits. */
	msglen = padlen * 2;

	/* Copy in input message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8);
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
	    rp->rpr_msgin.dma_vaddr,
	    (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);

	/* Prepare space for output message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
	mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = htole16(msglen);
	mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);

#ifdef DIAGNOSTIC
	/* Misaligned buffers will hang the chip. */
	if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
		panic("%s: rsapriv: invalid msgin %x(0x%jx)",
		    device_get_nameunit(sc->sc_dev),
		    rp->rpr_msgin.dma_paddr,
		    (uintmax_t)rp->rpr_msgin.dma_size);
	}
	if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
		panic("%s: rsapriv: invalid msgout %x(0x%jx)",
		    device_get_nameunit(sc->sc_dev),
		    rp->rpr_msgout.dma_paddr,
		    (uintmax_t)rp->rpr_msgout.dma_size);
	}
#endif

	/* Context header: total length, opcode, and the two half-widths. */
	ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8));
	ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
	ctx->rpr_q_len = htole16(padlen);
	ctx->rpr_p_len = htole16(padlen);
2679 2680 /* 2681 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2682 * everything else. 2683 */ 2684 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); 2685 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); 2686 2687 /* Enqueue and we're done... */ 2688 crit_enter(); 2689 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); 2690 ubsec_feed2(sc); 2691 ubsecstats.hst_modexpcrt++; 2692 crit_exit(); 2693 return (0); 2694 2695 errout: 2696 if (rp != NULL) { 2697 if (rp->rpr_q.q_mcr.dma_map != NULL) 2698 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2699 if (rp->rpr_msgin.dma_map != NULL) { 2700 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 2701 ubsec_dma_free(sc, &rp->rpr_msgin); 2702 } 2703 if (rp->rpr_msgout.dma_map != NULL) { 2704 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 2705 ubsec_dma_free(sc, &rp->rpr_msgout); 2706 } 2707 kfree(rp, M_DEVBUF); 2708 } 2709 krp->krp_status = err; 2710 crypto_kdone(krp); 2711 return (0); 2712 } 2713 2714 #ifdef UBSEC_DEBUG 2715 static void 2716 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) 2717 { 2718 kprintf("addr 0x%x (0x%x) next 0x%x\n", 2719 pb->pb_addr, pb->pb_len, pb->pb_next); 2720 } 2721 2722 static void 2723 ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) 2724 { 2725 kprintf("CTX (0x%x):\n", c->ctx_len); 2726 switch (letoh16(c->ctx_op)) { 2727 case UBS_CTXOP_RNGBYPASS: 2728 case UBS_CTXOP_RNGSHA1: 2729 break; 2730 case UBS_CTXOP_MODEXP: 2731 { 2732 struct ubsec_ctx_modexp *cx = (void *)c; 2733 int i, len; 2734 2735 kprintf(" Elen %u, Nlen %u\n", 2736 letoh16(cx->me_E_len), letoh16(cx->me_N_len)); 2737 len = (cx->me_N_len + 7)/8; 2738 for (i = 0; i < len; i++) 2739 kprintf("%s%02x", (i == 0) ? 
" N: " : ":", cx->me_N[i]); 2740 kprintf("\n"); 2741 break; 2742 } 2743 default: 2744 kprintf("unknown context: %x\n", c->ctx_op); 2745 } 2746 kprintf("END CTX\n"); 2747 } 2748 2749 static void 2750 ubsec_dump_mcr(struct ubsec_mcr *mcr) 2751 { 2752 volatile struct ubsec_mcr_add *ma; 2753 int i; 2754 2755 kprintf("MCR:\n"); 2756 kprintf(" pkts: %u, flags 0x%x\n", 2757 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); 2758 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; 2759 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { 2760 kprintf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, 2761 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), 2762 letoh16(ma->mcr_reserved)); 2763 kprintf(" %d: ipkt ", i); 2764 ubsec_dump_pb(&ma->mcr_ipktbuf); 2765 kprintf(" %d: opkt ", i); 2766 ubsec_dump_pb(&ma->mcr_opktbuf); 2767 ma++; 2768 } 2769 kprintf("END MCR\n"); 2770 } 2771 #endif /* UBSEC_DEBUG */ 2772 2773 /* 2774 * Return the number of significant bits of a big number. 2775 */ 2776 static int 2777 ubsec_ksigbits(struct crparam *cr) 2778 { 2779 u_int plen = (cr->crp_nbits + 7) / 8; 2780 int i, sig = plen * 8; 2781 u_int8_t c, *p = cr->crp_p; 2782 2783 for (i = plen - 1; i >= 0; i--) { 2784 c = p[i]; 2785 if (c != 0) { 2786 while ((c & 0x80) == 0) { 2787 sig--; 2788 c <<= 1; 2789 } 2790 break; 2791 } 2792 sig -= 8; 2793 } 2794 return (sig); 2795 } 2796 2797 static void 2798 ubsec_kshift_r( 2799 u_int shiftbits, 2800 u_int8_t *src, u_int srcbits, 2801 u_int8_t *dst, u_int dstbits) 2802 { 2803 u_int slen, dlen; 2804 int i, si, di, n; 2805 2806 slen = (srcbits + 7) / 8; 2807 dlen = (dstbits + 7) / 8; 2808 2809 for (i = 0; i < slen; i++) 2810 dst[i] = src[i]; 2811 for (i = 0; i < dlen - slen; i++) 2812 dst[slen + i] = 0; 2813 2814 n = shiftbits / 8; 2815 if (n != 0) { 2816 si = dlen - n - 1; 2817 di = dlen - 1; 2818 while (si >= 0) 2819 dst[di--] = dst[si--]; 2820 while (di >= 0) 2821 dst[di--] = 0; 2822 } 2823 2824 n = shiftbits % 8; 2825 if (n != 0) { 2826 for (i = dlen - 1; i > 
0; i--) 2827 dst[i] = (dst[i] << n) | 2828 (dst[i - 1] >> (8 - n)); 2829 dst[0] = dst[0] << n; 2830 } 2831 } 2832 2833 static void 2834 ubsec_kshift_l( 2835 u_int shiftbits, 2836 u_int8_t *src, u_int srcbits, 2837 u_int8_t *dst, u_int dstbits) 2838 { 2839 int slen, dlen, i, n; 2840 2841 slen = (srcbits + 7) / 8; 2842 dlen = (dstbits + 7) / 8; 2843 2844 n = shiftbits / 8; 2845 for (i = 0; i < slen; i++) 2846 dst[i] = src[i + n]; 2847 for (i = 0; i < dlen - slen; i++) 2848 dst[slen + i] = 0; 2849 2850 n = shiftbits % 8; 2851 if (n != 0) { 2852 for (i = 0; i < (dlen - 1); i++) 2853 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2854 dst[dlen - 1] = dst[dlen - 1] >> n; 2855 } 2856 } 2857