1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */ 2 /* $DragonFly: src/sys/dev/crypto/hifn/hifn7751.c,v 1.14 2007/12/04 09:11:12 hasso Exp $ */ 3 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 4 5 /* 6 * Invertex AEON / Hifn 7751 driver 7 * Copyright (c) 1999 Invertex Inc. All rights reserved. 8 * Copyright (c) 1999 Theo de Raadt 9 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 10 * http://www.netsec.net 11 * Copyright (c) 2003 Hifn Inc. 12 * 13 * This driver is based on a previous driver by Invertex, for which they 14 * requested: Please send any comments, feedback, bug-fixes, or feature 15 * requests to software@invertex.com. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. The name of the author may not be used to endorse or promote products 27 * derived from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

/*
 * Driver for various Hifn encryption processors.
 */
#include "opt_hifn.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/random.h>
#include <sys/thread2.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>
#include <opencrypto/cryptodev.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#ifdef HIFN_RNDTEST
#include "../rndtest/rndtest.h"
#endif
#include "hifn7751reg.h"
#include "hifn7751var.h"

/*
 * Prototypes and count for the pci_device structure
 */
static int hifn_probe(device_t);
static int hifn_attach(device_t);
static int hifn_detach(device_t);
static int hifn_suspend(device_t);
static int hifn_resume(device_t);
static void hifn_shutdown(device_t);

/* newbus device method dispatch table */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),

	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

DECLARE_DUMMY_MODULE(hifn);
DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif

/* Forward declarations for the driver internals defined below / elsewhere
 * in this file. */
static void hifn_reset_board(struct hifn_softc *, int);
static void hifn_reset_puc(struct hifn_softc *);
static void hifn_puc_wait(struct hifn_softc *);
static int hifn_enable_crypto(struct hifn_softc *);
static void hifn_set_retry(struct hifn_softc *sc);
static void hifn_init_dma(struct hifn_softc *);
static void hifn_init_pci_registers(struct hifn_softc *);
static int hifn_sramsize(struct hifn_softc *);
static int hifn_dramsize(struct hifn_softc *);
static int hifn_ramtype(struct hifn_softc *);
static void hifn_sessions(struct hifn_softc *);
static void hifn_intr(void *);
static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
static int hifn_freesession(void *, u_int64_t);
static int hifn_process(void *, struct cryptop *, int);
static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static int hifn_init_pubrng(struct hifn_softc *);
#ifndef HIFN_NO_RNG
static void hifn_rng(void *);
#endif
static void hifn_tick(void *);
static void hifn_abort(struct hifn_softc *);
static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);

/*
 * Read a BAR0 register.  The read also invalidates the cached
 * last-register offset (sc_bar0_lastreg); NOTE(review): presumably
 * hifn_write_reg_0 (defined later in this file) uses that cache to
 * elide redundant work on back-to-back writes -- confirm there.
 */
static __inline__ u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)

/*
 * Read a BAR1 register; same cache-invalidation pattern as READ_REG_0.
 */
static __inline__ u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)

/* sysctl tree: hw.hifn.* */
SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");

#ifdef HIFN_DEBUG
static	int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

static	struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
static	int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");

/*
 * Probe for a supported device.  The PCI vendor and device
 * IDs are used to detect devices we know how to handle.
 */
static int
hifn_probe(device_t dev)
{
	/* Explicitly recognized parts first. */
	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
		return (0);
	/*
	 * NOTE(review): this catch-all claims ANY Hifn-vendor device,
	 * even IDs not listed above; it prints the id and still returns
	 * success.  Verify that is intentional for unrecognized parts.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) {
		device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) );
		return (0);
	}
	return (ENXIO);
}

/*
 * bus_dmamap_load callback: record the bus address of the loaded
 * region.  Only the first segment is recorded, so the mapping is
 * expected to be physically contiguous; the error argument is ignored.
 */
static void
hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

/*
 * Map the PCI vendor/device id pair to a human-readable part name
 * for the attach-time banner.
 */
static const char*
hifn_partname(struct hifn_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_HIFN:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
		}
		return "Hifn unknown-part";
	case PCI_VENDOR_INVERTEX:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
		}
		return "Invertex unknown-part";
	case PCI_VENDOR_NETSEC:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
		}
		return "NetSec unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

/*
 * Default RNG harvest routine (used when rndtest is not attached):
 * feed each 32-bit word of the buffer to the kernel entropy pool.
 * Any trailing partial word in the buffer is ignored.
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	u_int32_t *p = (u_int32_t *)buf;
	for (count /= sizeof (u_int32_t); count; count--)
		add_true_randomness(*p++);
}

/*
 * Clamp v into [min, max], warning on the console when the supplied
 * value is out of range.  Used to sanity-check user-supplied PLL
 * configuration values.
 */
static u_int
checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
{
	if (v > max) {
		device_printf(dev, "Warning, %s %u out of range, "
			"using max %u\n", what, v, max);
		v = max;
	} else if (v < min) {
		device_printf(dev, "Warning, %s %u out of range, "
			"using min %u\n", what, v, min);
		v = min;
	}
	return v;
}

/*
 * Select PLL configuration for 795x parts.  This is complicated in
 * that we cannot determine the optimal parameters without user input.
 * The reference clock is derived from an external clock through a
 * multiplier.  The external clock is either the host bus (i.e. PCI)
 * or an external clock generator.  When using the PCI bus we assume
 * the clock is either 33 or 66 MHz; for an external source we cannot
 * tell the speed.
 *
 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
 * for an external source, followed by the frequency.  We calculate
 * the appropriate multiplier and PLL register contents accordingly.
 * When no configuration is given we default to "pci66" since that
 * always will allow the card to work.  If a card is using the PCI
 * bus clock and in a 33MHz slot then it will be operating at half
 * speed until the correct information is provided.
 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* Default to "pci66" when no hint is configured. */
	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "pci66";
	fl = 33, fh = 66;	/* PCI-clock frequency bounds (MHz) */
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		/* External reference; widen the allowed frequency range
		 * for parts known to support it. */
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	/* Parse the trailing frequency; fall back to 66 MHz if absent. */
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier.  We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}

/*
 * Attach an interface that successfully probed.
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;
	int rseg, rid;
	char rbase;
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.
	 */
#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	/* Read back to verify the device actually latched both bits. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
		    (cmd & PCIM_ENA) == 0 ?
			"memory mapping & bus mastering" :
		    (cmd & PCIM_CMD_MEMEN) == 0 ?
			"memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources.  Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	/*
	 * NOTE(review): bus_dmamem_alloc stores a new map into
	 * sc->sc_dmamap, replacing the one created just above -- verify
	 * the explicit bus_dmamap_create is actually needed here.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			    sizeof (*sc->sc_dma),
			    hifn_dmamap_cb, &sc->sc_dma_physaddr,
			    BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
					0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_FAST,
			   hifn_intr, sc,
			   &sc->sc_intrhand, NULL)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Scale the RAM size down to KB or MB for the banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's',
		sc->sc_maxses);

	/*
	 * NOTE(review): the device_printf above already terminates its
	 * line with '\n', so this pll information prints at the start
	 * of a fresh console line beginning with ", pll=..." -- looks
	 * like a leftover from a version where the banner had no
	 * trailing newline; confirm intended output format.
	 */
	if (sc->sc_flags & HIFN_IS_7956)
		kprintf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	kprintf("\n");

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/* Register the algorithms this part's crypto level supports;
	 * level 2 (strong crypto) is a superset of level 1. */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
				hifn_newsession, hifn_freesession,
				hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* NB: 1 means the callout runs w/o Giant locked */
	callout_init(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Unwind in reverse order of acquisition. */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	return (ENXIO);
}

/*
 * Detach an interface that successfully probed.
 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	crit_enter();
	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right?
 */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
	crit_exit();

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	hifn_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	hifn_stop(sc);
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif
	/* NB: only the flag is maintained today; the save/restore
	 * logic above is compiled out. */
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}

/*
 * Bring up the public-key unit and the RNG (when the part has them):
 * pick a harvest callback, reset the public key/rng engine on
 * non-7811 parts, enable the RNG, and start the periodic RNG callout.
 * Returns 0 on success, 1 if the public key engine failed to come
 * out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

#ifndef HIFN_NO_RNG
	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		sc->sc_rngfirst = 1;
		/* Poll the RNG roughly 100 times per second. */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		/* NB: 1 means the callout runs w/o Giant locked */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

#ifndef HIFN_NO_RNG
/*
 * Periodic RNG harvest callout: pull entropy words from the chip and
 * feed them to the harvest callback, then reschedule.  The very first
 * read after enabling is always discarded.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain at most 5 word-pairs per invocation. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* NB: returns without rescheduling,
				 * permanently disabling harvesting. */
				device_printf(sc->sc_dev,
				    "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
847 */ 848 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 849 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 850 /* NB: discard first data read */ 851 if (sc->sc_rngfirst) 852 sc->sc_rngfirst = 0; 853 else 854 (*sc->sc_harvest)(sc->sc_rndtest, 855 num, sizeof (num)); 856 } 857 } else { 858 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); 859 860 /* NB: discard first data read */ 861 if (sc->sc_rngfirst) 862 sc->sc_rngfirst = 0; 863 else 864 (*sc->sc_harvest)(sc->sc_rndtest, 865 num, sizeof (num[0])); 866 } 867 868 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); 869 #undef RANDOM_BITS 870 } 871 #endif 872 873 static void 874 hifn_puc_wait(struct hifn_softc *sc) 875 { 876 int i; 877 878 for (i = 5000; i > 0; i--) { 879 DELAY(1); 880 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET)) 881 break; 882 } 883 if (!i) 884 device_printf(sc->sc_dev, "proc unit did not reset\n"); 885 } 886 887 /* 888 * Reset the processing unit. 889 */ 890 static void 891 hifn_reset_puc(struct hifn_softc *sc) 892 { 893 /* Reset processing unit */ 894 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); 895 hifn_puc_wait(sc); 896 } 897 898 /* 899 * Set the Retry and TRDY registers; note that we set them to 900 * zero because the 7811 locks up when forced to retry (section 901 * 3.6 of "Specification Update SU-0014-04". Not clear if we 902 * should do this for all Hifn parts, but it doesn't seem to hurt. 903 */ 904 static void 905 hifn_set_retry(struct hifn_softc *sc) 906 { 907 /* NB: RETRY only responds to 8-bit reads/writes */ 908 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); 909 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4); 910 } 911 912 /* 913 * Resets the board. Values in the regesters are left as is 914 * from the reset (i.e. initial values are assigned elsewhere). 915 */ 916 static void 917 hifn_reset_board(struct hifn_softc *sc, int full) 918 { 919 u_int32_t reg; 920 921 /* 922 * Set polling in the DMA configuration register to zero. 
0x7 avoids 923 * resetting the board and zeros out the other fields. 924 */ 925 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 926 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 927 928 /* 929 * Now that polling has been disabled, we have to wait 1 ms 930 * before resetting the board. 931 */ 932 DELAY(1000); 933 934 /* Reset the DMA unit */ 935 if (full) { 936 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 937 DELAY(1000); 938 } else { 939 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 940 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 941 hifn_reset_puc(sc); 942 } 943 944 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); 945 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 946 947 /* Bring dma unit out of reset */ 948 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 949 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 950 951 hifn_puc_wait(sc); 952 hifn_set_retry(sc); 953 954 if (sc->sc_flags & HIFN_IS_7811) { 955 for (reg = 0; reg < 1000; reg++) { 956 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 957 HIFN_MIPSRST_CRAMINIT) 958 break; 959 DELAY(1000); 960 } 961 if (reg == 1000) 962 kprintf(": cram init timeout\n"); 963 } 964 } 965 966 static u_int32_t 967 hifn_next_signature(u_int32_t a, u_int cnt) 968 { 969 int i; 970 u_int32_t v; 971 972 for (i = 0; i < cnt; i++) { 973 974 /* get the parity */ 975 v = a & 0x80080125; 976 v ^= v >> 16; 977 v ^= v >> 8; 978 v ^= v >> 4; 979 v ^= v >> 2; 980 v ^= v >> 1; 981 982 a = (v & 1) ^ (a << 1); 983 } 984 985 return a; 986 } 987 988 struct pci2id { 989 u_short pci_vendor; 990 u_short pci_prod; 991 char card_id[13]; 992 }; 993 static struct pci2id pci2id[] = { 994 { 995 PCI_VENDOR_HIFN, 996 PCI_PRODUCT_HIFN_7951, 997 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 998 0x00, 0x00, 0x00, 0x00, 0x00 } 999 }, { 1000 PCI_VENDOR_HIFN, 1001 PCI_PRODUCT_HIFN_7955, 1002 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1003 0x00, 0x00, 0x00, 0x00, 0x00 } 1004 }, { 1005 PCI_VENDOR_HIFN, 1006 PCI_PRODUCT_HIFN_7956, 1007 { 0x00, 
		        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Enable the crypto engine via the card's unlock handshake, unless
 * crypto is already enabled.  The already-enabled check is important,
 * as performing the unlock sequence twice locks the board until the
 * next reboot.  Returns 0 on success (or already enabled), 1 on an
 * unknown card or unrecognized encryption level.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Find the unlock key for this vendor/device pair. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save register state so it can be restored at "report:". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Perform the unlock handshake: read the secret seed, prime the
	 * unlock register with zero, then feed in 13 LFSR-derived
	 * signatures.  The DELAYs between writes are required by the
	 * hardware; do not reorder or remove them.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the resulting encryption level. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the register state saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}

/*
 * Give initial values to the registers
listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four rings and acknowledge
	 * every pending status bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* All rings idle; build the interrupt-enable mask. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		    | HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured. This should be configured before this
 * routine is called.
1233 */ 1234 static void 1235 hifn_sessions(struct hifn_softc *sc) 1236 { 1237 u_int32_t pucnfg; 1238 int ctxsize; 1239 1240 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); 1241 1242 if (pucnfg & HIFN_PUCNFG_COMPSING) { 1243 if (pucnfg & HIFN_PUCNFG_ENCCNFG) 1244 ctxsize = 128; 1245 else 1246 ctxsize = 512; 1247 /* 1248 * 7955/7956 has internal context memory of 32K 1249 */ 1250 if (sc->sc_flags & HIFN_IS_7956) 1251 sc->sc_maxses = 32768 / ctxsize; 1252 else 1253 sc->sc_maxses = 1 + 1254 ((sc->sc_ramsize - 32768) / ctxsize); 1255 } else 1256 sc->sc_maxses = sc->sc_ramsize / 16384; 1257 1258 if (sc->sc_maxses > 2048) 1259 sc->sc_maxses = 2048; 1260 } 1261 1262 /* 1263 * Determine ram type (sram or dram). Board should be just out of a reset 1264 * state when this is called. 1265 */ 1266 static int 1267 hifn_ramtype(struct hifn_softc *sc) 1268 { 1269 u_int8_t data[8], dataexpect[8]; 1270 int i; 1271 1272 for (i = 0; i < sizeof(data); i++) 1273 data[i] = dataexpect[i] = 0x55; 1274 if (hifn_writeramaddr(sc, 0, data)) 1275 return (-1); 1276 if (hifn_readramaddr(sc, 0, data)) 1277 return (-1); 1278 if (bcmp(data, dataexpect, sizeof(data)) != 0) { 1279 sc->sc_drammodel = 1; 1280 return (0); 1281 } 1282 1283 for (i = 0; i < sizeof(data); i++) 1284 data[i] = dataexpect[i] = 0xaa; 1285 if (hifn_writeramaddr(sc, 0, data)) 1286 return (-1); 1287 if (hifn_readramaddr(sc, 0, data)) 1288 return (-1); 1289 if (bcmp(data, dataexpect, sizeof(data)) != 0) { 1290 sc->sc_drammodel = 1; 1291 return (0); 1292 } 1293 1294 return (0); 1295 } 1296 1297 #define HIFN_SRAM_MAX (32 << 20) 1298 #define HIFN_SRAM_STEP_SIZE 16384 1299 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) 1300 1301 static int 1302 hifn_sramsize(struct hifn_softc *sc) 1303 { 1304 u_int32_t a; 1305 u_int8_t data[8]; 1306 u_int8_t dataexpect[sizeof(data)]; 1307 int32_t i; 1308 1309 for (i = 0; i < sizeof(data); i++) 1310 data[i] = dataexpect[i] = i ^ 0x5a; 1311 1312 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; 
i--) { 1313 a = i * HIFN_SRAM_STEP_SIZE; 1314 bcopy(&i, data, sizeof(i)); 1315 hifn_writeramaddr(sc, a, data); 1316 } 1317 1318 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { 1319 a = i * HIFN_SRAM_STEP_SIZE; 1320 bcopy(&i, dataexpect, sizeof(i)); 1321 if (hifn_readramaddr(sc, a, data) < 0) 1322 return (0); 1323 if (bcmp(data, dataexpect, sizeof(data)) != 0) 1324 return (0); 1325 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; 1326 } 1327 1328 return (0); 1329 } 1330 1331 /* 1332 * XXX For dram boards, one should really try all of the 1333 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG 1334 * is already set up correctly. 1335 */ 1336 static int 1337 hifn_dramsize(struct hifn_softc *sc) 1338 { 1339 u_int32_t cnfg; 1340 1341 if (sc->sc_flags & HIFN_IS_7956) { 1342 /* 1343 * 7955/7956 have a fixed internal ram of only 32K. 1344 */ 1345 sc->sc_ramsize = 32768; 1346 } else { 1347 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & 1348 HIFN_PUCNFG_DRAMMASK; 1349 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); 1350 } 1351 return (0); 1352 } 1353 1354 static void 1355 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) 1356 { 1357 struct hifn_dma *dma = sc->sc_dma; 1358 1359 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 1360 dma->cmdi = 0; 1361 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 1362 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1363 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 1364 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1365 } 1366 *cmdp = dma->cmdi++; 1367 dma->cmdk = dma->cmdi; 1368 1369 if (dma->srci == HIFN_D_SRC_RSIZE) { 1370 dma->srci = 0; 1371 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | 1372 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1373 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1374 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1375 } 1376 *srcp = dma->srci++; 1377 dma->srck = dma->srci; 1378 1379 if (dma->dsti == HIFN_D_DST_RSIZE) { 1380 dma->dsti = 0; 1381 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | 1382 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 
1383 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1384 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1385 } 1386 *dstp = dma->dsti++; 1387 dma->dstk = dma->dsti; 1388 1389 if (dma->resi == HIFN_D_RES_RSIZE) { 1390 dma->resi = 0; 1391 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1392 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1393 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1394 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1395 } 1396 *resp = dma->resi++; 1397 dma->resk = dma->resi; 1398 } 1399 1400 static int 1401 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1402 { 1403 struct hifn_dma *dma = sc->sc_dma; 1404 hifn_base_command_t wc; 1405 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1406 int r, cmdi, resi, srci, dsti; 1407 1408 wc.masks = htole16(3 << 13); 1409 wc.session_num = htole16(addr >> 14); 1410 wc.total_source_count = htole16(8); 1411 wc.total_dest_count = htole16(addr & 0x3fff); 1412 1413 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1414 1415 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1416 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1417 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1418 1419 /* build write command */ 1420 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1421 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; 1422 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1423 1424 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr 1425 + offsetof(struct hifn_dma, test_src)); 1426 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr 1427 + offsetof(struct hifn_dma, test_dst)); 1428 1429 dma->cmdr[cmdi].l = htole32(16 | masks); 1430 dma->srcr[srci].l = htole32(8 | masks); 1431 dma->dstr[dsti].l = htole32(4 | masks); 1432 dma->resr[resi].l = htole32(4 | masks); 1433 1434 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1435 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1436 1437 for (r = 10000; r >= 0; r--) { 1438 DELAY(10); 1439 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1440 BUS_DMASYNC_POSTREAD | 
BUS_DMASYNC_POSTWRITE); 1441 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1442 break; 1443 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1444 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1445 } 1446 if (r == 0) { 1447 device_printf(sc->sc_dev, "writeramaddr -- " 1448 "result[%d](addr %d) still valid\n", resi, addr); 1449 r = -1; 1450 return (-1); 1451 } else 1452 r = 0; 1453 1454 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1455 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1456 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1457 1458 return (r); 1459 } 1460 1461 static int 1462 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1463 { 1464 struct hifn_dma *dma = sc->sc_dma; 1465 hifn_base_command_t rc; 1466 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1467 int r, cmdi, srci, dsti, resi; 1468 1469 rc.masks = htole16(2 << 13); 1470 rc.session_num = htole16(addr >> 14); 1471 rc.total_source_count = htole16(addr & 0x3fff); 1472 rc.total_dest_count = htole16(8); 1473 1474 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1475 1476 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1477 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1478 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1479 1480 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1481 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; 1482 1483 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + 1484 offsetof(struct hifn_dma, test_src)); 1485 dma->test_src = 0; 1486 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + 1487 offsetof(struct hifn_dma, test_dst)); 1488 dma->test_dst = 0; 1489 dma->cmdr[cmdi].l = htole32(8 | masks); 1490 dma->srcr[srci].l = htole32(8 | masks); 1491 dma->dstr[dsti].l = htole32(8 | masks); 1492 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1493 1494 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1496 1497 for (r = 10000; r >= 0; r--) { 1498 DELAY(10); 1499 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 
1500 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1501 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1502 break; 1503 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1504 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1505 } 1506 if (r == 0) { 1507 device_printf(sc->sc_dev, "readramaddr -- " 1508 "result[%d](addr %d) still valid\n", resi, addr); 1509 r = -1; 1510 } else { 1511 r = 0; 1512 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1513 } 1514 1515 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1516 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1517 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1518 1519 return (r); 1520 } 1521 1522 /* 1523 * Initialize the descriptor rings. 1524 */ 1525 static void 1526 hifn_init_dma(struct hifn_softc *sc) 1527 { 1528 struct hifn_dma *dma = sc->sc_dma; 1529 int i; 1530 1531 hifn_set_retry(sc); 1532 1533 /* initialize static pointer values */ 1534 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1535 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + 1536 offsetof(struct hifn_dma, command_bufs[i][0])); 1537 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1538 dma->resr[i].p = htole32(sc->sc_dma_physaddr + 1539 offsetof(struct hifn_dma, result_bufs[i][0])); 1540 1541 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1542 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); 1543 dma->srcr[HIFN_D_SRC_RSIZE].p = 1544 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); 1545 dma->dstr[HIFN_D_DST_RSIZE].p = 1546 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); 1547 dma->resr[HIFN_D_RES_RSIZE].p = 1548 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); 1549 1550 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1551 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1552 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1553 } 1554 1555 /* 1556 * Writes out the raw command buffer space. Returns the 1557 * command buffer size. 
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/* Base command: total source/destination byte counts. */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	/* High bits of the lengths ride in the session_num field. */
#if 0
	base_cmd->session_num = htole16(cmd->session_num |
#else
	base_cmd->session_num = htole16(
#endif
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* Optional key material follows the command structures. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* Repeat the key material to fill 256 bytes. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* A command with neither MAC nor CRYPT still gets 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
/* NOTE(review): this #undef removes MIN for the rest of the file -- verify intended */
#undef MIN
}

/*
 * Returns non-zero if every segment of the operand starts on a
 * longword (4-byte) boundary; all but the last segment must also be a
 * multiple of 4 bytes long.
 */
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Fill destination-ring descriptors for the command's dst operand.
 * When sloplen != 0 the final (sub-longword) bytes are redirected into
 * the shared "slop" area instead of the caller's last segment.
 * Returns the new ring index and bumps dma->dstu by the slots used.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Aligned remainder of the last segment, if any. */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Fill source-ring descriptors for the command's src operand (one per
 * DMA segment, HIFN_D_LAST on the final one).  Returns the new ring
 * index and bumps dma->srcu by the number of segments.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}

static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	/*
	 * bus_dma load callback: record the segment list and total map
	 * size into the hifn_operand passed as "arg".
	 */
	struct hifn_operand *op = arg;

	KASSERT(nsegs <= MAX_SCATTER,
	    ("hifn_op_cb: too many DMA segments (%u > %u) "
	     "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/*
 * Map the request's source (and, if needed, a freshly copied aligned
 * destination mbuf chain), then post command/src/result/dst
 * descriptors to the chip and enable the rings.  Returns 0 on
 * success, ERESTART when a ring is exhausted (caller requeues), or
 * ENOMEM/EINVAL on failure; on error all maps/mbufs created here are
 * released via the goto-cleanup chain.
 */
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "cmd/result exhaustion, cmdu %u resu %u\n",
			    dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		return (ERESTART);
	}

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
		hifnstats.hst_nomem_map++;
		return (ENOMEM);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io->uio_segflg = UIO_USERSPACE;
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		/* In-place operation: dst shares the src mapping. */
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
			    ("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment. If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, MB_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, MB_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, MB_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, MB_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Grow the chain until it can hold totlen bytes. */
			while (totlen > 0) {
				MGET(m, MB_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, MB_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			/*
			 * NOTE(review): "|=" here vs plain "=" on the src
			 * path above -- verify which is intended.
			 */
			cmd->dst_io->uio_segflg |= UIO_USERSPACE;
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
			    dma->srcu, cmd->src_nsegs,
			    dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* Post the command descriptor (wrap the ring if needed). */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		kprintf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
	    ("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/* Batch result IRQs while the caller promises more work. */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	sc->sc_active = 5;
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}

/*
 * Once-a-second watchdog: after 5 idle ticks (sc_active counts down
 * from 5 on each submission) disable any ring whose in-use count has
 * dropped to zero, then re-arm the callout.
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	crit_enter();
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	crit_exit();
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		hifnstats.hst_noirq++;
		return;
	}

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* Acknowledge the interrupt causes we are about to service. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if
((sc->sc_flags & HIFN_HAS_PUBLIC) && 2198 (dmacsr & HIFN_DMACSR_PUBDONE)) 2199 WRITE_REG_1(sc, HIFN_1_PUB_STATUS, 2200 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); 2201 2202 restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); 2203 if (restart) 2204 device_printf(sc->sc_dev, "overrun %x\n", dmacsr); 2205 2206 if (sc->sc_flags & HIFN_IS_7811) { 2207 if (dmacsr & HIFN_DMACSR_ILLR) 2208 device_printf(sc->sc_dev, "illegal read\n"); 2209 if (dmacsr & HIFN_DMACSR_ILLW) 2210 device_printf(sc->sc_dev, "illegal write\n"); 2211 } 2212 2213 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | 2214 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); 2215 if (restart) { 2216 device_printf(sc->sc_dev, "abort, resetting.\n"); 2217 hifnstats.hst_abort++; 2218 hifn_abort(sc); 2219 return; 2220 } 2221 2222 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { 2223 /* 2224 * If no slots to process and we receive a "waiting on 2225 * command" interrupt, we disable the "waiting on command" 2226 * (by clearing it). 
2227 */ 2228 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 2229 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2230 } 2231 2232 /* clear the rings */ 2233 i = dma->resk; u = dma->resu; 2234 while (u != 0) { 2235 HIFN_RESR_SYNC(sc, i, 2236 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2237 if (dma->resr[i].l & htole32(HIFN_D_VALID)) { 2238 HIFN_RESR_SYNC(sc, i, 2239 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2240 break; 2241 } 2242 2243 if (i != HIFN_D_RES_RSIZE) { 2244 struct hifn_command *cmd; 2245 u_int8_t *macbuf = NULL; 2246 2247 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); 2248 cmd = dma->hifn_commands[i]; 2249 KASSERT(cmd != NULL, 2250 ("hifn_intr: null command slot %u", i)); 2251 dma->hifn_commands[i] = NULL; 2252 2253 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2254 macbuf = dma->result_bufs[i]; 2255 macbuf += 12; 2256 } 2257 2258 hifn_callback(sc, cmd, macbuf); 2259 hifnstats.hst_opackets++; 2260 u--; 2261 } 2262 2263 if (++i == (HIFN_D_RES_RSIZE + 1)) 2264 i = 0; 2265 } 2266 dma->resk = i; dma->resu = u; 2267 2268 i = dma->srck; u = dma->srcu; 2269 while (u != 0) { 2270 if (i == HIFN_D_SRC_RSIZE) 2271 i = 0; 2272 HIFN_SRCR_SYNC(sc, i, 2273 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2274 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { 2275 HIFN_SRCR_SYNC(sc, i, 2276 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2277 break; 2278 } 2279 i++, u--; 2280 } 2281 dma->srck = i; dma->srcu = u; 2282 2283 i = dma->cmdk; u = dma->cmdu; 2284 while (u != 0) { 2285 HIFN_CMDR_SYNC(sc, i, 2286 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2287 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { 2288 HIFN_CMDR_SYNC(sc, i, 2289 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2290 break; 2291 } 2292 if (i != HIFN_D_CMD_RSIZE) { 2293 u--; 2294 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); 2295 } 2296 if (++i == (HIFN_D_CMD_RSIZE + 1)) 2297 i = 0; 2298 } 2299 dma->cmdk = i; dma->cmdu = u; 2300 2301 if (sc->sc_needwakeup) { /* XXX check high watermark */ 2302 int wakeup = sc->sc_needwakeup & 
(CRYPTO_SYMQ|CRYPTO_ASYMQ); 2303 #ifdef HIFN_DEBUG 2304 if (hifn_debug) 2305 device_printf(sc->sc_dev, 2306 "wakeup crypto (%x) u %d/%d/%d/%d\n", 2307 sc->sc_needwakeup, 2308 dma->cmdu, dma->srcu, dma->dstu, dma->resu); 2309 #endif 2310 sc->sc_needwakeup &= ~wakeup; 2311 crypto_unblock(sc->sc_cid, wakeup); 2312 } 2313 } 2314 2315 /* 2316 * Allocate a new 'session' and return an encoded session id. 'sidp' 2317 * contains our registration id, and should contain an encoded session 2318 * id on successful allocation. 2319 */ 2320 static int 2321 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) 2322 { 2323 struct cryptoini *c; 2324 struct hifn_softc *sc = arg; 2325 int mac = 0, cry = 0, sesn; 2326 struct hifn_session *ses = NULL; 2327 2328 KASSERT(sc != NULL, ("hifn_newsession: null softc")); 2329 if (sidp == NULL || cri == NULL || sc == NULL) 2330 return (EINVAL); 2331 2332 if (sc->sc_sessions == NULL) { 2333 ses = sc->sc_sessions = (struct hifn_session *)kmalloc( 2334 sizeof(*ses), M_DEVBUF, M_NOWAIT); 2335 if (ses == NULL) 2336 return (ENOMEM); 2337 sesn = 0; 2338 sc->sc_nsessions = 1; 2339 } else { 2340 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 2341 if (!sc->sc_sessions[sesn].hs_used) { 2342 ses = &sc->sc_sessions[sesn]; 2343 break; 2344 } 2345 } 2346 2347 if (ses == NULL) { 2348 sesn = sc->sc_nsessions; 2349 ses = (struct hifn_session *)kmalloc((sesn + 1) * 2350 sizeof(*ses), M_DEVBUF, M_NOWAIT); 2351 if (ses == NULL) 2352 return (ENOMEM); 2353 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); 2354 bzero(sc->sc_sessions, sesn * sizeof(*ses)); 2355 kfree(sc->sc_sessions, M_DEVBUF); 2356 sc->sc_sessions = ses; 2357 ses = &sc->sc_sessions[sesn]; 2358 sc->sc_nsessions++; 2359 } 2360 } 2361 bzero(ses, sizeof(*ses)); 2362 ses->hs_used = 1; 2363 2364 for (c = cri; c != NULL; c = c->cri_next) { 2365 switch (c->cri_alg) { 2366 case CRYPTO_MD5: 2367 case CRYPTO_SHA1: 2368 case CRYPTO_MD5_HMAC: 2369 case CRYPTO_SHA1_HMAC: 2370 if (mac) 2371 return 
(EINVAL); 2372 mac = 1; 2373 break; 2374 case CRYPTO_DES_CBC: 2375 case CRYPTO_3DES_CBC: 2376 case CRYPTO_AES_CBC: 2377 /* XXX this may read fewer, does it matter? */ 2378 read_random(ses->hs_iv, 2379 c->cri_alg == CRYPTO_AES_CBC ? 2380 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2381 /*FALLTHROUGH*/ 2382 case CRYPTO_ARC4: 2383 if (cry) 2384 return (EINVAL); 2385 cry = 1; 2386 break; 2387 default: 2388 return (EINVAL); 2389 } 2390 } 2391 if (mac == 0 && cry == 0) 2392 return (EINVAL); 2393 2394 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn); 2395 2396 return (0); 2397 } 2398 2399 /* 2400 * Deallocate a session. 2401 * XXX this routine should run a zero'd mac/encrypt key into context ram. 2402 * XXX to blow away any keys already stored there. 2403 */ 2404 #define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff) 2405 2406 static int 2407 hifn_freesession(void *arg, u_int64_t tid) 2408 { 2409 struct hifn_softc *sc = arg; 2410 int session; 2411 u_int32_t sid = CRYPTO_SESID2LID(tid); 2412 2413 KASSERT(sc != NULL, ("hifn_freesession: null softc")); 2414 if (sc == NULL) 2415 return (EINVAL); 2416 2417 session = HIFN_SESSION(sid); 2418 if (session >= sc->sc_nsessions) 2419 return (EINVAL); 2420 2421 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); 2422 return (0); 2423 } 2424 2425 static int 2426 hifn_process(void *arg, struct cryptop *crp, int hint) 2427 { 2428 struct hifn_softc *sc = arg; 2429 struct hifn_command *cmd = NULL; 2430 int session, err, ivlen; 2431 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 2432 2433 if (crp == NULL || crp->crp_callback == NULL) { 2434 hifnstats.hst_invalid++; 2435 return (EINVAL); 2436 } 2437 session = HIFN_SESSION(crp->crp_sid); 2438 2439 if (sc == NULL || session >= sc->sc_nsessions) { 2440 err = EINVAL; 2441 goto errout; 2442 } 2443 2444 cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO); 2445 if (cmd == NULL) { 2446 hifnstats.hst_nomem++; 2447 err = ENOMEM; 2448 goto errout; 2449 } 
2450 2451 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2452 cmd->src_m = (struct mbuf *)crp->crp_buf; 2453 cmd->dst_m = (struct mbuf *)crp->crp_buf; 2454 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2455 cmd->src_io = (struct uio *)crp->crp_buf; 2456 cmd->dst_io = (struct uio *)crp->crp_buf; 2457 } else { 2458 err = EINVAL; 2459 goto errout; /* XXX we don't handle contiguous buffers! */ 2460 } 2461 2462 crd1 = crp->crp_desc; 2463 if (crd1 == NULL) { 2464 err = EINVAL; 2465 goto errout; 2466 } 2467 crd2 = crd1->crd_next; 2468 2469 if (crd2 == NULL) { 2470 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 2471 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2472 crd1->crd_alg == CRYPTO_SHA1 || 2473 crd1->crd_alg == CRYPTO_MD5) { 2474 maccrd = crd1; 2475 enccrd = NULL; 2476 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2477 crd1->crd_alg == CRYPTO_3DES_CBC || 2478 crd1->crd_alg == CRYPTO_AES_CBC || 2479 crd1->crd_alg == CRYPTO_ARC4) { 2480 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2481 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2482 maccrd = NULL; 2483 enccrd = crd1; 2484 } else { 2485 err = EINVAL; 2486 goto errout; 2487 } 2488 } else { 2489 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 2490 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2491 crd1->crd_alg == CRYPTO_MD5 || 2492 crd1->crd_alg == CRYPTO_SHA1) && 2493 (crd2->crd_alg == CRYPTO_DES_CBC || 2494 crd2->crd_alg == CRYPTO_3DES_CBC || 2495 crd2->crd_alg == CRYPTO_AES_CBC || 2496 crd2->crd_alg == CRYPTO_ARC4) && 2497 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2498 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2499 maccrd = crd1; 2500 enccrd = crd2; 2501 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2502 crd1->crd_alg == CRYPTO_ARC4 || 2503 crd1->crd_alg == CRYPTO_3DES_CBC || 2504 crd1->crd_alg == CRYPTO_AES_CBC) && 2505 (crd2->crd_alg == CRYPTO_MD5_HMAC || 2506 crd2->crd_alg == CRYPTO_SHA1_HMAC || 2507 crd2->crd_alg == CRYPTO_MD5 || 2508 crd2->crd_alg == CRYPTO_SHA1) && 2509 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2510 enccrd = crd1; 2511 maccrd = crd2; 2512 } 
else { 2513 /* 2514 * We cannot order the 7751 as requested 2515 */ 2516 err = EINVAL; 2517 goto errout; 2518 } 2519 } 2520 2521 if (enccrd) { 2522 cmd->enccrd = enccrd; 2523 cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2524 switch (enccrd->crd_alg) { 2525 case CRYPTO_ARC4: 2526 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2527 break; 2528 case CRYPTO_DES_CBC: 2529 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2530 HIFN_CRYPT_CMD_MODE_CBC | 2531 HIFN_CRYPT_CMD_NEW_IV; 2532 break; 2533 case CRYPTO_3DES_CBC: 2534 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2535 HIFN_CRYPT_CMD_MODE_CBC | 2536 HIFN_CRYPT_CMD_NEW_IV; 2537 break; 2538 case CRYPTO_AES_CBC: 2539 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | 2540 HIFN_CRYPT_CMD_MODE_CBC | 2541 HIFN_CRYPT_CMD_NEW_IV; 2542 break; 2543 default: 2544 err = EINVAL; 2545 goto errout; 2546 } 2547 if (enccrd->crd_alg != CRYPTO_ARC4) { 2548 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 2549 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2550 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2551 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2552 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2553 else 2554 bcopy(sc->sc_sessions[session].hs_iv, 2555 cmd->iv, ivlen); 2556 2557 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2558 == 0) { 2559 if (crp->crp_flags & CRYPTO_F_IMBUF) 2560 m_copyback(cmd->src_m, 2561 enccrd->crd_inject, 2562 ivlen, cmd->iv); 2563 else if (crp->crp_flags & CRYPTO_F_IOV) 2564 cuio_copyback(cmd->src_io, 2565 enccrd->crd_inject, 2566 ivlen, cmd->iv); 2567 } 2568 } else { 2569 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2570 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2571 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2572 m_copydata(cmd->src_m, 2573 enccrd->crd_inject, ivlen, cmd->iv); 2574 else if (crp->crp_flags & CRYPTO_F_IOV) 2575 cuio_copydata(cmd->src_io, 2576 enccrd->crd_inject, ivlen, cmd->iv); 2577 } 2578 } 2579 2580 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) 2581 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2582 cmd->ck = enccrd->crd_key; 2583 cmd->cklen = 
enccrd->crd_klen >> 3; 2584 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2585 /* 2586 * Need to specify the size for the AES key in the masks. 2587 */ 2588 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == 2589 HIFN_CRYPT_CMD_ALG_AES) { 2590 switch (cmd->cklen) { 2591 case 16: 2592 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; 2593 break; 2594 case 24: 2595 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; 2596 break; 2597 case 32: 2598 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; 2599 break; 2600 default: 2601 err = EINVAL; 2602 goto errout; 2603 } 2604 } 2605 } 2606 2607 if (maccrd) { 2608 cmd->maccrd = maccrd; 2609 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2610 2611 switch (maccrd->crd_alg) { 2612 case CRYPTO_MD5: 2613 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2614 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2615 HIFN_MAC_CMD_POS_IPSEC; 2616 break; 2617 case CRYPTO_MD5_HMAC: 2618 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2619 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2620 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2621 break; 2622 case CRYPTO_SHA1: 2623 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2624 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2625 HIFN_MAC_CMD_POS_IPSEC; 2626 break; 2627 case CRYPTO_SHA1_HMAC: 2628 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2629 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2630 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2631 break; 2632 } 2633 2634 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC || 2635 maccrd->crd_alg == CRYPTO_MD5_HMAC) { 2636 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2637 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); 2638 bzero(cmd->mac + (maccrd->crd_klen >> 3), 2639 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2640 } 2641 } 2642 2643 cmd->crp = crp; 2644 cmd->session_num = session; 2645 cmd->softc = sc; 2646 2647 err = hifn_crypto(sc, cmd, crp, hint); 2648 if (!err) { 2649 return 0; 2650 } else if (err == ERESTART) { 2651 /* 2652 * There weren't enough resources to dispatch the request 2653 * to the part. 
Notify the caller so they'll requeue this 2654 * request and resubmit it again soon. 2655 */ 2656 #ifdef HIFN_DEBUG 2657 if (hifn_debug) 2658 device_printf(sc->sc_dev, "requeue request\n"); 2659 #endif 2660 kfree(cmd, M_DEVBUF); 2661 sc->sc_needwakeup |= CRYPTO_SYMQ; 2662 return (err); 2663 } 2664 2665 errout: 2666 if (cmd != NULL) 2667 kfree(cmd, M_DEVBUF); 2668 if (err == EINVAL) 2669 hifnstats.hst_invalid++; 2670 else 2671 hifnstats.hst_nomem++; 2672 crp->crp_etype = err; 2673 crypto_done(crp); 2674 return (err); 2675 } 2676 2677 static void 2678 hifn_abort(struct hifn_softc *sc) 2679 { 2680 struct hifn_dma *dma = sc->sc_dma; 2681 struct hifn_command *cmd; 2682 struct cryptop *crp; 2683 int i, u; 2684 2685 i = dma->resk; u = dma->resu; 2686 while (u != 0) { 2687 cmd = dma->hifn_commands[i]; 2688 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); 2689 dma->hifn_commands[i] = NULL; 2690 crp = cmd->crp; 2691 2692 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { 2693 /* Salvage what we can. */ 2694 u_int8_t *macbuf; 2695 2696 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2697 macbuf = dma->result_bufs[i]; 2698 macbuf += 12; 2699 } else 2700 macbuf = NULL; 2701 hifnstats.hst_opackets++; 2702 hifn_callback(sc, cmd, macbuf); 2703 } else { 2704 if (cmd->src_map == cmd->dst_map) { 2705 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2706 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2707 } else { 2708 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2709 BUS_DMASYNC_POSTWRITE); 2710 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2711 BUS_DMASYNC_POSTREAD); 2712 } 2713 2714 if (cmd->src_m != cmd->dst_m) { 2715 m_freem(cmd->src_m); 2716 crp->crp_buf = (caddr_t)cmd->dst_m; 2717 } 2718 2719 /* non-shared buffers cannot be restarted */ 2720 if (cmd->src_map != cmd->dst_map) { 2721 /* 2722 * XXX should be EAGAIN, delayed until 2723 * after the reset. 
2724 */ 2725 crp->crp_etype = ENOMEM; 2726 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2727 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2728 } else 2729 crp->crp_etype = ENOMEM; 2730 2731 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2732 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2733 2734 kfree(cmd, M_DEVBUF); 2735 if (crp->crp_etype != EAGAIN) 2736 crypto_done(crp); 2737 } 2738 2739 if (++i == HIFN_D_RES_RSIZE) 2740 i = 0; 2741 u--; 2742 } 2743 dma->resk = i; dma->resu = u; 2744 2745 hifn_reset_board(sc, 1); 2746 hifn_init_dma(sc); 2747 hifn_init_pci_registers(sc); 2748 } 2749 2750 static void 2751 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) 2752 { 2753 struct hifn_dma *dma = sc->sc_dma; 2754 struct cryptop *crp = cmd->crp; 2755 struct cryptodesc *crd; 2756 struct mbuf *m; 2757 int totlen, i, u, ivlen; 2758 2759 if (cmd->src_map == cmd->dst_map) { 2760 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2761 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2762 } else { 2763 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2764 BUS_DMASYNC_POSTWRITE); 2765 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2766 BUS_DMASYNC_POSTREAD); 2767 } 2768 2769 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2770 if (cmd->src_m != cmd->dst_m) { 2771 crp->crp_buf = (caddr_t)cmd->dst_m; 2772 totlen = cmd->src_mapsize; 2773 for (m = cmd->dst_m; m != NULL; m = m->m_next) { 2774 if (totlen < m->m_len) { 2775 m->m_len = totlen; 2776 totlen = 0; 2777 } else 2778 totlen -= m->m_len; 2779 } 2780 cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len; 2781 m_freem(cmd->src_m); 2782 } 2783 } 2784 2785 if (cmd->sloplen != 0) { 2786 if (crp->crp_flags & CRYPTO_F_IMBUF) 2787 m_copyback((struct mbuf *)crp->crp_buf, 2788 cmd->src_mapsize - cmd->sloplen, 2789 cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); 2790 else if (crp->crp_flags & CRYPTO_F_IOV) 2791 cuio_copyback((struct uio *)crp->crp_buf, 2792 cmd->src_mapsize - cmd->sloplen, 2793 cmd->sloplen, 
(caddr_t)&dma->slop[cmd->slopidx]); 2794 } 2795 2796 i = dma->dstk; u = dma->dstu; 2797 while (u != 0) { 2798 if (i == HIFN_D_DST_RSIZE) 2799 i = 0; 2800 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2801 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2802 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2803 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2804 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2805 break; 2806 } 2807 i++, u--; 2808 } 2809 dma->dstk = i; dma->dstu = u; 2810 2811 hifnstats.hst_obytes += cmd->dst_mapsize; 2812 2813 if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == 2814 HIFN_BASE_CMD_CRYPT) { 2815 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2816 if (crd->crd_alg != CRYPTO_DES_CBC && 2817 crd->crd_alg != CRYPTO_3DES_CBC && 2818 crd->crd_alg != CRYPTO_AES_CBC) 2819 continue; 2820 ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? 2821 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2822 if (crp->crp_flags & CRYPTO_F_IMBUF) 2823 m_copydata((struct mbuf *)crp->crp_buf, 2824 crd->crd_skip + crd->crd_len - ivlen, ivlen, 2825 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2826 else if (crp->crp_flags & CRYPTO_F_IOV) { 2827 cuio_copydata((struct uio *)crp->crp_buf, 2828 crd->crd_skip + crd->crd_len - ivlen, ivlen, 2829 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2830 } 2831 break; 2832 } 2833 } 2834 2835 if (macbuf != NULL) { 2836 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2837 int len; 2838 2839 if (crd->crd_alg == CRYPTO_MD5) 2840 len = 16; 2841 else if (crd->crd_alg == CRYPTO_SHA1) 2842 len = 20; 2843 else if (crd->crd_alg == CRYPTO_MD5_HMAC || 2844 crd->crd_alg == CRYPTO_SHA1_HMAC) 2845 len = 12; 2846 else 2847 continue; 2848 2849 if (crp->crp_flags & CRYPTO_F_IMBUF) 2850 m_copyback((struct mbuf *)crp->crp_buf, 2851 crd->crd_inject, len, macbuf); 2852 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) 2853 bcopy((caddr_t)macbuf, crp->crp_mac, len); 2854 break; 2855 } 2856 } 2857 2858 if (cmd->src_map != 
cmd->dst_map) { 2859 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2860 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2861 } 2862 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2863 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2864 kfree(cmd, M_DEVBUF); 2865 crypto_done(crp); 2866 } 2867 2868 /* 2869 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 2870 * and Group 1 registers; avoid conditions that could create 2871 * burst writes by doing a read in between the writes. 2872 * 2873 * NB: The read we interpose is always to the same register; 2874 * we do this because reading from an arbitrary (e.g. last) 2875 * register may not always work. 2876 */ 2877 static void 2878 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2879 { 2880 if (sc->sc_flags & HIFN_IS_7811) { 2881 if (sc->sc_bar0_lastreg == reg - 4) 2882 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); 2883 sc->sc_bar0_lastreg = reg; 2884 } 2885 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2886 } 2887 2888 static void 2889 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2890 { 2891 if (sc->sc_flags & HIFN_IS_7811) { 2892 if (sc->sc_bar1_lastreg == reg - 4) 2893 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2894 sc->sc_bar1_lastreg = reg; 2895 } 2896 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2897 } 2898