1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */ 2 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 3 4 /* 5 * Invertex AEON / Hifn 7751 driver 6 * Copyright (c) 1999 Invertex Inc. All rights reserved. 7 * Copyright (c) 1999 Theo de Raadt 8 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 9 * http://www.netsec.net 10 * Copyright (c) 2003 Hifn Inc. 11 * 12 * This driver is based on a previous driver by Invertex, for which they 13 * requested: Please send any comments, feedback, bug-fixes, or feature 14 * requests to software@invertex.com. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. The name of the author may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 * 39 * Effort sponsored in part by the Defense Advanced Research Projects 40 * Agency (DARPA) and Air Force Research Laboratory, Air Force 41 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 42 * 43 */ 44 45 /* 46 * Driver for various Hifn encryption processors. 47 */ 48 #include "opt_hifn.h" 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/proc.h> 53 #include <sys/errno.h> 54 #include <sys/malloc.h> 55 #include <sys/kernel.h> 56 #include <sys/mbuf.h> 57 #include <sys/sysctl.h> 58 #include <sys/bus.h> 59 #include <sys/rman.h> 60 #include <sys/random.h> 61 #include <sys/uio.h> 62 63 #include <vm/vm.h> 64 #include <vm/pmap.h> 65 66 #include <machine/clock.h> 67 #include <opencrypto/cryptodev.h> 68 69 #include "cryptodev_if.h" 70 71 #include <bus/pci/pcivar.h> 72 #include <bus/pci/pcireg.h> 73 74 #ifdef HIFN_RNDTEST 75 #include "../rndtest/rndtest.h" 76 #endif 77 #include "hifn7751reg.h" 78 #include "hifn7751var.h" 79 80 /* 81 * Prototypes and count for the pci_device structure 82 */ 83 static int hifn_probe(device_t); 84 static int hifn_attach(device_t); 85 static int hifn_detach(device_t); 86 static int hifn_suspend(device_t); 87 static int hifn_resume(device_t); 88 static void hifn_shutdown(device_t); 89 90 static void hifn_reset_board(struct hifn_softc *, int); 91 static void hifn_reset_puc(struct hifn_softc *); 92 static void hifn_puc_wait(struct hifn_softc *); 93 static int hifn_enable_crypto(struct 
hifn_softc *); 94 static void hifn_set_retry(struct hifn_softc *sc); 95 static void hifn_init_dma(struct hifn_softc *); 96 static void hifn_init_pci_registers(struct hifn_softc *); 97 static int hifn_sramsize(struct hifn_softc *); 98 static int hifn_dramsize(struct hifn_softc *); 99 static int hifn_ramtype(struct hifn_softc *); 100 static void hifn_sessions(struct hifn_softc *); 101 static void hifn_intr(void *); 102 static u_int hifn_write_command(struct hifn_command *, u_int8_t *); 103 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); 104 static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *); 105 static int hifn_freesession(device_t, u_int64_t); 106 static int hifn_process(device_t, struct cryptop *, int); 107 static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); 108 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); 109 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); 110 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); 111 static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); 112 static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); 113 static int hifn_init_pubrng(struct hifn_softc *); 114 #ifndef HIFN_NO_RNG 115 static void hifn_rng(void *); 116 #endif 117 static void hifn_tick(void *); 118 static void hifn_abort(struct hifn_softc *); 119 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); 120 121 static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); 122 static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); 123 124 125 static device_method_t hifn_methods[] = { 126 /* Device interface */ 127 DEVMETHOD(device_probe, hifn_probe), 128 DEVMETHOD(device_attach, hifn_attach), 129 DEVMETHOD(device_detach, hifn_detach), 130 DEVMETHOD(device_suspend, hifn_suspend), 131 DEVMETHOD(device_resume, hifn_resume), 132 
DEVMETHOD(device_shutdown, hifn_shutdown), 133 134 /* bus interface */ 135 DEVMETHOD(bus_print_child, bus_generic_print_child), 136 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 137 138 /* crypto device methods */ 139 DEVMETHOD(cryptodev_newsession, hifn_newsession), 140 DEVMETHOD(cryptodev_freesession,hifn_freesession), 141 DEVMETHOD(cryptodev_process, hifn_process), 142 143 DEVMETHOD_END 144 }; 145 static driver_t hifn_driver = { 146 "hifn", 147 hifn_methods, 148 sizeof (struct hifn_softc) 149 }; 150 static devclass_t hifn_devclass; 151 152 DECLARE_DUMMY_MODULE(hifn); 153 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, NULL, NULL); 154 MODULE_DEPEND(hifn, crypto, 1, 1, 1); 155 #ifdef HIFN_RNDTEST 156 MODULE_DEPEND(hifn, rndtest, 1, 1, 1); 157 #endif 158 159 static __inline__ u_int32_t 160 READ_REG_0(struct hifn_softc *sc, bus_size_t reg) 161 { 162 u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); 163 sc->sc_bar0_lastreg = (bus_size_t) -1; 164 return (v); 165 } 166 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) 167 168 static __inline__ u_int32_t 169 READ_REG_1(struct hifn_softc *sc, bus_size_t reg) 170 { 171 u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); 172 sc->sc_bar1_lastreg = (bus_size_t) -1; 173 return (v); 174 } 175 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) 176 177 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters"); 178 179 #ifdef HIFN_DEBUG 180 static int hifn_debug = 0; 181 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 182 0, "control debugging msgs"); 183 #endif 184 185 static struct hifn_stats hifnstats; 186 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, 187 hifn_stats, "driver statistics"); 188 static int hifn_maxbatch = 1; 189 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 190 0, "max ops to batch w/o interrupt"); 191 192 /* 193 * Probe for a supported device. 
The PCI vendor and device
 * IDs are used to detect devices we know how to handle.
 */
static int
hifn_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
		return (0);
	/*
	 * NOTE(review): this catch-all claims ANY device with the Hifn
	 * vendor id, not just the parts enumerated above; it also probes
	 * successfully after merely printing the device id.  Confirm this
	 * is intentional (e.g. to pick up new parts) and not a leftover
	 * diagnostic.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) {
		device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) );
		return (0);
	}
	return (ENXIO);
}

/*
 * bus_dmamap_load() callback: stash the physical address of the first
 * (and only) DMA segment into *arg.  NB: nseg/error are not checked;
 * the tag is created with a single contiguous allocation in mind.
 */
static void
hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

/*
 * Return a printable part name for the attached chip, keyed off the
 * PCI vendor/device ids.  Used only for the attach-time banner.
 */
static const char*
hifn_partname(struct hifn_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_HIFN:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
		}
		return "Hifn unknown-part";
	case PCI_VENDOR_INVERTEX:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
		}
		return "Invertex unknown-part";
	case PCI_VENDOR_NETSEC:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
		}
		return "NetSec unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

/*
 * Default entropy-harvest hook: feed raw RNG output straight into the
 * system random pool.  Used when rndtest is not attached (rsp unused).
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	add_buffer_randomness_src(buf, count, RAND_SRC_HIFN);
}

/*
 * Clamp v into [min, max], emitting a console warning when the
 * user-supplied value is out of range.  Returns the clamped value.
 */
static u_int
checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
{
	if (v > max) {
		device_printf(dev, "Warning, %s %u out of range, "
			"using max %u\n", what, v, max);
		v = max;
	} else if (v < min) {
		device_printf(dev, "Warning, %s %u out of range, "
			"using min %u\n", what, v, min);
		v = min;
	}
	return v;
}

/*
 * Select PLL configuration for 795x parts.  This is complicated in
 * that we cannot determine the optimal parameters without user input.
 * The reference clock is derived from an external clock through a
 * multiplier.  The external clock is either the host bus (i.e. PCI)
 * or an external clock generator.  When using the PCI bus we assume
 * the clock is either 33 or 66 MHz; for an external source we cannot
 * tell the speed.
 *
 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
 * for an external source, followed by the frequency.  We calculate
 * the appropriate multiplier and PLL register contents accordingly.
 * When no configuration is given we default to "pci66" since that
 * always will allow the card to work.  If a card is using the PCI
 * bus clock and in a 33MHz slot then it will be operating at half
 * speed until the correct information is provided.
 *
 * We use a default setting of "ext66" because according to Mike Ham
 * of HiFn, almost every board in existence has an external crystal
 * populated at 66Mhz.  Using PCI can be a problem on modern motherboards,
 * because PCI33 can have clocks from 0 to 33Mhz, and some have
 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
298 */ 299 static void 300 hifn_getpllconfig(device_t dev, u_int *pll) 301 { 302 const char *pllspec; 303 u_int freq, mul, fl, fh; 304 u_int32_t pllconfig; 305 char *nxt; 306 307 if (resource_string_value("hifn", device_get_unit(dev), 308 "pllconfig", &pllspec)) 309 pllspec = "ext66"; 310 fl = 33, fh = 66; 311 pllconfig = 0; 312 if (strncmp(pllspec, "ext", 3) == 0) { 313 pllspec += 3; 314 pllconfig |= HIFN_PLL_REF_SEL; 315 switch (pci_get_device(dev)) { 316 case PCI_PRODUCT_HIFN_7955: 317 case PCI_PRODUCT_HIFN_7956: 318 fl = 20, fh = 100; 319 break; 320 #ifdef notyet 321 case PCI_PRODUCT_HIFN_7954: 322 fl = 20, fh = 66; 323 break; 324 #endif 325 } 326 } else if (strncmp(pllspec, "pci", 3) == 0) 327 pllspec += 3; 328 freq = strtoul(pllspec, &nxt, 10); 329 if (nxt == pllspec) 330 freq = 66; 331 else 332 freq = checkmaxmin(dev, "frequency", freq, fl, fh); 333 /* 334 * Calculate multiplier. We target a Fck of 266 MHz, 335 * allowing only even values, possibly rounded down. 336 * Multipliers > 8 must set the charge pump current. 337 */ 338 mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12); 339 pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT; 340 if (mul > 8) 341 pllconfig |= HIFN_PLL_IS; 342 *pll = pllconfig; 343 } 344 345 /* 346 * Attach an interface that successfully probed. 347 */ 348 static int 349 hifn_attach(device_t dev) 350 { 351 struct hifn_softc *sc = device_get_softc(dev); 352 u_int32_t cmd; 353 caddr_t kva; 354 int rseg, rid; 355 char rbase; 356 u_int16_t ena, rev; 357 358 KASSERT(sc != NULL, ("hifn_attach: null software carrier!")); 359 bzero(sc, sizeof (*sc)); 360 sc->sc_dev = dev; 361 362 lockinit(&sc->sc_lock, __DECONST(char *, device_get_nameunit(dev)), 363 0, LK_CANRECURSE); 364 365 /* XXX handle power management */ 366 367 /* 368 * The 7951 and 795x have a random number generator and 369 * public key support; note this. 
370 */ 371 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 372 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || 373 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 374 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) 375 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; 376 /* 377 * The 7811 has a random number generator and 378 * we also note it's identity 'cuz of some quirks. 379 */ 380 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 381 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) 382 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; 383 384 /* 385 * The 795x parts support AES. 386 */ 387 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 388 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 389 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) { 390 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; 391 /* 392 * Select PLL configuration. This depends on the 393 * bus and board design and must be manually configured 394 * if the default setting is unacceptable. 395 */ 396 hifn_getpllconfig(dev, &sc->sc_pllconfig); 397 } 398 399 /* 400 * Configure support for memory-mapped access to 401 * registers and for DMA operations. 402 */ 403 #define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN) 404 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 405 cmd |= PCIM_ENA; 406 pci_write_config(dev, PCIR_COMMAND, cmd, 4); 407 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 408 if ((cmd & PCIM_ENA) != PCIM_ENA) { 409 device_printf(dev, "failed to enable %s\n", 410 (cmd & PCIM_ENA) == 0 ? 411 "memory mapping & bus mastering" : 412 (cmd & PCIM_CMD_MEMEN) == 0 ? 413 "memory mapping" : "bus mastering"); 414 goto fail_pci; 415 } 416 #undef PCIM_ENA 417 418 /* 419 * Setup PCI resources. Note that we record the bus 420 * tag and handle for each register mapping, this is 421 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, 422 * and WRITE_REG_1 macros throughout the driver. 
423 */ 424 rid = HIFN_BAR0; 425 sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 426 0, ~0, 1, RF_ACTIVE); 427 if (sc->sc_bar0res == NULL) { 428 device_printf(dev, "cannot map bar%d register space\n", 0); 429 goto fail_pci; 430 } 431 sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); 432 sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); 433 sc->sc_bar0_lastreg = (bus_size_t) -1; 434 435 rid = HIFN_BAR1; 436 sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 437 0, ~0, 1, RF_ACTIVE); 438 if (sc->sc_bar1res == NULL) { 439 device_printf(dev, "cannot map bar%d register space\n", 1); 440 goto fail_io0; 441 } 442 sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); 443 sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); 444 sc->sc_bar1_lastreg = (bus_size_t) -1; 445 446 hifn_set_retry(sc); 447 448 /* 449 * Setup the area where the Hifn DMA's descriptors 450 * and associated data structures. 451 */ 452 if (bus_dma_tag_create(NULL, /* parent */ 453 1, 0, /* alignment,boundary */ 454 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 455 BUS_SPACE_MAXADDR, /* highaddr */ 456 NULL, NULL, /* filter, filterarg */ 457 HIFN_MAX_DMALEN, /* maxsize */ 458 MAX_SCATTER, /* nsegments */ 459 HIFN_MAX_SEGLEN, /* maxsegsize */ 460 BUS_DMA_ALLOCNOW, /* flags */ 461 &sc->sc_dmat)) { 462 device_printf(dev, "cannot allocate DMA tag\n"); 463 goto fail_io1; 464 } 465 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { 466 device_printf(dev, "cannot create dma map\n"); 467 bus_dma_tag_destroy(sc->sc_dmat); 468 goto fail_io1; 469 } 470 if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { 471 device_printf(dev, "cannot alloc dma buffer\n"); 472 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 473 bus_dma_tag_destroy(sc->sc_dmat); 474 goto fail_io1; 475 } 476 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, 477 sizeof (*sc->sc_dma), 478 hifn_dmamap_cb, &sc->sc_dma_physaddr, 479 BUS_DMA_NOWAIT)) { 480 device_printf(dev, "cannot load dma map\n"); 
481 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); 482 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 483 bus_dma_tag_destroy(sc->sc_dmat); 484 goto fail_io1; 485 } 486 sc->sc_dma = (struct hifn_dma *)kva; 487 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 488 489 KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); 490 KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); 491 KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); 492 KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); 493 494 /* 495 * Reset the board and do the ``secret handshake'' 496 * to enable the crypto support. Then complete the 497 * initialization procedure by setting up the interrupt 498 * and hooking in to the system crypto support so we'll 499 * get used for system services like the crypto device, 500 * IPsec, RNG device, etc. 501 */ 502 hifn_reset_board(sc, 0); 503 504 if (hifn_enable_crypto(sc) != 0) { 505 device_printf(dev, "crypto enabling failed\n"); 506 goto fail_mem; 507 } 508 hifn_reset_puc(sc); 509 510 hifn_init_dma(sc); 511 hifn_init_pci_registers(sc); 512 513 /* XXX can't dynamically determine ram type for 795x; force dram */ 514 if (sc->sc_flags & HIFN_IS_7956) 515 sc->sc_drammodel = 1; 516 else if (hifn_ramtype(sc)) 517 goto fail_mem; 518 519 if (sc->sc_drammodel == 0) 520 hifn_sramsize(sc); 521 else 522 hifn_dramsize(sc); 523 524 /* 525 * Workaround for NetSec 7751 rev A: half ram size because two 526 * of the address lines were left floating 527 */ 528 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && 529 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && 530 pci_get_revid(dev) == 0x61) /*XXX???*/ 531 sc->sc_ramsize >>= 1; 532 533 /* 534 * Arrange the interrupt line. 
535 */ 536 rid = 0; 537 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 538 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); 539 if (sc->sc_irq == NULL) { 540 device_printf(dev, "could not map interrupt\n"); 541 goto fail_mem; 542 } 543 /* 544 * NB: Network code assumes we are blocked with splimp() 545 * so make sure the IRQ is marked appropriately. 546 */ 547 if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, 548 hifn_intr, sc, 549 &sc->sc_intrhand, NULL)) { 550 device_printf(dev, "could not setup interrupt\n"); 551 goto fail_intr2; 552 } 553 554 hifn_sessions(sc); 555 556 /* 557 * NB: Keep only the low 16 bits; this masks the chip id 558 * from the 7951. 559 */ 560 rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; 561 562 rseg = sc->sc_ramsize / 1024; 563 rbase = 'K'; 564 if (sc->sc_ramsize >= (1024 * 1024)) { 565 rbase = 'M'; 566 rseg /= 1024; 567 } 568 device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n", 569 hifn_partname(sc), rev, 570 rseg, rbase, sc->sc_drammodel ? 'd' : 's', 571 sc->sc_maxses); 572 573 if (sc->sc_flags & HIFN_IS_7956) 574 kprintf(", pll=0x%x<%s clk, %ux mult>", 575 sc->sc_pllconfig, 576 sc->sc_pllconfig & HIFN_PLL_REF_SEL ? 
"ext" : "pci", 577 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11)); 578 kprintf("\n"); 579 580 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); 581 if (sc->sc_cid < 0) { 582 device_printf(dev, "could not get crypto driver id\n"); 583 goto fail_intr; 584 } 585 586 WRITE_REG_0(sc, HIFN_0_PUCNFG, 587 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); 588 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 589 590 switch (ena) { 591 case HIFN_PUSTAT_ENA_2: 592 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); 593 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0); 594 if (sc->sc_flags & HIFN_HAS_AES) 595 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); 596 /*FALLTHROUGH*/ 597 case HIFN_PUSTAT_ENA_1: 598 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); 599 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); 600 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); 601 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); 602 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); 603 break; 604 } 605 606 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 608 609 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) 610 hifn_init_pubrng(sc); 611 612 /* NB: 1 means the callout runs w/o Giant locked */ 613 callout_init_mp(&sc->sc_tickto); 614 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 615 616 return (0); 617 618 fail_intr: 619 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); 620 fail_intr2: 621 /* XXX don't store rid */ 622 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 623 fail_mem: 624 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); 625 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); 626 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 627 bus_dma_tag_destroy(sc->sc_dmat); 628 629 /* Turn off DMA polling */ 630 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 631 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 632 fail_io1: 633 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, 
sc->sc_bar1res); 634 fail_io0: 635 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); 636 fail_pci: 637 lockuninit(&sc->sc_lock); 638 return (ENXIO); 639 } 640 641 /* 642 * Detach an interface that successfully probed. 643 */ 644 static int 645 hifn_detach(device_t dev) 646 { 647 struct hifn_softc *sc = device_get_softc(dev); 648 649 KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); 650 651 /* disable interrupts */ 652 WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); 653 654 /*XXX other resources */ 655 callout_stop(&sc->sc_tickto); 656 callout_stop(&sc->sc_rngto); 657 #ifdef HIFN_RNDTEST 658 if (sc->sc_rndtest) 659 rndtest_detach(sc->sc_rndtest); 660 #endif 661 662 /* Turn off DMA polling */ 663 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 664 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 665 666 crypto_unregister_all(sc->sc_cid); 667 668 bus_generic_detach(dev); /*XXX should be no children, right? */ 669 670 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); 671 /* XXX don't store rid */ 672 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 673 674 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); 675 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); 676 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 677 bus_dma_tag_destroy(sc->sc_dmat); 678 679 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); 680 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); 681 682 lockuninit(&sc->sc_lock); 683 684 return (0); 685 } 686 687 /* 688 * Stop all chip I/O so that the kernel's probe routines don't 689 * get confused by errant DMAs when rebooting. 690 */ 691 static void 692 hifn_shutdown(device_t dev) 693 { 694 #ifdef notyet 695 hifn_stop(device_get_softc(dev)); 696 #endif 697 } 698 699 /* 700 * Device suspend routine. Stop the interface and save some PCI 701 * settings in case the BIOS doesn't restore them properly on 702 * resume. 
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	hifn_stop(sc);
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif
	/* NB: only the suspended flag is maintained today; the real
	 * stop/save logic above is still under #ifdef notyet. */
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}

/*
 * Bring up the public-key engine and the RNG (where present): reset
 * the PK/RNG unit on non-7811 parts, enable the RNG and schedule the
 * periodic harvest callout, and unmask the PK-done interrupt.
 * Returns 0 on success, 1 if the PK unit fails to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

#ifndef HIFN_NO_RNG
	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: cycle the enable bit around reconfiguring. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		sc->sc_rngfirst = 1;
		/* Harvest roughly 100 times a second (at least once/tick). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		/* NB: 1 means the callout runs w/o Giant locked */
		callout_init_mp(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

#ifndef HIFN_NO_RNG
/*
 * Periodic RNG harvest callout: pull words from the chip's RNG and
 * feed them to the harvest hook, discarding the very first read.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* NB: no reschedule -- RNG stays disabled. */
				device_printf(sc->sc_dev,
					"RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at
least two words in the RNG FIFO 851 * at this point. 852 */ 853 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 854 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 855 /* NB: discard first data read */ 856 if (sc->sc_rngfirst) 857 sc->sc_rngfirst = 0; 858 else 859 (*sc->sc_harvest)(sc->sc_rndtest, 860 num, sizeof (num)); 861 } 862 } else { 863 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); 864 865 /* NB: discard first data read */ 866 if (sc->sc_rngfirst) 867 sc->sc_rngfirst = 0; 868 else 869 (*sc->sc_harvest)(sc->sc_rndtest, 870 num, sizeof (num[0])); 871 } 872 873 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); 874 #undef RANDOM_BITS 875 } 876 #endif 877 878 static void 879 hifn_puc_wait(struct hifn_softc *sc) 880 { 881 int i; 882 int reg = HIFN_0_PUCTRL; 883 884 if (sc->sc_flags & HIFN_IS_7956) { 885 reg = HIFN_0_PUCTRL2; 886 } 887 888 for (i = 5000; i > 0; i--) { 889 DELAY(1); 890 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET)) 891 break; 892 } 893 if (!i) 894 device_printf(sc->sc_dev, "proc unit did not reset\n"); 895 } 896 897 /* 898 * Reset the processing unit. 899 */ 900 static void 901 hifn_reset_puc(struct hifn_softc *sc) 902 { 903 int reg = HIFN_0_PUCTRL; 904 905 if (sc->sc_flags & HIFN_IS_7956) { 906 reg = HIFN_0_PUCTRL2; 907 } 908 909 /* Reset processing unit */ 910 WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA); 911 hifn_puc_wait(sc); 912 } 913 914 /* 915 * Set the Retry and TRDY registers; note that we set them to 916 * zero because the 7811 locks up when forced to retry (section 917 * 3.6 of "Specification Update SU-0014-04". Not clear if we 918 * should do this for all Hifn parts, but it doesn't seem to hurt. 919 */ 920 static void 921 hifn_set_retry(struct hifn_softc *sc) 922 { 923 /* NB: RETRY only responds to 8-bit reads/writes */ 924 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); 925 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4); 926 } 927 928 /* 929 * Resets the board. Values in the regesters are left as is 930 * from the reset (i.e. 
initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait (up to ~1s) for 7811 CRAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			kprintf(": cram init timeout\n");
	} else {
		/* set up DMA configuration register #2 */
		/* turn off all PK and BAR0 swaps */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		    (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		    (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}
}

/*
 * Advance the crypto-unlock signature by cnt steps of the chip's
 * LFSR-style mixing function (parity of masked bits folded into a
 * left shift).  Used by hifn_enable_crypto's ``secret handshake''.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

/*
 * Per-device unlock keys for the crypto-enable handshake.  All known
 * cards ship with the all-zeros key below.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up the unlock key for this vendor/device pair. */
	for (i = 0; i < NELEM(pci2id); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save register state so it can be restored at "report:". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock. Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Unlock sequence: put the DMA unit in unlock mode, read the
	 * seed from SECRET1, then clock the 13 derived signature words
	 * into SECRET2 with 1ms settling delays between steps.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the encryption level to see if the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
			    "locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
			    "successfully!\n");
	}
#endif

report:
	/* Restore the saved configuration registers. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four DMA engines and
	 * acknowledge (write-1-to-clear) any pending status bits.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/* Build the interrupt-enable mask; C_WAIT stays off until needed. */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		    | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured. This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		/* Context size depends on the enabled encryption config. */
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Cap at 2048 sessions regardless of ram size. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram). Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a test pattern to ram address 0 and read it back; if it
	 * does not survive the round trip, assume DRAM. Try both 0x55
	 * and 0xaa patterns so stuck-high and stuck-low bits are caught.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Both patterns survived: leave sc_drammodel at its SRAM default. */
	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe the size of the SRAM by writing a distinct stamp (the step
 * index in the first 4 bytes, a fixed pattern in the rest) at every
 * 16KB step from the top down, then reading back from the bottom up.
 * Aliasing caused by address wrap overwrites lower stamps, so the
 * highest step that reads back intact bounds the ram size, which is
 * accumulated into sc_ramsize. Always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Stamp descending so aliased addresses keep the LOWER index. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Verify ascending; stop growing sc_ramsize at the first mismatch. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
 * is already set up correctly.
1359 */ 1360 static int 1361 hifn_dramsize(struct hifn_softc *sc) 1362 { 1363 u_int32_t cnfg; 1364 1365 if (sc->sc_flags & HIFN_IS_7956) { 1366 /* 1367 * 7955/7956 have a fixed internal ram of only 32K. 1368 */ 1369 sc->sc_ramsize = 32768; 1370 } else { 1371 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & 1372 HIFN_PUCNFG_DRAMMASK; 1373 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); 1374 } 1375 return (0); 1376 } 1377 1378 static void 1379 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) 1380 { 1381 struct hifn_dma *dma = sc->sc_dma; 1382 1383 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 1384 dma->cmdi = 0; 1385 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 1386 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1387 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 1388 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1389 } 1390 *cmdp = dma->cmdi++; 1391 dma->cmdk = dma->cmdi; 1392 1393 if (dma->srci == HIFN_D_SRC_RSIZE) { 1394 dma->srci = 0; 1395 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | 1396 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1397 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1398 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1399 } 1400 *srcp = dma->srci++; 1401 dma->srck = dma->srci; 1402 1403 if (dma->dsti == HIFN_D_DST_RSIZE) { 1404 dma->dsti = 0; 1405 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | 1406 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1407 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1408 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1409 } 1410 *dstp = dma->dsti++; 1411 dma->dstk = dma->dsti; 1412 1413 if (dma->resi == HIFN_D_RES_RSIZE) { 1414 dma->resi = 0; 1415 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1416 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1417 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1418 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1419 } 1420 *resp = dma->resi++; 1421 dma->resk = dma->resi; 1422 } 1423 1424 static int 1425 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1426 { 1427 struct hifn_dma *dma = sc->sc_dma; 1428 
hifn_base_command_t wc; 1429 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1430 int r, cmdi, resi, srci, dsti; 1431 1432 wc.masks = htole16(3 << 13); 1433 wc.session_num = htole16(addr >> 14); 1434 wc.total_source_count = htole16(8); 1435 wc.total_dest_count = htole16(addr & 0x3fff); 1436 1437 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1438 1439 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1440 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1441 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1442 1443 /* build write command */ 1444 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1445 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; 1446 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1447 1448 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr 1449 + offsetof(struct hifn_dma, test_src)); 1450 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr 1451 + offsetof(struct hifn_dma, test_dst)); 1452 1453 dma->cmdr[cmdi].l = htole32(16 | masks); 1454 dma->srcr[srci].l = htole32(8 | masks); 1455 dma->dstr[dsti].l = htole32(4 | masks); 1456 dma->resr[resi].l = htole32(4 | masks); 1457 1458 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1459 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1460 1461 for (r = 10000; r >= 0; r--) { 1462 DELAY(10); 1463 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1464 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1465 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1466 break; 1467 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1468 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1469 } 1470 if (r == 0) { 1471 device_printf(sc->sc_dev, "writeramaddr -- " 1472 "result[%d](addr %d) still valid\n", resi, addr); 1473 r = -1; 1474 return (-1); 1475 } else 1476 r = 0; 1477 1478 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1479 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1480 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1481 1482 return (r); 1483 } 1484 1485 static int 1486 hifn_readramaddr(struct hifn_softc *sc, int addr, 
u_int8_t *data) 1487 { 1488 struct hifn_dma *dma = sc->sc_dma; 1489 hifn_base_command_t rc; 1490 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1491 int r, cmdi, srci, dsti, resi; 1492 1493 rc.masks = htole16(2 << 13); 1494 rc.session_num = htole16(addr >> 14); 1495 rc.total_source_count = htole16(addr & 0x3fff); 1496 rc.total_dest_count = htole16(8); 1497 1498 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1499 1500 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1501 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1502 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1503 1504 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1505 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; 1506 1507 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + 1508 offsetof(struct hifn_dma, test_src)); 1509 dma->test_src = 0; 1510 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + 1511 offsetof(struct hifn_dma, test_dst)); 1512 dma->test_dst = 0; 1513 dma->cmdr[cmdi].l = htole32(8 | masks); 1514 dma->srcr[srci].l = htole32(8 | masks); 1515 dma->dstr[dsti].l = htole32(8 | masks); 1516 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1517 1518 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1519 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1520 1521 for (r = 10000; r >= 0; r--) { 1522 DELAY(10); 1523 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1524 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1525 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1526 break; 1527 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1528 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1529 } 1530 if (r == 0) { 1531 device_printf(sc->sc_dev, "readramaddr -- " 1532 "result[%d](addr %d) still valid\n", resi, addr); 1533 r = -1; 1534 } else { 1535 r = 0; 1536 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1537 } 1538 1539 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1540 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1541 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1542 1543 return (r); 1544 } 

/*
 * Initialize the descriptor rings.
 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/*
	 * The extra descriptor past the end of each ring is a JUMP back
	 * to the ring's start; point each one at its ring base.
	 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));

	/* Reset in-use counts ("u"), producer ("i") and cleanup ("k") indices. */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}

/*
 * Writes out the raw command buffer space. Returns the
 * command buffer size.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: low 16 bits of the source/dest lengths go in the
	 * count fields, the high bits are folded into session_num.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;

	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* Optional crypt sub-command. */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* Inline key material follows the sub-commands when flagged NEW. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 key space is a fixed 256 bytes: repeat the
			 * key to fill it, then 4 bytes of zero padding.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* Neither MAC nor crypt: pad the command out with 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
#undef	MIN
}

/*
 * Return non-zero iff every DMA segment of the operand is longword
 * aligned (the last segment's length is allowed to be odd).
 */
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Advance a dst ring index, re-arming the end-of-ring JUMP descriptor
 * and wrapping to 0 when the end is reached.
 */
static __inline int
hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}
	return (idx);
}

/*
 * Fill dst ring descriptors for the command's destination segments.
 * If the mapping has a sub-longword "slop" tail, the final bytes are
 * directed into the per-command slop[] area instead of the caller's
 * buffer. Returns the new dst producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* Route the unaligned tail into the slop area. */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Advance a src ring index; src-ring counterpart of
 * hifn_dmamap_dstwrap().
 */
static __inline int
hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (++idx == HIFN_D_SRC_RSIZE) {
		dma->srcr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		idx = 0;
	}
	return (idx);
}

/*
 * Fill src ring descriptors for every source segment; the final one is
 * tagged HIFN_D_LAST. Returns the new src producer index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		idx = hifn_dmamap_srcwrap(sc, idx);
	}
	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}

/*
 * bus_dmamap_load callback: stash the segment list, count, and total
 * mapped size into the hifn_operand passed as "arg".
 */
static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct hifn_operand *op = arg;

	KASSERT(nsegs <= MAX_SCATTER,
	    ("hifn_op_cb: too many DMA segments (%u > %u) "
	     "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/*
 * Queue one crypto operation on the hardware: map the source (and, if
 * realignment is needed, a freshly allocated destination mbuf chain),
 * write the command descriptor, post src/dst/result descriptors, and
 * kick any idle DMA engines. Returns 0 on success, ERESTART when ring
 * or memory pressure suggests requeueing, or an errno on hard failure.
 * Runs under HIFN_LOCK.
 */
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen, csr;
	int cmdi, resi, err = 0;

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	HIFN_LOCK(sc);
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "cmd/result exhaustion, cmdu %u resu %u\n",
			    dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		HIFN_UNLOCK(sc);
		return (ERESTART);
	}

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
		hifnstats.hst_nomem_map++;
		HIFN_UNLOCK(sc);
		return (ENOMEM);
	}

	/* Map the source: mbuf chain or uio, depending on request flags. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
#if 0
		cmd->src_io->uio_segflg = UIO_USERSPACE;
#endif
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		/* Aligned: operate in place; remember sub-longword slop. */
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* Cannot realign a uio in place. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
			    ("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment. If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_NOWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_NOWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_NOWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Grow the chain until it can hold totlen bytes. */
			while (totlen > 0) {
				MGET(m, M_NOWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_NOWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
#if 0
			cmd->dst_io->uio_segflg |= UIO_USERSPACE;
#endif
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
			    dma->srcu, cmd->src_nsegs,
			    dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* Claim a command slot (wrapping the ring if needed). */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		kprintf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
	    ("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/*
	 * When the caller hints more ops are coming, batch completions by
	 * masking the done interrupt (bounded by hifn_maxbatch).
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	/* Enable whichever DMA engines are not already running. */
	csr = 0;
	if (sc->sc_c_busy == 0) {
		csr |= HIFN_DMACSR_C_CTRL_ENA;
		sc->sc_c_busy = 1;
	}
	if (sc->sc_s_busy == 0) {
		csr |= HIFN_DMACSR_S_CTRL_ENA;
		sc->sc_s_busy = 1;
	}
	if (sc->sc_r_busy == 0) {
		csr |= HIFN_DMACSR_R_CTRL_ENA;
		sc->sc_r_busy = 1;
	}
	if (sc->sc_d_busy == 0) {
		csr |= HIFN_DMACSR_D_CTRL_ENA;
		sc->sc_d_busy = 1;
	}
	if (csr)
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	/* Arm the watchdog: hifn_tick() counts this down once a second. */
	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	HIFN_UNLOCK(sc);
	return (err);
}

/*
 * Once-a-second watchdog: when the device has been idle for 5 ticks
 * (sc_active reaches 0), disable any DMA engines whose rings have
 * drained. Re-arms itself via callout_reset().
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	HIFN_UNLOCK(sc);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		hifnstats.hst_noirq++;
		return;
	}

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc,
HIFN_1_DMA_IER), sc->sc_dmaier, 2230 dma->cmdi, dma->srci, dma->dsti, dma->resi, 2231 dma->cmdk, dma->srck, dma->dstk, dma->resk, 2232 dma->cmdu, dma->srcu, dma->dstu, dma->resu); 2233 } 2234 #endif 2235 2236 WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); 2237 2238 if ((sc->sc_flags & HIFN_HAS_PUBLIC) && 2239 (dmacsr & HIFN_DMACSR_PUBDONE)) 2240 WRITE_REG_1(sc, HIFN_1_PUB_STATUS, 2241 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); 2242 2243 restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); 2244 if (restart) 2245 device_printf(sc->sc_dev, "overrun %x\n", dmacsr); 2246 2247 if (sc->sc_flags & HIFN_IS_7811) { 2248 if (dmacsr & HIFN_DMACSR_ILLR) 2249 device_printf(sc->sc_dev, "illegal read\n"); 2250 if (dmacsr & HIFN_DMACSR_ILLW) 2251 device_printf(sc->sc_dev, "illegal write\n"); 2252 } 2253 2254 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | 2255 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); 2256 if (restart) { 2257 device_printf(sc->sc_dev, "abort, resetting.\n"); 2258 hifnstats.hst_abort++; 2259 hifn_abort(sc); 2260 HIFN_UNLOCK(sc); 2261 return; 2262 } 2263 2264 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { 2265 /* 2266 * If no slots to process and we receive a "waiting on 2267 * command" interrupt, we disable the "waiting on command" 2268 * (by clearing it). 
2269 */ 2270 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 2271 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2272 } 2273 2274 /* clear the rings */ 2275 i = dma->resk; u = dma->resu; 2276 while (u != 0) { 2277 HIFN_RESR_SYNC(sc, i, 2278 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2279 if (dma->resr[i].l & htole32(HIFN_D_VALID)) { 2280 HIFN_RESR_SYNC(sc, i, 2281 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2282 break; 2283 } 2284 2285 if (i != HIFN_D_RES_RSIZE) { 2286 struct hifn_command *cmd; 2287 u_int8_t *macbuf = NULL; 2288 2289 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); 2290 cmd = dma->hifn_commands[i]; 2291 KASSERT(cmd != NULL, 2292 ("hifn_intr: null command slot %u", i)); 2293 dma->hifn_commands[i] = NULL; 2294 2295 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2296 macbuf = dma->result_bufs[i]; 2297 macbuf += 12; 2298 } 2299 2300 hifn_callback(sc, cmd, macbuf); 2301 hifnstats.hst_opackets++; 2302 u--; 2303 } 2304 2305 if (++i == (HIFN_D_RES_RSIZE + 1)) 2306 i = 0; 2307 } 2308 dma->resk = i; dma->resu = u; 2309 2310 i = dma->srck; u = dma->srcu; 2311 while (u != 0) { 2312 if (i == HIFN_D_SRC_RSIZE) 2313 i = 0; 2314 HIFN_SRCR_SYNC(sc, i, 2315 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2316 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { 2317 HIFN_SRCR_SYNC(sc, i, 2318 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2319 break; 2320 } 2321 i++, u--; 2322 } 2323 dma->srck = i; dma->srcu = u; 2324 2325 i = dma->cmdk; u = dma->cmdu; 2326 while (u != 0) { 2327 HIFN_CMDR_SYNC(sc, i, 2328 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2329 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { 2330 HIFN_CMDR_SYNC(sc, i, 2331 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2332 break; 2333 } 2334 if (i != HIFN_D_CMD_RSIZE) { 2335 u--; 2336 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); 2337 } 2338 if (++i == (HIFN_D_CMD_RSIZE + 1)) 2339 i = 0; 2340 } 2341 dma->cmdk = i; dma->cmdu = u; 2342 2343 HIFN_UNLOCK(sc); 2344 2345 if (sc->sc_needwakeup) { /* XXX check high watermark */ 2346 int 
wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 2347 #ifdef HIFN_DEBUG 2348 if (hifn_debug) 2349 device_printf(sc->sc_dev, 2350 "wakeup crypto (%x) u %d/%d/%d/%d\n", 2351 sc->sc_needwakeup, 2352 dma->cmdu, dma->srcu, dma->dstu, dma->resu); 2353 #endif 2354 sc->sc_needwakeup &= ~wakeup; 2355 crypto_unblock(sc->sc_cid, wakeup); 2356 } 2357 } 2358 2359 /* 2360 * Allocate a new 'session' and return an encoded session id. 'sidp' 2361 * contains our registration id, and should contain an encoded session 2362 * id on successful allocation. 2363 */ 2364 static int 2365 hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) 2366 { 2367 struct cryptoini *c; 2368 struct hifn_softc *sc = device_get_softc(dev); 2369 int mac = 0, cry = 0, sesn; 2370 struct hifn_session *ses = NULL; 2371 2372 KASSERT(sc != NULL, ("hifn_newsession: null softc")); 2373 if (sidp == NULL || cri == NULL || sc == NULL) 2374 return (EINVAL); 2375 2376 HIFN_LOCK(sc); 2377 if (sc->sc_sessions == NULL) { 2378 ses = sc->sc_sessions = (struct hifn_session *)kmalloc( 2379 sizeof(*ses), M_DEVBUF, M_NOWAIT); 2380 if (ses == NULL) { 2381 HIFN_UNLOCK(sc); 2382 return (ENOMEM); 2383 } 2384 sesn = 0; 2385 sc->sc_nsessions = 1; 2386 } else { 2387 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 2388 if (!sc->sc_sessions[sesn].hs_used) { 2389 ses = &sc->sc_sessions[sesn]; 2390 break; 2391 } 2392 } 2393 2394 if (ses == NULL) { 2395 sesn = sc->sc_nsessions; 2396 ses = (struct hifn_session *)kmalloc((sesn + 1) * 2397 sizeof(*ses), M_DEVBUF, M_NOWAIT); 2398 if (ses == NULL) { 2399 HIFN_UNLOCK(sc); 2400 return (ENOMEM); 2401 } 2402 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); 2403 bzero(sc->sc_sessions, sesn * sizeof(*ses)); 2404 kfree(sc->sc_sessions, M_DEVBUF); 2405 sc->sc_sessions = ses; 2406 ses = &sc->sc_sessions[sesn]; 2407 sc->sc_nsessions++; 2408 } 2409 } 2410 HIFN_UNLOCK(sc); 2411 2412 bzero(ses, sizeof(*ses)); 2413 ses->hs_used = 1; 2414 2415 for (c = cri; c != NULL; c = 
c->cri_next) { 2416 switch (c->cri_alg) { 2417 case CRYPTO_MD5: 2418 case CRYPTO_SHA1: 2419 case CRYPTO_MD5_HMAC: 2420 case CRYPTO_SHA1_HMAC: 2421 if (mac) 2422 return (EINVAL); 2423 mac = 1; 2424 ses->hs_mlen = c->cri_mlen; 2425 if (ses->hs_mlen == 0) { 2426 switch (c->cri_alg) { 2427 case CRYPTO_MD5: 2428 case CRYPTO_MD5_HMAC: 2429 ses->hs_mlen = 16; 2430 break; 2431 case CRYPTO_SHA1: 2432 case CRYPTO_SHA1_HMAC: 2433 ses->hs_mlen = 20; 2434 break; 2435 } 2436 } 2437 break; 2438 case CRYPTO_DES_CBC: 2439 case CRYPTO_3DES_CBC: 2440 case CRYPTO_AES_CBC: 2441 /* XXX this may read fewer, does it matter? */ 2442 read_random(ses->hs_iv, 2443 (c->cri_alg == CRYPTO_AES_CBC ? 2444 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH), 2445 0); 2446 /*FALLTHROUGH*/ 2447 case CRYPTO_ARC4: 2448 if (cry) 2449 return (EINVAL); 2450 cry = 1; 2451 break; 2452 default: 2453 return (EINVAL); 2454 } 2455 } 2456 if (mac == 0 && cry == 0) 2457 return (EINVAL); 2458 2459 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn); 2460 2461 return (0); 2462 } 2463 2464 /* 2465 * Deallocate a session. 2466 * XXX this routine should run a zero'd mac/encrypt key into context ram. 2467 * XXX to blow away any keys already stored there. 
 */
#define	CRYPTO_SESID2LID(_sid)	(((u_int32_t) (_sid)) & 0xffffffff)

static int
hifn_freesession(device_t dev, u_int64_t tid)
{
	struct hifn_softc *sc = device_get_softc(dev);
	int session, error;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
	if (sc == NULL)
		return (EINVAL);

	HIFN_LOCK(sc);
	session = HIFN_SESSION(sid);
	if (session < sc->sc_nsessions) {
		/* Zeroing clears hs_used, returning the slot to the pool. */
		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
		error = 0;
	} else
		error = EINVAL;
	HIFN_UNLOCK(sc);

	return (error);
}

/*
 * Accept a crypto request from the opencrypto framework.  Validates the
 * descriptor chain (at most one cipher + one hash, in an order the 7751
 * can execute), builds a hifn_command and hands it to hifn_crypto().
 * Returns 0 on dispatch; ERESTART asks the framework to requeue.
 */
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}

	cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO);

	/* In-place operation: source and destination share the buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		/* Single descriptor: plain hash or plain cipher. */
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		/*
		 * Two descriptors: the part only supports hash-then-decrypt
		 * or encrypt-then-hash orderings.
		 */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		/* RC4 is a stream cipher and takes no IV. */
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Explicit IV, else the session's random IV. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* Write the IV into the buffer if absent. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					crypto_copyback(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: IV is explicit or in the buffer. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else {
					crypto_copydata(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			}
		}

		/*
		 * NOTE(review): the CRD_F_KEY_EXPLICIT test is redundant —
		 * NEW_KEY is set unconditionally two statements below.
		 */
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* HMAC: copy the key in and zero-pad to the context size. */
		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		kfree(cmd, M_DEVBUF);
		/* Ask for a crypto_unblock() once the rings drain. */
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		kfree(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}

/*
 * Recover from a DMA abort: complete what the hardware already finished,
 * fail everything still pending in the result ring with ENOMEM, then
 * reset and re-initialize the board.  Called from hifn_intr() with the
 * softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			kfree(cmd, M_DEVBUF);
			/*
			 * NOTE(review): crp_etype was just set to ENOMEM on
			 * both branches above, so this EAGAIN test can never
			 * fail — it only matters if the XXX above is fixed.
			 */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}

/*
 * Completion path for one request: sync the DMA maps, hand the (possibly
 * new) destination mbuf chain back to the cryptop, copy slop bytes and
 * the MAC result into the caller's buffer, save the last cipher block as
 * the next IV for encrypt sessions, release resources and fire the
 * request's callback via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * Output landed in a driver-allocated chain: trim it
			 * to the source length and swap it into the cryptop.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	/* Copy back the sub-word tail the part returned via the slop area. */
	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reclaim finished destination-ring descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/* Encrypt (not decode): save last cipher block as the next IV. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	/* Deliver the MAC digest to the descriptor's inject offset. */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	kfree(cmd, M_DEVBUF);
	crypto_done(crp);
}

/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 * we do this because reading from an arbitrary (e.g.
last) 2917 * register may not always work. 2918 */ 2919 static void 2920 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2921 { 2922 if (sc->sc_flags & HIFN_IS_7811) { 2923 if (sc->sc_bar0_lastreg == reg - 4) 2924 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); 2925 sc->sc_bar0_lastreg = reg; 2926 } 2927 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2928 } 2929 2930 static void 2931 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2932 { 2933 if (sc->sc_flags & HIFN_IS_7811) { 2934 if (sc->sc_bar1_lastreg == reg - 4) 2935 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2936 sc->sc_bar1_lastreg = reg; 2937 } 2938 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2939 } 2940