1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */ 2 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 3 4 /* 5 * Invertex AEON / Hifn 7751 driver 6 * Copyright (c) 1999 Invertex Inc. All rights reserved. 7 * Copyright (c) 1999 Theo de Raadt 8 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 9 * http://www.netsec.net 10 * Copyright (c) 2003 Hifn Inc. 11 * 12 * This driver is based on a previous driver by Invertex, for which they 13 * requested: Please send any comments, feedback, bug-fixes, or feature 14 * requests to software@invertex.com. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. The name of the author may not be used to endorse or promote products 26 * derived from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 * 39 * Effort sponsored in part by the Defense Advanced Research Projects 40 * Agency (DARPA) and Air Force Research Laboratory, Air Force 41 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 42 * 43 */ 44 45 /* 46 * Driver for various Hifn encryption processors. 47 */ 48 #include "opt_hifn.h" 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/proc.h> 53 #include <sys/errno.h> 54 #include <sys/malloc.h> 55 #include <sys/kernel.h> 56 #include <sys/mbuf.h> 57 #include <sys/sysctl.h> 58 #include <sys/bus.h> 59 #include <sys/rman.h> 60 #include <sys/random.h> 61 #include <sys/thread2.h> 62 #include <sys/uio.h> 63 64 #include <vm/vm.h> 65 #include <vm/pmap.h> 66 67 #include <machine/clock.h> 68 #include <opencrypto/cryptodev.h> 69 70 #include "cryptodev_if.h" 71 72 #include <bus/pci/pcivar.h> 73 #include <bus/pci/pcireg.h> 74 75 #ifdef HIFN_RNDTEST 76 #include "../rndtest/rndtest.h" 77 #endif 78 #include "hifn7751reg.h" 79 #include "hifn7751var.h" 80 81 /* 82 * Prototypes and count for the pci_device structure 83 */ 84 static int hifn_probe(device_t); 85 static int hifn_attach(device_t); 86 static int hifn_detach(device_t); 87 static int hifn_suspend(device_t); 88 static int hifn_resume(device_t); 89 static void hifn_shutdown(device_t); 90 91 static void hifn_reset_board(struct hifn_softc *, int); 92 static void hifn_reset_puc(struct hifn_softc *); 93 static void hifn_puc_wait(struct hifn_softc *); 94 static int 
hifn_enable_crypto(struct hifn_softc *); 95 static void hifn_set_retry(struct hifn_softc *sc); 96 static void hifn_init_dma(struct hifn_softc *); 97 static void hifn_init_pci_registers(struct hifn_softc *); 98 static int hifn_sramsize(struct hifn_softc *); 99 static int hifn_dramsize(struct hifn_softc *); 100 static int hifn_ramtype(struct hifn_softc *); 101 static void hifn_sessions(struct hifn_softc *); 102 static void hifn_intr(void *); 103 static u_int hifn_write_command(struct hifn_command *, u_int8_t *); 104 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); 105 static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *); 106 static int hifn_freesession(device_t, u_int64_t); 107 static int hifn_process(device_t, struct cryptop *, int); 108 static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); 109 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); 110 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); 111 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); 112 static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); 113 static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); 114 static int hifn_init_pubrng(struct hifn_softc *); 115 #ifndef HIFN_NO_RNG 116 static void hifn_rng(void *); 117 #endif 118 static void hifn_tick(void *); 119 static void hifn_abort(struct hifn_softc *); 120 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); 121 122 static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); 123 static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); 124 125 126 static device_method_t hifn_methods[] = { 127 /* Device interface */ 128 DEVMETHOD(device_probe, hifn_probe), 129 DEVMETHOD(device_attach, hifn_attach), 130 DEVMETHOD(device_detach, hifn_detach), 131 DEVMETHOD(device_suspend, hifn_suspend), 132 DEVMETHOD(device_resume, 
hifn_resume), 133 DEVMETHOD(device_shutdown, hifn_shutdown), 134 135 /* bus interface */ 136 DEVMETHOD(bus_print_child, bus_generic_print_child), 137 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 138 139 /* crypto device methods */ 140 DEVMETHOD(cryptodev_newsession, hifn_newsession), 141 DEVMETHOD(cryptodev_freesession,hifn_freesession), 142 DEVMETHOD(cryptodev_process, hifn_process), 143 144 DEVMETHOD_END 145 }; 146 static driver_t hifn_driver = { 147 "hifn", 148 hifn_methods, 149 sizeof (struct hifn_softc) 150 }; 151 static devclass_t hifn_devclass; 152 153 DECLARE_DUMMY_MODULE(hifn); 154 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, NULL, NULL); 155 MODULE_DEPEND(hifn, crypto, 1, 1, 1); 156 #ifdef HIFN_RNDTEST 157 MODULE_DEPEND(hifn, rndtest, 1, 1, 1); 158 #endif 159 160 static __inline__ u_int32_t 161 READ_REG_0(struct hifn_softc *sc, bus_size_t reg) 162 { 163 u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); 164 sc->sc_bar0_lastreg = (bus_size_t) -1; 165 return (v); 166 } 167 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) 168 169 static __inline__ u_int32_t 170 READ_REG_1(struct hifn_softc *sc, bus_size_t reg) 171 { 172 u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); 173 sc->sc_bar1_lastreg = (bus_size_t) -1; 174 return (v); 175 } 176 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) 177 178 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters"); 179 180 #ifdef HIFN_DEBUG 181 static int hifn_debug = 0; 182 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 183 0, "control debugging msgs"); 184 #endif 185 186 static struct hifn_stats hifnstats; 187 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, 188 hifn_stats, "driver statistics"); 189 static int hifn_maxbatch = 1; 190 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 191 0, "max ops to batch w/o interrupt"); 192 193 /* 194 * Probe for a supported device. 
The PCI vendor and device 195 * IDs are used to detect devices we know how to handle. 196 */ 197 static int 198 hifn_probe(device_t dev) 199 { 200 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && 201 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) 202 return (0); 203 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 204 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || 205 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || 206 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 207 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || 208 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) 209 return (0); 210 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && 211 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) 212 return (0); 213 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) { 214 device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) ); 215 return (0); 216 } 217 return (ENXIO); 218 } 219 220 static void 221 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 222 { 223 bus_addr_t *paddr = (bus_addr_t*) arg; 224 *paddr = segs->ds_addr; 225 } 226 227 static const char* 228 hifn_partname(struct hifn_softc *sc) 229 { 230 /* XXX sprintf numbers when not decoded */ 231 switch (pci_get_vendor(sc->sc_dev)) { 232 case PCI_VENDOR_HIFN: 233 switch (pci_get_device(sc->sc_dev)) { 234 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; 235 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; 236 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; 237 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; 238 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; 239 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; 240 } 241 return "Hifn unknown-part"; 242 case PCI_VENDOR_INVERTEX: 243 switch (pci_get_device(sc->sc_dev)) { 244 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; 245 } 246 return "Invertex unknown-part"; 247 case PCI_VENDOR_NETSEC: 248 switch (pci_get_device(sc->sc_dev)) { 249 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; 250 } 251 return "NetSec unknown-part"; 252 } 253 return 
"Unknown-vendor unknown-part"; 254 } 255 256 static void 257 default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 258 { 259 add_buffer_randomness_src(buf, count, RAND_SRC_HIFN); 260 } 261 262 static u_int 263 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) 264 { 265 if (v > max) { 266 device_printf(dev, "Warning, %s %u out of range, " 267 "using max %u\n", what, v, max); 268 v = max; 269 } else if (v < min) { 270 device_printf(dev, "Warning, %s %u out of range, " 271 "using min %u\n", what, v, min); 272 v = min; 273 } 274 return v; 275 } 276 277 /* 278 * Select PLL configuration for 795x parts. This is complicated in 279 * that we cannot determine the optimal parameters without user input. 280 * The reference clock is derived from an external clock through a 281 * multiplier. The external clock is either the host bus (i.e. PCI) 282 * or an external clock generator. When using the PCI bus we assume 283 * the clock is either 33 or 66 MHz; for an external source we cannot 284 * tell the speed. 285 * 286 * PLL configuration is done with a string: "pci" for PCI bus, or "ext" 287 * for an external source, followed by the frequency. We calculate 288 * the appropriate multiplier and PLL register contents accordingly. 289 * When no configuration is given we default to "pci66" since that 290 * always will allow the card to work. If a card is using the PCI 291 * bus clock and in a 33MHz slot then it will be operating at half 292 * speed until the correct information is provided. 293 * 294 * We use a default setting of "ext66" because according to Mike Ham 295 * of HiFn, almost every board in existence has an external crystal 296 * populated at 66Mhz. Using PCI can be a problem on modern motherboards, 297 * because PCI33 can have clocks from 0 to 33Mhz, and some have 298 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll. 
299 */ 300 static void 301 hifn_getpllconfig(device_t dev, u_int *pll) 302 { 303 const char *pllspec; 304 u_int freq, mul, fl, fh; 305 u_int32_t pllconfig; 306 char *nxt; 307 308 if (resource_string_value("hifn", device_get_unit(dev), 309 "pllconfig", &pllspec)) 310 pllspec = "ext66"; 311 fl = 33, fh = 66; 312 pllconfig = 0; 313 if (strncmp(pllspec, "ext", 3) == 0) { 314 pllspec += 3; 315 pllconfig |= HIFN_PLL_REF_SEL; 316 switch (pci_get_device(dev)) { 317 case PCI_PRODUCT_HIFN_7955: 318 case PCI_PRODUCT_HIFN_7956: 319 fl = 20, fh = 100; 320 break; 321 #ifdef notyet 322 case PCI_PRODUCT_HIFN_7954: 323 fl = 20, fh = 66; 324 break; 325 #endif 326 } 327 } else if (strncmp(pllspec, "pci", 3) == 0) 328 pllspec += 3; 329 freq = strtoul(pllspec, &nxt, 10); 330 if (nxt == pllspec) 331 freq = 66; 332 else 333 freq = checkmaxmin(dev, "frequency", freq, fl, fh); 334 /* 335 * Calculate multiplier. We target a Fck of 266 MHz, 336 * allowing only even values, possibly rounded down. 337 * Multipliers > 8 must set the charge pump current. 338 */ 339 mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12); 340 pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT; 341 if (mul > 8) 342 pllconfig |= HIFN_PLL_IS; 343 *pll = pllconfig; 344 } 345 346 /* 347 * Attach an interface that successfully probed. 348 */ 349 static int 350 hifn_attach(device_t dev) 351 { 352 struct hifn_softc *sc = device_get_softc(dev); 353 u_int32_t cmd; 354 caddr_t kva; 355 int rseg, rid; 356 char rbase; 357 u_int16_t ena, rev; 358 359 KASSERT(sc != NULL, ("hifn_attach: null software carrier!")); 360 bzero(sc, sizeof (*sc)); 361 sc->sc_dev = dev; 362 363 lockinit(&sc->sc_lock, __DECONST(char *, device_get_nameunit(dev)), 364 0, LK_CANRECURSE); 365 366 /* XXX handle power management */ 367 368 /* 369 * The 7951 and 795x have a random number generator and 370 * public key support; note this. 
371 */ 372 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 373 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || 374 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 375 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) 376 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; 377 /* 378 * The 7811 has a random number generator and 379 * we also note it's identity 'cuz of some quirks. 380 */ 381 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 382 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) 383 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; 384 385 /* 386 * The 795x parts support AES. 387 */ 388 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 389 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 390 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) { 391 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; 392 /* 393 * Select PLL configuration. This depends on the 394 * bus and board design and must be manually configured 395 * if the default setting is unacceptable. 396 */ 397 hifn_getpllconfig(dev, &sc->sc_pllconfig); 398 } 399 400 /* 401 * Configure support for memory-mapped access to 402 * registers and for DMA operations. 403 */ 404 #define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN) 405 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 406 cmd |= PCIM_ENA; 407 pci_write_config(dev, PCIR_COMMAND, cmd, 4); 408 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 409 if ((cmd & PCIM_ENA) != PCIM_ENA) { 410 device_printf(dev, "failed to enable %s\n", 411 (cmd & PCIM_ENA) == 0 ? 412 "memory mapping & bus mastering" : 413 (cmd & PCIM_CMD_MEMEN) == 0 ? 414 "memory mapping" : "bus mastering"); 415 goto fail_pci; 416 } 417 #undef PCIM_ENA 418 419 /* 420 * Setup PCI resources. Note that we record the bus 421 * tag and handle for each register mapping, this is 422 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, 423 * and WRITE_REG_1 macros throughout the driver. 
424 */ 425 rid = HIFN_BAR0; 426 sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 427 0, ~0, 1, RF_ACTIVE); 428 if (sc->sc_bar0res == NULL) { 429 device_printf(dev, "cannot map bar%d register space\n", 0); 430 goto fail_pci; 431 } 432 sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); 433 sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); 434 sc->sc_bar0_lastreg = (bus_size_t) -1; 435 436 rid = HIFN_BAR1; 437 sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 438 0, ~0, 1, RF_ACTIVE); 439 if (sc->sc_bar1res == NULL) { 440 device_printf(dev, "cannot map bar%d register space\n", 1); 441 goto fail_io0; 442 } 443 sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); 444 sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); 445 sc->sc_bar1_lastreg = (bus_size_t) -1; 446 447 hifn_set_retry(sc); 448 449 /* 450 * Setup the area where the Hifn DMA's descriptors 451 * and associated data structures. 452 */ 453 if (bus_dma_tag_create(NULL, /* parent */ 454 1, 0, /* alignment,boundary */ 455 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 456 BUS_SPACE_MAXADDR, /* highaddr */ 457 NULL, NULL, /* filter, filterarg */ 458 HIFN_MAX_DMALEN, /* maxsize */ 459 MAX_SCATTER, /* nsegments */ 460 HIFN_MAX_SEGLEN, /* maxsegsize */ 461 BUS_DMA_ALLOCNOW, /* flags */ 462 &sc->sc_dmat)) { 463 device_printf(dev, "cannot allocate DMA tag\n"); 464 goto fail_io1; 465 } 466 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { 467 device_printf(dev, "cannot create dma map\n"); 468 bus_dma_tag_destroy(sc->sc_dmat); 469 goto fail_io1; 470 } 471 if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { 472 device_printf(dev, "cannot alloc dma buffer\n"); 473 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 474 bus_dma_tag_destroy(sc->sc_dmat); 475 goto fail_io1; 476 } 477 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, 478 sizeof (*sc->sc_dma), 479 hifn_dmamap_cb, &sc->sc_dma_physaddr, 480 BUS_DMA_NOWAIT)) { 481 device_printf(dev, "cannot load dma map\n"); 
482 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); 483 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 484 bus_dma_tag_destroy(sc->sc_dmat); 485 goto fail_io1; 486 } 487 sc->sc_dma = (struct hifn_dma *)kva; 488 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 489 490 KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); 491 KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); 492 KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); 493 KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); 494 495 /* 496 * Reset the board and do the ``secret handshake'' 497 * to enable the crypto support. Then complete the 498 * initialization procedure by setting up the interrupt 499 * and hooking in to the system crypto support so we'll 500 * get used for system services like the crypto device, 501 * IPsec, RNG device, etc. 502 */ 503 hifn_reset_board(sc, 0); 504 505 if (hifn_enable_crypto(sc) != 0) { 506 device_printf(dev, "crypto enabling failed\n"); 507 goto fail_mem; 508 } 509 hifn_reset_puc(sc); 510 511 hifn_init_dma(sc); 512 hifn_init_pci_registers(sc); 513 514 /* XXX can't dynamically determine ram type for 795x; force dram */ 515 if (sc->sc_flags & HIFN_IS_7956) 516 sc->sc_drammodel = 1; 517 else if (hifn_ramtype(sc)) 518 goto fail_mem; 519 520 if (sc->sc_drammodel == 0) 521 hifn_sramsize(sc); 522 else 523 hifn_dramsize(sc); 524 525 /* 526 * Workaround for NetSec 7751 rev A: half ram size because two 527 * of the address lines were left floating 528 */ 529 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && 530 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && 531 pci_get_revid(dev) == 0x61) /*XXX???*/ 532 sc->sc_ramsize >>= 1; 533 534 /* 535 * Arrange the interrupt line. 
536 */ 537 rid = 0; 538 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 539 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); 540 if (sc->sc_irq == NULL) { 541 device_printf(dev, "could not map interrupt\n"); 542 goto fail_mem; 543 } 544 /* 545 * NB: Network code assumes we are blocked with splimp() 546 * so make sure the IRQ is marked appropriately. 547 */ 548 if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, 549 hifn_intr, sc, 550 &sc->sc_intrhand, NULL)) { 551 device_printf(dev, "could not setup interrupt\n"); 552 goto fail_intr2; 553 } 554 555 hifn_sessions(sc); 556 557 /* 558 * NB: Keep only the low 16 bits; this masks the chip id 559 * from the 7951. 560 */ 561 rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; 562 563 rseg = sc->sc_ramsize / 1024; 564 rbase = 'K'; 565 if (sc->sc_ramsize >= (1024 * 1024)) { 566 rbase = 'M'; 567 rseg /= 1024; 568 } 569 device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n", 570 hifn_partname(sc), rev, 571 rseg, rbase, sc->sc_drammodel ? 'd' : 's', 572 sc->sc_maxses); 573 574 if (sc->sc_flags & HIFN_IS_7956) 575 kprintf(", pll=0x%x<%s clk, %ux mult>", 576 sc->sc_pllconfig, 577 sc->sc_pllconfig & HIFN_PLL_REF_SEL ? 
"ext" : "pci", 578 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11)); 579 kprintf("\n"); 580 581 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); 582 if (sc->sc_cid < 0) { 583 device_printf(dev, "could not get crypto driver id\n"); 584 goto fail_intr; 585 } 586 587 WRITE_REG_0(sc, HIFN_0_PUCNFG, 588 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); 589 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 590 591 switch (ena) { 592 case HIFN_PUSTAT_ENA_2: 593 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); 594 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0); 595 if (sc->sc_flags & HIFN_HAS_AES) 596 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); 597 /*FALLTHROUGH*/ 598 case HIFN_PUSTAT_ENA_1: 599 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); 600 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); 601 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); 602 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); 603 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); 604 break; 605 } 606 607 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 608 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 609 610 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) 611 hifn_init_pubrng(sc); 612 613 /* NB: 1 means the callout runs w/o Giant locked */ 614 callout_init_mp(&sc->sc_tickto); 615 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 616 617 return (0); 618 619 fail_intr: 620 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); 621 fail_intr2: 622 /* XXX don't store rid */ 623 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 624 fail_mem: 625 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); 626 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); 627 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 628 bus_dma_tag_destroy(sc->sc_dmat); 629 630 /* Turn off DMA polling */ 631 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 632 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 633 fail_io1: 634 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, 
sc->sc_bar1res); 635 fail_io0: 636 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); 637 fail_pci: 638 lockuninit(&sc->sc_lock); 639 return (ENXIO); 640 } 641 642 /* 643 * Detach an interface that successfully probed. 644 */ 645 static int 646 hifn_detach(device_t dev) 647 { 648 struct hifn_softc *sc = device_get_softc(dev); 649 650 KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); 651 652 /* disable interrupts */ 653 WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); 654 655 /*XXX other resources */ 656 callout_stop(&sc->sc_tickto); 657 callout_stop(&sc->sc_rngto); 658 #ifdef HIFN_RNDTEST 659 if (sc->sc_rndtest) 660 rndtest_detach(sc->sc_rndtest); 661 #endif 662 663 /* Turn off DMA polling */ 664 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 665 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 666 667 crypto_unregister_all(sc->sc_cid); 668 669 bus_generic_detach(dev); /*XXX should be no children, right? */ 670 671 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); 672 /* XXX don't store rid */ 673 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 674 675 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); 676 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); 677 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); 678 bus_dma_tag_destroy(sc->sc_dmat); 679 680 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); 681 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); 682 683 lockuninit(&sc->sc_lock); 684 685 return (0); 686 } 687 688 /* 689 * Stop all chip I/O so that the kernel's probe routines don't 690 * get confused by errant DMAs when rebooting. 691 */ 692 static void 693 hifn_shutdown(device_t dev) 694 { 695 #ifdef notyet 696 hifn_stop(device_get_softc(dev)); 697 #endif 698 } 699 700 /* 701 * Device suspend routine. Stop the interface and save some PCI 702 * settings in case the BIOS doesn't restore them properly on 703 * resume. 
704 */ 705 static int 706 hifn_suspend(device_t dev) 707 { 708 struct hifn_softc *sc = device_get_softc(dev); 709 #ifdef notyet 710 int i; 711 712 hifn_stop(sc); 713 for (i = 0; i < 5; i++) 714 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 715 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 716 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 717 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 718 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 719 #endif 720 sc->sc_suspended = 1; 721 722 return (0); 723 } 724 725 /* 726 * Device resume routine. Restore some PCI settings in case the BIOS 727 * doesn't, re-enable busmastering, and restart the interface if 728 * appropriate. 729 */ 730 static int 731 hifn_resume(device_t dev) 732 { 733 struct hifn_softc *sc = device_get_softc(dev); 734 #ifdef notyet 735 int i; 736 737 /* better way to do this? */ 738 for (i = 0; i < 5; i++) 739 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 740 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 741 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 742 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 743 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 744 745 /* reenable busmastering */ 746 pci_enable_busmaster(dev); 747 pci_enable_io(dev, HIFN_RES); 748 749 /* reinitialize interface if necessary */ 750 if (ifp->if_flags & IFF_UP) 751 rl_init(sc); 752 #endif 753 sc->sc_suspended = 0; 754 755 return (0); 756 } 757 758 static int 759 hifn_init_pubrng(struct hifn_softc *sc) 760 { 761 u_int32_t r; 762 int i; 763 764 #ifdef HIFN_RNDTEST 765 sc->sc_rndtest = rndtest_attach(sc->sc_dev); 766 if (sc->sc_rndtest) 767 sc->sc_harvest = rndtest_harvest; 768 else 769 sc->sc_harvest = default_harvest; 770 #else 771 sc->sc_harvest = default_harvest; 772 #endif 773 if ((sc->sc_flags & HIFN_IS_7811) == 0) { 774 /* Reset 7951 public key/rng engine */ 775 WRITE_REG_1(sc, 
HIFN_1_PUB_RESET, 776 READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); 777 778 for (i = 0; i < 100; i++) { 779 DELAY(1000); 780 if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & 781 HIFN_PUBRST_RESET) == 0) 782 break; 783 } 784 785 if (i == 100) { 786 device_printf(sc->sc_dev, "public key init failed\n"); 787 return (1); 788 } 789 } 790 791 #ifndef HIFN_NO_RNG 792 /* Enable the rng, if available */ 793 if (sc->sc_flags & HIFN_HAS_RNG) { 794 if (sc->sc_flags & HIFN_IS_7811) { 795 r = READ_REG_1(sc, HIFN_1_7811_RNGENA); 796 if (r & HIFN_7811_RNGENA_ENA) { 797 r &= ~HIFN_7811_RNGENA_ENA; 798 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); 799 } 800 WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, 801 HIFN_7811_RNGCFG_DEFL); 802 r |= HIFN_7811_RNGENA_ENA; 803 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); 804 } else 805 WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, 806 READ_REG_1(sc, HIFN_1_RNG_CONFIG) | 807 HIFN_RNGCFG_ENA); 808 809 sc->sc_rngfirst = 1; 810 if (hz >= 100) 811 sc->sc_rnghz = hz / 100; 812 else 813 sc->sc_rnghz = 1; 814 /* NB: 1 means the callout runs w/o Giant locked */ 815 callout_init_mp(&sc->sc_rngto); 816 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); 817 } 818 #endif 819 820 /* Enable public key engine, if available */ 821 if (sc->sc_flags & HIFN_HAS_PUBLIC) { 822 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); 823 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; 824 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 825 } 826 827 return (0); 828 } 829 830 #ifndef HIFN_NO_RNG 831 static void 832 hifn_rng(void *vsc) 833 { 834 #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 835 struct hifn_softc *sc = vsc; 836 u_int32_t sts, num[2]; 837 int i; 838 839 if (sc->sc_flags & HIFN_IS_7811) { 840 for (i = 0; i < 5; i++) { 841 sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); 842 if (sts & HIFN_7811_RNGSTS_UFL) { 843 device_printf(sc->sc_dev, 844 "RNG underflow: disabling\n"); 845 return; 846 } 847 if ((sts & HIFN_7811_RNGSTS_RDY) == 0) 848 break; 849 850 /* 851 * There are at 
least two words in the RNG FIFO 852 * at this point. 853 */ 854 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 855 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 856 /* NB: discard first data read */ 857 if (sc->sc_rngfirst) 858 sc->sc_rngfirst = 0; 859 else 860 (*sc->sc_harvest)(sc->sc_rndtest, 861 num, sizeof (num)); 862 } 863 } else { 864 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); 865 866 /* NB: discard first data read */ 867 if (sc->sc_rngfirst) 868 sc->sc_rngfirst = 0; 869 else 870 (*sc->sc_harvest)(sc->sc_rndtest, 871 num, sizeof (num[0])); 872 } 873 874 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); 875 #undef RANDOM_BITS 876 } 877 #endif 878 879 static void 880 hifn_puc_wait(struct hifn_softc *sc) 881 { 882 int i; 883 int reg = HIFN_0_PUCTRL; 884 885 if (sc->sc_flags & HIFN_IS_7956) { 886 reg = HIFN_0_PUCTRL2; 887 } 888 889 for (i = 5000; i > 0; i--) { 890 DELAY(1); 891 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET)) 892 break; 893 } 894 if (!i) 895 device_printf(sc->sc_dev, "proc unit did not reset\n"); 896 } 897 898 /* 899 * Reset the processing unit. 900 */ 901 static void 902 hifn_reset_puc(struct hifn_softc *sc) 903 { 904 int reg = HIFN_0_PUCTRL; 905 906 if (sc->sc_flags & HIFN_IS_7956) { 907 reg = HIFN_0_PUCTRL2; 908 } 909 910 /* Reset processing unit */ 911 WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA); 912 hifn_puc_wait(sc); 913 } 914 915 /* 916 * Set the Retry and TRDY registers; note that we set them to 917 * zero because the 7811 locks up when forced to retry (section 918 * 3.6 of "Specification Update SU-0014-04". Not clear if we 919 * should do this for all Hifn parts, but it doesn't seem to hurt. 920 */ 921 static void 922 hifn_set_retry(struct hifn_softc *sc) 923 { 924 /* NB: RETRY only responds to 8-bit reads/writes */ 925 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); 926 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4); 927 } 928 929 /* 930 * Resets the board. Values in the regesters are left as is 931 * from the reset (i.e. 
initial values are assigned elsewhere). 932 */ 933 static void 934 hifn_reset_board(struct hifn_softc *sc, int full) 935 { 936 u_int32_t reg; 937 938 /* 939 * Set polling in the DMA configuration register to zero. 0x7 avoids 940 * resetting the board and zeros out the other fields. 941 */ 942 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 943 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 944 945 /* 946 * Now that polling has been disabled, we have to wait 1 ms 947 * before resetting the board. 948 */ 949 DELAY(1000); 950 951 /* Reset the DMA unit */ 952 if (full) { 953 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 954 DELAY(1000); 955 } else { 956 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 957 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 958 hifn_reset_puc(sc); 959 } 960 961 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); 962 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 963 964 /* Bring dma unit out of reset */ 965 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 966 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 967 968 hifn_puc_wait(sc); 969 hifn_set_retry(sc); 970 971 if (sc->sc_flags & HIFN_IS_7811) { 972 for (reg = 0; reg < 1000; reg++) { 973 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 974 HIFN_MIPSRST_CRAMINIT) 975 break; 976 DELAY(1000); 977 } 978 if (reg == 1000) 979 kprintf(": cram init timeout\n"); 980 } else { 981 /* set up DMA configuration register #2 */ 982 /* turn off all PK and BAR0 swaps */ 983 WRITE_REG_1(sc, HIFN_1_DMA_CNFG2, 984 (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)| 985 (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)| 986 (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)| 987 (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT)); 988 } 989 } 990 991 static u_int32_t 992 hifn_next_signature(u_int32_t a, u_int cnt) 993 { 994 int i; 995 u_int32_t v; 996 997 for (i = 0; i < cnt; i++) { 998 999 /* get the parity */ 1000 v = a & 0x80080125; 1001 v ^= v >> 16; 1002 v ^= v >> 8; 1003 v ^= v >> 4; 1004 v ^= v >> 2; 1005 v ^= v >> 1; 1006 1007 a = (v 
& 1) ^ (a << 1); 1008 } 1009 1010 return a; 1011 } 1012 1013 struct pci2id { 1014 u_short pci_vendor; 1015 u_short pci_prod; 1016 char card_id[13]; 1017 }; 1018 static struct pci2id pci2id[] = { 1019 { 1020 PCI_VENDOR_HIFN, 1021 PCI_PRODUCT_HIFN_7951, 1022 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1023 0x00, 0x00, 0x00, 0x00, 0x00 } 1024 }, { 1025 PCI_VENDOR_HIFN, 1026 PCI_PRODUCT_HIFN_7955, 1027 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1028 0x00, 0x00, 0x00, 0x00, 0x00 } 1029 }, { 1030 PCI_VENDOR_HIFN, 1031 PCI_PRODUCT_HIFN_7956, 1032 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1033 0x00, 0x00, 0x00, 0x00, 0x00 } 1034 }, { 1035 PCI_VENDOR_NETSEC, 1036 PCI_PRODUCT_NETSEC_7751, 1037 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1038 0x00, 0x00, 0x00, 0x00, 0x00 } 1039 }, { 1040 PCI_VENDOR_INVERTEX, 1041 PCI_PRODUCT_INVERTEX_AEON, 1042 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1043 0x00, 0x00, 0x00, 0x00, 0x00 } 1044 }, { 1045 PCI_VENDOR_HIFN, 1046 PCI_PRODUCT_HIFN_7811, 1047 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1048 0x00, 0x00, 0x00, 0x00, 0x00 } 1049 }, { 1050 /* 1051 * Other vendors share this PCI ID as well, such as 1052 * http://www.powercrypt.com, and obviously they also 1053 * use the same key. 1054 */ 1055 PCI_VENDOR_HIFN, 1056 PCI_PRODUCT_HIFN_7751, 1057 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1058 0x00, 0x00, 0x00, 0x00, 0x00 } 1059 }, 1060 }; 1061 1062 /* 1063 * Checks to see if crypto is already enabled. If crypto isn't enable, 1064 * "hifn_enable_crypto" is called to enable it. The check is important, 1065 * as enabling crypto twice will lock the board. 
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up the unlock key ("card id") for this vendor/device pair. */
	for (i = 0; i < NELEM(pci2id); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save current config registers; they are restored at "report:". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/* Put the part in unlock mode before writing the key sequence. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/*
	 * Run each of the 13 card-id bytes through the signature
	 * generator and write the evolving signature back to the part.
	 */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the encryption level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
			    "locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
			    "successfully!\n");
	}
#endif

report:
	/* Restore the configuration registers saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	/* "command wait" is enabled lazily in hifn_crypto() when needed. */
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		    | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Clamp to the driver-wide session limit. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/* Write/read-back a 0x55 pattern; a mismatch implies DRAM. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Repeat with the complementary 0xaa pattern. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Both patterns read back intact: leave sc_drammodel as-is (SRAM). */
	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe the SRAM size by writing each step's index at every
 * HIFN_SRAM_STEP_SIZE boundary (highest address first), then reading
 * ascending addresses back until the stored index no longer matches
 * (address aliasing past the end of real memory).  sc_ramsize is left
 * at the last address that verified.  NOTE: always returns 0, even on
 * read failure -- callers rely on sc_ramsize, not the return value.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Stamp each step boundary with its own index, top down. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Read back bottom up; stop growing sc_ramsize on first mismatch. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		/* Decode the DRAM-size field into bytes (powers of two). */
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Allocate one slot in each of the four descriptor rings (command,
 * source, destination, result), handling the wrap/jump descriptor at
 * the end of each ring.  Returns the allocated indices through the
 * out parameters and advances the ring "i" (producer) and "k" markers.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

/*
 * Write 8 bytes at the given device RAM address via a DMA "write"
 * command (masks = 3 << 13), polling for completion.  Returns 0 on
 * success, -1 if the result descriptor never completed.
 */
static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	hifn_base_command_t wc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

	/* Encode the RAM address in session_num (high) / dest count (low). */
	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
	bcopy(data, &dma->test_src, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Poll (up to ~100ms) for the result descriptor to complete. */
	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		device_printf(sc->sc_dev, "writeramaddr -- "
		    "result[%d](addr %d) still valid\n", resi, addr);
		/* NOTE: early return skips the CSR disable below; the
		 * "r = -1" before it is dead.  Preserved as-is. */
		r = -1;
		return (-1);
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

/*
 * Read 8 bytes from the given device RAM address via a DMA "read"
 * command (masks = 2 << 13), polling for completion.  On success the
 * data is copied into "data" and 0 is returned; -1 on timeout.
 */
static int
hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	hifn_base_command_t rc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, srci, dsti, resi;

	/* Encode the RAM address in session_num (high) / source count (low). */
	rc.masks = htole16(2 << 13);
	rc.session_num = htole16(addr >> 14);
	rc.total_source_count = htole16(addr & 0x3fff);
	rc.total_dest_count = htole16(8);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;

	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, test_src));
	dma->test_src = 0;
	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, test_dst));
	dma->test_dst = 0;
	dma->cmdr[cmdi].l = htole32(8 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(8 | masks);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Poll (up to ~100ms) for the result descriptor to complete. */
	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		device_printf(sc->sc_dev, "readramaddr -- "
		    "result[%d](addr %d) still valid\n", resi, addr);
		r = -1;
	} else {
		r = 0;
		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
	}

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}
1546 1547 /* 1548 * Initialize the descriptor rings. 1549 */ 1550 static void 1551 hifn_init_dma(struct hifn_softc *sc) 1552 { 1553 struct hifn_dma *dma = sc->sc_dma; 1554 int i; 1555 1556 hifn_set_retry(sc); 1557 1558 /* initialize static pointer values */ 1559 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1560 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + 1561 offsetof(struct hifn_dma, command_bufs[i][0])); 1562 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1563 dma->resr[i].p = htole32(sc->sc_dma_physaddr + 1564 offsetof(struct hifn_dma, result_bufs[i][0])); 1565 1566 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1567 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); 1568 dma->srcr[HIFN_D_SRC_RSIZE].p = 1569 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); 1570 dma->dstr[HIFN_D_DST_RSIZE].p = 1571 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); 1572 dma->resr[HIFN_D_RES_RSIZE].p = 1573 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); 1574 1575 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1576 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1577 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1578 } 1579 1580 /* 1581 * Writes out the raw command buffer space. Returns the 1582 * command buffer size. 
1583 */ 1584 static u_int 1585 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) 1586 { 1587 u_int8_t *buf_pos; 1588 hifn_base_command_t *base_cmd; 1589 hifn_mac_command_t *mac_cmd; 1590 hifn_crypt_command_t *cry_cmd; 1591 int using_mac, using_crypt, len, ivlen; 1592 u_int32_t dlen, slen; 1593 1594 buf_pos = buf; 1595 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; 1596 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; 1597 1598 base_cmd = (hifn_base_command_t *)buf_pos; 1599 base_cmd->masks = htole16(cmd->base_masks); 1600 slen = cmd->src_mapsize; 1601 if (cmd->sloplen) 1602 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); 1603 else 1604 dlen = cmd->dst_mapsize; 1605 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); 1606 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); 1607 dlen >>= 16; 1608 slen >>= 16; 1609 1610 base_cmd->session_num = htole16( 1611 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | 1612 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); 1613 buf_pos += sizeof(hifn_base_command_t); 1614 1615 if (using_mac) { 1616 mac_cmd = (hifn_mac_command_t *)buf_pos; 1617 dlen = cmd->maccrd->crd_len; 1618 mac_cmd->source_count = htole16(dlen & 0xffff); 1619 dlen >>= 16; 1620 mac_cmd->masks = htole16(cmd->mac_masks | 1621 ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); 1622 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); 1623 mac_cmd->reserved = 0; 1624 buf_pos += sizeof(hifn_mac_command_t); 1625 } 1626 1627 if (using_crypt) { 1628 cry_cmd = (hifn_crypt_command_t *)buf_pos; 1629 dlen = cmd->enccrd->crd_len; 1630 cry_cmd->source_count = htole16(dlen & 0xffff); 1631 dlen >>= 16; 1632 cry_cmd->masks = htole16(cmd->cry_masks | 1633 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); 1634 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); 1635 cry_cmd->reserved = 0; 1636 buf_pos += sizeof(hifn_crypt_command_t); 1637 } 1638 1639 if 
(using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { 1640 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); 1641 buf_pos += HIFN_MAC_KEY_LENGTH; 1642 } 1643 1644 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { 1645 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1646 case HIFN_CRYPT_CMD_ALG_3DES: 1647 bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); 1648 buf_pos += HIFN_3DES_KEY_LENGTH; 1649 break; 1650 case HIFN_CRYPT_CMD_ALG_DES: 1651 bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); 1652 buf_pos += HIFN_DES_KEY_LENGTH; 1653 break; 1654 case HIFN_CRYPT_CMD_ALG_RC4: 1655 len = 256; 1656 do { 1657 int clen; 1658 1659 clen = MIN(cmd->cklen, len); 1660 bcopy(cmd->ck, buf_pos, clen); 1661 len -= clen; 1662 buf_pos += clen; 1663 } while (len > 0); 1664 bzero(buf_pos, 4); 1665 buf_pos += 4; 1666 break; 1667 case HIFN_CRYPT_CMD_ALG_AES: 1668 /* 1669 * AES keys are variable 128, 192 and 1670 * 256 bits (16, 24 and 32 bytes). 1671 */ 1672 bcopy(cmd->ck, buf_pos, cmd->cklen); 1673 buf_pos += cmd->cklen; 1674 break; 1675 } 1676 } 1677 1678 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { 1679 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1680 case HIFN_CRYPT_CMD_ALG_AES: 1681 ivlen = HIFN_AES_IV_LENGTH; 1682 break; 1683 default: 1684 ivlen = HIFN_IV_LENGTH; 1685 break; 1686 } 1687 bcopy(cmd->iv, buf_pos, ivlen); 1688 buf_pos += ivlen; 1689 } 1690 1691 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { 1692 bzero(buf_pos, 8); 1693 buf_pos += 8; 1694 } 1695 1696 return (buf_pos - buf); 1697 #undef MIN 1698 } 1699 1700 static int 1701 hifn_dmamap_aligned(struct hifn_operand *op) 1702 { 1703 int i; 1704 1705 for (i = 0; i < op->nsegs; i++) { 1706 if (op->segs[i].ds_addr & 3) 1707 return (0); 1708 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) 1709 return (0); 1710 } 1711 return (1); 1712 } 1713 1714 static __inline int 1715 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx) 1716 { 1717 struct hifn_dma *dma = sc->sc_dma; 
1718 1719 if (++idx == HIFN_D_DST_RSIZE) { 1720 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | 1721 HIFN_D_MASKDONEIRQ); 1722 HIFN_DSTR_SYNC(sc, idx, 1723 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1724 idx = 0; 1725 } 1726 return (idx); 1727 } 1728 1729 static int 1730 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) 1731 { 1732 struct hifn_dma *dma = sc->sc_dma; 1733 struct hifn_operand *dst = &cmd->dst; 1734 u_int32_t p, l; 1735 int idx, used = 0, i; 1736 1737 idx = dma->dsti; 1738 for (i = 0; i < dst->nsegs - 1; i++) { 1739 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); 1740 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1741 HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); 1742 HIFN_DSTR_SYNC(sc, idx, 1743 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1744 used++; 1745 1746 idx = hifn_dmamap_dstwrap(sc, idx); 1747 } 1748 1749 if (cmd->sloplen == 0) { 1750 p = dst->segs[i].ds_addr; 1751 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | 1752 dst->segs[i].ds_len; 1753 } else { 1754 p = sc->sc_dma_physaddr + 1755 offsetof(struct hifn_dma, slop[cmd->slopidx]); 1756 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | 1757 sizeof(u_int32_t); 1758 1759 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { 1760 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); 1761 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1762 HIFN_D_MASKDONEIRQ | 1763 (dst->segs[i].ds_len - cmd->sloplen)); 1764 HIFN_DSTR_SYNC(sc, idx, 1765 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1766 used++; 1767 1768 idx = hifn_dmamap_dstwrap(sc, idx); 1769 } 1770 } 1771 dma->dstr[idx].p = htole32(p); 1772 dma->dstr[idx].l = htole32(l); 1773 HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1774 used++; 1775 1776 idx = hifn_dmamap_dstwrap(sc, idx); 1777 1778 dma->dsti = idx; 1779 dma->dstu += used; 1780 return (idx); 1781 } 1782 1783 static __inline int 1784 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx) 1785 { 1786 struct hifn_dma *dma = sc->sc_dma; 1787 1788 if 
(++idx == HIFN_D_SRC_RSIZE) { 1789 dma->srcr[idx].l = htole32(HIFN_D_VALID | 1790 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1791 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1792 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1793 idx = 0; 1794 } 1795 return (idx); 1796 } 1797 1798 static int 1799 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) 1800 { 1801 struct hifn_dma *dma = sc->sc_dma; 1802 struct hifn_operand *src = &cmd->src; 1803 int idx, i; 1804 u_int32_t last = 0; 1805 1806 idx = dma->srci; 1807 for (i = 0; i < src->nsegs; i++) { 1808 if (i == src->nsegs - 1) 1809 last = HIFN_D_LAST; 1810 1811 dma->srcr[idx].p = htole32(src->segs[i].ds_addr); 1812 dma->srcr[idx].l = htole32(src->segs[i].ds_len | 1813 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); 1814 HIFN_SRCR_SYNC(sc, idx, 1815 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1816 1817 idx = hifn_dmamap_srcwrap(sc, idx); 1818 } 1819 dma->srci = idx; 1820 dma->srcu += src->nsegs; 1821 return (idx); 1822 } 1823 1824 static void 1825 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) 1826 { 1827 struct hifn_operand *op = arg; 1828 1829 KASSERT(nsegs <= MAX_SCATTER, 1830 ("hifn_op_cb: too many DMA segments (%u > %u) " 1831 "returned when mapping operand", nsegs, MAX_SCATTER)); 1832 op->mapsize = mapsize; 1833 op->nsegs = nsegs; 1834 bcopy(seg, op->segs, nsegs * sizeof (seg[0])); 1835 } 1836 1837 static int 1838 hifn_crypto( 1839 struct hifn_softc *sc, 1840 struct hifn_command *cmd, 1841 struct cryptop *crp, 1842 int hint) 1843 { 1844 struct hifn_dma *dma = sc->sc_dma; 1845 u_int32_t cmdlen, csr; 1846 int cmdi, resi, err = 0; 1847 1848 /* 1849 * need 1 cmd, and 1 res 1850 * 1851 * NB: check this first since it's easy. 
1852 */ 1853 HIFN_LOCK(sc); 1854 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || 1855 (dma->resu + 1) > HIFN_D_RES_RSIZE) { 1856 #ifdef HIFN_DEBUG 1857 if (hifn_debug) { 1858 device_printf(sc->sc_dev, 1859 "cmd/result exhaustion, cmdu %u resu %u\n", 1860 dma->cmdu, dma->resu); 1861 } 1862 #endif 1863 hifnstats.hst_nomem_cr++; 1864 HIFN_UNLOCK(sc); 1865 return (ERESTART); 1866 } 1867 1868 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { 1869 hifnstats.hst_nomem_map++; 1870 HIFN_UNLOCK(sc); 1871 return (ENOMEM); 1872 } 1873 1874 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1875 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 1876 cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { 1877 hifnstats.hst_nomem_load++; 1878 err = ENOMEM; 1879 goto err_srcmap1; 1880 } 1881 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1882 #if 0 1883 cmd->src_io->uio_segflg = UIO_USERSPACE; 1884 #endif 1885 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 1886 cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { 1887 hifnstats.hst_nomem_load++; 1888 err = ENOMEM; 1889 goto err_srcmap1; 1890 } 1891 } else { 1892 err = EINVAL; 1893 goto err_srcmap1; 1894 } 1895 1896 if (hifn_dmamap_aligned(&cmd->src)) { 1897 cmd->sloplen = cmd->src_mapsize & 3; 1898 cmd->dst = cmd->src; 1899 } else { 1900 if (crp->crp_flags & CRYPTO_F_IOV) { 1901 err = EINVAL; 1902 goto err_srcmap; 1903 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1904 int totlen, len; 1905 struct mbuf *m, *m0, *mlast; 1906 1907 KASSERT(cmd->dst_m == cmd->src_m, 1908 ("hifn_crypto: dst_m initialized improperly")); 1909 hifnstats.hst_unaligned++; 1910 /* 1911 * Source is not aligned on a longword boundary. 1912 * Copy the data to insure alignment. If we fail 1913 * to allocate mbufs or clusters while doing this 1914 * we return ERESTART so the operation is requeued 1915 * at the crypto later, but only if there are 1916 * ops already posted to the hardware; otherwise we 1917 * have no guarantee that we'll be re-entered. 
1918 */ 1919 totlen = cmd->src_mapsize; 1920 if (cmd->src_m->m_flags & M_PKTHDR) { 1921 len = MHLEN; 1922 MGETHDR(m0, MB_DONTWAIT, MT_DATA); 1923 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, MB_DONTWAIT)) { 1924 m_free(m0); 1925 m0 = NULL; 1926 } 1927 } else { 1928 len = MLEN; 1929 MGET(m0, MB_DONTWAIT, MT_DATA); 1930 } 1931 if (m0 == NULL) { 1932 hifnstats.hst_nomem_mbuf++; 1933 err = dma->cmdu ? ERESTART : ENOMEM; 1934 goto err_srcmap; 1935 } 1936 if (totlen >= MINCLSIZE) { 1937 MCLGET(m0, MB_DONTWAIT); 1938 if ((m0->m_flags & M_EXT) == 0) { 1939 hifnstats.hst_nomem_mcl++; 1940 err = dma->cmdu ? ERESTART : ENOMEM; 1941 m_freem(m0); 1942 goto err_srcmap; 1943 } 1944 len = MCLBYTES; 1945 } 1946 totlen -= len; 1947 m0->m_pkthdr.len = m0->m_len = len; 1948 mlast = m0; 1949 1950 while (totlen > 0) { 1951 MGET(m, MB_DONTWAIT, MT_DATA); 1952 if (m == NULL) { 1953 hifnstats.hst_nomem_mbuf++; 1954 err = dma->cmdu ? ERESTART : ENOMEM; 1955 m_freem(m0); 1956 goto err_srcmap; 1957 } 1958 len = MLEN; 1959 if (totlen >= MINCLSIZE) { 1960 MCLGET(m, MB_DONTWAIT); 1961 if ((m->m_flags & M_EXT) == 0) { 1962 hifnstats.hst_nomem_mcl++; 1963 err = dma->cmdu ? 
ERESTART : ENOMEM; 1964 mlast->m_next = m; 1965 m_freem(m0); 1966 goto err_srcmap; 1967 } 1968 len = MCLBYTES; 1969 } 1970 1971 m->m_len = len; 1972 m0->m_pkthdr.len += len; 1973 totlen -= len; 1974 1975 mlast->m_next = m; 1976 mlast = m; 1977 } 1978 cmd->dst_m = m0; 1979 } 1980 } 1981 1982 if (cmd->dst_map == NULL) { 1983 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { 1984 hifnstats.hst_nomem_map++; 1985 err = ENOMEM; 1986 goto err_srcmap; 1987 } 1988 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1989 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 1990 cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { 1991 hifnstats.hst_nomem_map++; 1992 err = ENOMEM; 1993 goto err_dstmap1; 1994 } 1995 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1996 #if 0 1997 cmd->dst_io->uio_segflg |= UIO_USERSPACE; 1998 #endif 1999 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 2000 cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { 2001 hifnstats.hst_nomem_load++; 2002 err = ENOMEM; 2003 goto err_dstmap1; 2004 } 2005 } 2006 } 2007 2008 #ifdef HIFN_DEBUG 2009 if (hifn_debug) { 2010 device_printf(sc->sc_dev, 2011 "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", 2012 READ_REG_1(sc, HIFN_1_DMA_CSR), 2013 READ_REG_1(sc, HIFN_1_DMA_IER), 2014 dma->cmdu, dma->srcu, dma->dstu, dma->resu, 2015 cmd->src_nsegs, cmd->dst_nsegs); 2016 } 2017 #endif 2018 2019 if (cmd->src_map == cmd->dst_map) { 2020 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2021 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2022 } else { 2023 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2024 BUS_DMASYNC_PREWRITE); 2025 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2026 BUS_DMASYNC_PREREAD); 2027 } 2028 2029 /* 2030 * need N src, and N dst 2031 */ 2032 if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || 2033 (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { 2034 #ifdef HIFN_DEBUG 2035 if (hifn_debug) { 2036 device_printf(sc->sc_dev, 2037 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", 2038 dma->srcu, 
cmd->src_nsegs, 2039 dma->dstu, cmd->dst_nsegs); 2040 } 2041 #endif 2042 hifnstats.hst_nomem_sd++; 2043 err = ERESTART; 2044 goto err_dstmap; 2045 } 2046 2047 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 2048 dma->cmdi = 0; 2049 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 2050 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2051 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 2052 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2053 } 2054 cmdi = dma->cmdi++; 2055 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); 2056 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); 2057 2058 /* .p for command/result already set */ 2059 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | 2060 HIFN_D_MASKDONEIRQ); 2061 HIFN_CMDR_SYNC(sc, cmdi, 2062 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2063 dma->cmdu++; 2064 2065 /* 2066 * We don't worry about missing an interrupt (which a "command wait" 2067 * interrupt salvages us from), unless there is more than one command 2068 * in the queue. 2069 */ 2070 if (dma->cmdu > 1) { 2071 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 2072 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2073 } 2074 2075 hifnstats.hst_ipackets++; 2076 hifnstats.hst_ibytes += cmd->src_mapsize; 2077 2078 hifn_dmamap_load_src(sc, cmd); 2079 2080 /* 2081 * Unlike other descriptors, we don't mask done interrupt from 2082 * result descriptor. 
2083 */ 2084 #ifdef HIFN_DEBUG 2085 if (hifn_debug) 2086 kprintf("load res\n"); 2087 #endif 2088 if (dma->resi == HIFN_D_RES_RSIZE) { 2089 dma->resi = 0; 2090 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 2091 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2092 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 2093 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2094 } 2095 resi = dma->resi++; 2096 KASSERT(dma->hifn_commands[resi] == NULL, 2097 ("hifn_crypto: command slot %u busy", resi)); 2098 dma->hifn_commands[resi] = cmd; 2099 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 2100 if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { 2101 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2102 HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); 2103 sc->sc_curbatch++; 2104 if (sc->sc_curbatch > hifnstats.hst_maxbatch) 2105 hifnstats.hst_maxbatch = sc->sc_curbatch; 2106 hifnstats.hst_totbatch++; 2107 } else { 2108 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2109 HIFN_D_VALID | HIFN_D_LAST); 2110 sc->sc_curbatch = 0; 2111 } 2112 HIFN_RESR_SYNC(sc, resi, 2113 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2114 dma->resu++; 2115 2116 if (cmd->sloplen) 2117 cmd->slopidx = resi; 2118 2119 hifn_dmamap_load_dst(sc, cmd); 2120 2121 csr = 0; 2122 if (sc->sc_c_busy == 0) { 2123 csr |= HIFN_DMACSR_C_CTRL_ENA; 2124 sc->sc_c_busy = 1; 2125 } 2126 if (sc->sc_s_busy == 0) { 2127 csr |= HIFN_DMACSR_S_CTRL_ENA; 2128 sc->sc_s_busy = 1; 2129 } 2130 if (sc->sc_r_busy == 0) { 2131 csr |= HIFN_DMACSR_R_CTRL_ENA; 2132 sc->sc_r_busy = 1; 2133 } 2134 if (sc->sc_d_busy == 0) { 2135 csr |= HIFN_DMACSR_D_CTRL_ENA; 2136 sc->sc_d_busy = 1; 2137 } 2138 if (csr) 2139 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr); 2140 2141 #ifdef HIFN_DEBUG 2142 if (hifn_debug) { 2143 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", 2144 READ_REG_1(sc, HIFN_1_DMA_CSR), 2145 READ_REG_1(sc, HIFN_1_DMA_IER)); 2146 } 2147 #endif 2148 2149 sc->sc_active = 5; 2150 HIFN_UNLOCK(sc); 2151 KASSERT(err == 0, ("hifn_crypto: success with 
error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		/* free a separately-allocated destination chain, if any */
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	HIFN_UNLOCK(sc);
	return (err);
}

/*
 * Once-a-second watchdog.  When no request has been submitted for a
 * while (sc_active, set on submission, has counted down to zero) any
 * DMA engine whose ring is empty is disabled; otherwise just age
 * sc_active.  Re-arms itself via callout_reset().
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		/* disable each of command/source/dest/result when idle */
		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	HIFN_UNLOCK(sc);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

/*
 * Interrupt handler: acknowledge the DMA status bits we are interested
 * in, reset the chip on an abort condition, then reap completed
 * result/source/command descriptors, invoking hifn_callback() for each
 * finished command, and finally unblock the crypto layer if we had
 * pushed back on it.
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		hifnstats.hst_noirq++;
		return;
	}

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* ack only the bits we were watching */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		/* chip aborted a transfer: fail/salvage everything and reset */
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* still owned by the chip: stop reaping */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* index RSIZE holds the ring's jump descriptor, not a slot */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
			    ("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC result follows a 12-byte header */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	/* retire consumed source descriptors */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	/* retire consumed command descriptors */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = device_get_softc(dev);
	int mac = 0, cry = 0, sesn;
	struct hifn_session *ses = NULL;

	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	HIFN_LOCK(sc);
	if (sc->sc_sessions == NULL) {
		/* first session: allocate a one-entry table */
		ses = sc->sc_sessions = (struct hifn_session *)kmalloc(
		    sizeof(*ses), M_DEVBUF, M_NOWAIT);
		if (ses == NULL) {
			HIFN_UNLOCK(sc);
			return (ENOMEM);
		}
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		/* reuse a free slot if one exists */
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (!sc->sc_sessions[sesn].hs_used) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			/* grow the table by one entry */
			sesn = sc->sc_nsessions;
			ses = (struct hifn_session *)kmalloc((sesn + 1) *
			    sizeof(*ses), M_DEVBUF, M_NOWAIT);
			if (ses == NULL) {
				HIFN_UNLOCK(sc);
				return (ENOMEM);
			}
			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
			/* scrub the old table (may hold IVs) before freeing */
			bzero(sc->sc_sessions, sesn * sizeof(*ses));
			kfree(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}
	HIFN_UNLOCK(sc);

	bzero(ses, sizeof(*ses));
	ses->hs_used = 1;

	/*
	 * NOTE(review): the EINVAL returns below leave ses->hs_used set,
	 * permanently consuming the slot -- candidate leak, confirm
	 * against upstream history before changing.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			/* at most one MAC op per session */
			if (mac)
				return (EINVAL);
			mac = 1;
			ses->hs_mlen = c->cri_mlen;
			if (ses->hs_mlen == 0) {
				/* default to the full digest length */
				switch (c->cri_alg) {
				case CRYPTO_MD5:
				case CRYPTO_MD5_HMAC:
					ses->hs_mlen = 16;
					break;
				case CRYPTO_SHA1:
				case CRYPTO_SHA1_HMAC:
					ses->hs_mlen = 20;
					break;
				}
			}
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* XXX this may read fewer, does it matter? */
			read_random(ses->hs_iv,
				c->cri_alg == CRYPTO_AES_CBC ?
				    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			/* at most one cipher op per session */
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0)
		return (EINVAL);

	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
#define	CRYPTO_SESID2LID(_sid)	(((u_int32_t) (_sid)) & 0xffffffff)

static int
hifn_freesession(device_t dev, u_int64_t tid)
{
	struct hifn_softc *sc = device_get_softc(dev);
	int session, error;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
	if (sc == NULL)
		return (EINVAL);

	HIFN_LOCK(sc);
	session = HIFN_SESSION(sid);
	if (session < sc->sc_nsessions) {
		/* zeroing clears hs_used, releasing the slot for reuse */
		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
		error = 0;
	} else
		error = EINVAL;
	HIFN_UNLOCK(sc);

	return (error);
}

/*
 * Accept a symmetric crypto request from the opencrypto framework:
 * validate the session and the descriptor chain, translate it into a
 * hifn_command (cipher algorithm/mode masks, IV and key handling, MAC
 * masks), then hand the command to hifn_crypto() for DMA submission.
 * Returns 0 on dispatch, ERESTART when the part is out of resources
 * (caller requeues), or completes the request with an error.
 */
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}

	cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO);

	/* source and destination share the caller's buffer initially */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		/* single-op request: either a hash or a cipher */
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		/*
		 * Two-op request.  The chip only supports MAC-then-decrypt
		 * or encrypt-then-MAC ordering.
		 */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* IV: explicit from caller, else session IV */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					/* place the IV into the buffer */
					crypto_copyback(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			} else {
				/* decrypt: IV comes from caller or buffer */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else {
					crypto_copydata(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			}
		}

		/*
		 * NOTE(review): NEW_KEY is also set unconditionally two
		 * lines below, which makes this conditional set redundant.
		 */
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			/* copy the HMAC key in and zero-pad to full length */
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		kfree(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		kfree(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}

/*
 * Fail or salvage every command on the result ring after the chip
 * reported a DMA abort: commands whose result descriptor the chip
 * already completed (VALID clear) are finished normally through
 * hifn_callback(); the rest are completed with ENOMEM.  Finally the
 * board is reset and the DMA state reinitialized.  Called with the
 * driver lock held (from hifn_intr).
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			kfree(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}

/*
 * Completion path for one finished command: sync and tear down the DMA
 * maps, trim/swap the destination mbuf chain, copy back any "slop"
 * bytes, save the trailing ciphertext block as the session's next IV
 * for encrypt ops, copy the MAC result into the request, then free the
 * command and notify the crypto layer via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate destination chain was allocated: hand
			 * it to the caller (trimmed to the source length)
			 * and free the original.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* copy the tail bytes the chip wrote into the slop area */
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* retire consumed destination descriptors */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		/* encrypt op: last ciphertext block becomes the next IV */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	if (macbuf != NULL) {
		/* copy hs_mlen digest bytes to the caller's inject offset */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	kfree(cmd, M_DEVBUF);
	crypto_done(crp);
}

/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 *     we do this because reading from an arbitrary (e.g.
last) 2917 * register may not always work. 2918 */ 2919 static void 2920 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2921 { 2922 if (sc->sc_flags & HIFN_IS_7811) { 2923 if (sc->sc_bar0_lastreg == reg - 4) 2924 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); 2925 sc->sc_bar0_lastreg = reg; 2926 } 2927 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2928 } 2929 2930 static void 2931 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2932 { 2933 if (sc->sc_flags & HIFN_IS_7811) { 2934 if (sc->sc_bar1_lastreg == reg - 4) 2935 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2936 sc->sc_bar1_lastreg = reg; 2937 } 2938 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2939 } 2940