1 /* $FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.5.2.5 2003/06/04 17:56:59 sam Exp $ */
2 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
3
4 /*
5 * Invertex AEON / Hifn 7751 driver
6 * Copyright (c) 1999 Invertex Inc. All rights reserved.
7 * Copyright (c) 1999 Theo de Raadt
8 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
9 * http://www.netsec.net
10 * Copyright (c) 2003 Hifn Inc.
11 *
12 * This driver is based on a previous driver by Invertex, for which they
13 * requested: Please send any comments, feedback, bug-fixes, or feature
14 * requests to software@invertex.com.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. The name of the author may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * Effort sponsored in part by the Defense Advanced Research Projects
40 * Agency (DARPA) and Air Force Research Laboratory, Air Force
41 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42 *
43 */
44
45 /*
46 * Driver for various Hifn encryption processors.
47 */
48 #include "opt_hifn.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/proc.h>
53 #include <sys/errno.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/mbuf.h>
57 #include <sys/sysctl.h>
58 #include <sys/bus.h>
59 #include <sys/rman.h>
60 #include <sys/random.h>
61 #include <sys/uio.h>
62
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65
66 #include <machine/clock.h>
67 #include <opencrypto/cryptodev.h>
68
69 #include "cryptodev_if.h"
70
71 #include <bus/pci/pcivar.h>
72 #include <bus/pci/pcireg.h>
73
74 #ifdef HIFN_RNDTEST
75 #include "../rndtest/rndtest.h"
76 #endif
77 #include "hifn7751reg.h"
78 #include "hifn7751var.h"
79
80 /*
81 * Prototypes and count for the pci_device structure
82 */
/* Newbus device interface entry points. */
static int hifn_probe(device_t);
static int hifn_attach(device_t);
static int hifn_detach(device_t);
static int hifn_suspend(device_t);
static int hifn_resume(device_t);
static void hifn_shutdown(device_t);

/* Chip reset, RAM sizing, and low-level initialization helpers. */
static void hifn_reset_board(struct hifn_softc *, int);
static void hifn_reset_puc(struct hifn_softc *);
static void hifn_puc_wait(struct hifn_softc *);
static int hifn_enable_crypto(struct hifn_softc *);
static void hifn_set_retry(struct hifn_softc *sc);
static void hifn_init_dma(struct hifn_softc *);
static void hifn_init_pci_registers(struct hifn_softc *);
static int hifn_sramsize(struct hifn_softc *);
static int hifn_dramsize(struct hifn_softc *);
static int hifn_ramtype(struct hifn_softc *);
static void hifn_sessions(struct hifn_softc *);
static void hifn_intr(void *);
static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
/* opencrypto framework methods (wired up in hifn_methods below). */
static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
static int hifn_freesession(device_t, u_int64_t);
static int hifn_process(device_t, struct cryptop *, int);
static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static int hifn_init_pubrng(struct hifn_softc *);
#ifndef HIFN_NO_RNG
static void hifn_rng(void *);
#endif
static void hifn_tick(void *);
static void hifn_abort(struct hifn_softc *);
static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

/* BAR0/BAR1 register write helpers (see WRITE_REG_0/WRITE_REG_1 macros). */
static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
123
124
/*
 * Method dispatch table: connects this driver to newbus and to the
 * opencrypto framework (cryptodev_if.h).
 */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),

	DEVMETHOD_END
};
/* Driver description, devclass, and module glue. */
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

DECLARE_DUMMY_MODULE(hifn);
DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, NULL, NULL);
/* Must load after the crypto framework (and rndtest when configured). */
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif
158
159 static __inline__ u_int32_t
READ_REG_0(struct hifn_softc * sc,bus_size_t reg)160 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
161 {
162 u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
163 sc->sc_bar0_lastreg = (bus_size_t) -1;
164 return (v);
165 }
166 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
167
168 static __inline__ u_int32_t
READ_REG_1(struct hifn_softc * sc,bus_size_t reg)169 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
170 {
171 u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
172 sc->sc_bar1_lastreg = (bus_size_t) -1;
173 return (v);
174 }
175 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
176
/* Sysctl tree root: hw.hifn.* tunables and statistics. */
SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");

#ifdef HIFN_DEBUG
/* hw.hifn.debug: enable verbose diagnostics (HIFN_DEBUG kernels only). */
static int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

/* hw.hifn.stats: read-only aggregate driver statistics. */
static struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
/* hw.hifn.maxbatch: max queued ops before forcing an interrupt. */
static int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");
191
192 /*
193 * Probe for a supported device. The PCI vendor and device
194 * IDs are used to detect devices we know how to handle.
195 */
196 static int
hifn_probe(device_t dev)197 hifn_probe(device_t dev)
198 {
199 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
200 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
201 return (0);
202 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
203 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
204 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
205 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
206 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
207 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
208 return (0);
209 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
210 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
211 return (0);
212 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN) {
213 device_printf(dev,"device id = 0x%x\n", pci_get_device(dev) );
214 return (0);
215 }
216 return (ENXIO);
217 }
218
219 static void
hifn_dmamap_cb(void * arg,bus_dma_segment_t * segs,int nseg,int error)220 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
221 {
222 bus_addr_t *paddr = (bus_addr_t*) arg;
223 *paddr = segs->ds_addr;
224 }
225
226 static const char*
hifn_partname(struct hifn_softc * sc)227 hifn_partname(struct hifn_softc *sc)
228 {
229 /* XXX sprintf numbers when not decoded */
230 switch (pci_get_vendor(sc->sc_dev)) {
231 case PCI_VENDOR_HIFN:
232 switch (pci_get_device(sc->sc_dev)) {
233 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
234 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
235 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
236 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
237 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
238 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
239 }
240 return "Hifn unknown-part";
241 case PCI_VENDOR_INVERTEX:
242 switch (pci_get_device(sc->sc_dev)) {
243 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
244 }
245 return "Invertex unknown-part";
246 case PCI_VENDOR_NETSEC:
247 switch (pci_get_device(sc->sc_dev)) {
248 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
249 }
250 return "NetSec unknown-part";
251 }
252 return "Unknown-vendor unknown-part";
253 }
254
/*
 * Default entropy harvester: feed RNG bytes straight into the kernel
 * random pool.  Installed as sc_harvest when rndtest is not in use;
 * the rsp argument exists only to match the rndtest_harvest signature.
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	add_buffer_randomness_src(buf, count, RAND_SRC_HIFN);
}
260
261 static u_int
checkmaxmin(device_t dev,const char * what,u_int v,u_int min,u_int max)262 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
263 {
264 if (v > max) {
265 device_printf(dev, "Warning, %s %u out of range, "
266 "using max %u\n", what, v, max);
267 v = max;
268 } else if (v < min) {
269 device_printf(dev, "Warning, %s %u out of range, "
270 "using min %u\n", what, v, min);
271 v = min;
272 }
273 return v;
274 }
275
276 /*
277 * Select PLL configuration for 795x parts. This is complicated in
278 * that we cannot determine the optimal parameters without user input.
279 * The reference clock is derived from an external clock through a
280 * multiplier. The external clock is either the host bus (i.e. PCI)
281 * or an external clock generator. When using the PCI bus we assume
282 * the clock is either 33 or 66 MHz; for an external source we cannot
283 * tell the speed.
284 *
285 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
286 * for an external source, followed by the frequency. We calculate
287 * the appropriate multiplier and PLL register contents accordingly.
288 * When no configuration is given we default to "pci66" since that
289 * always will allow the card to work. If a card is using the PCI
290 * bus clock and in a 33MHz slot then it will be operating at half
291 * speed until the correct information is provided.
292 *
293 * We use a default setting of "ext66" because according to Mike Ham
294 * of HiFn, almost every board in existence has an external crystal
295 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
296 * because PCI33 can have clocks from 0 to 33Mhz, and some have
297 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
298 */
/*
 * Parse the "hint.hifn.<unit>.pllconfig" string ("pci33", "ext66", ...)
 * and compute the PLL register value into *pll.  See the block comment
 * above for the rationale behind the "ext66" default.
 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	const char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "ext66";
	fl = 33, fh = 66;	/* legal frequency range for "pci" sources */
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			/* 795x accepts a wider external clock range. */
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;	/* no digits in the hint: assume 66 MHz */
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier. We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}
344
345 /*
346 * Attach an interface that successfully probed.
347 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;		/* KVA of the shared DMA descriptor area */
	int rseg, rid;
	char rbase;		/* 'K' or 'M' for the ram-size banner */
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	lockinit(&sc->sc_lock, __DECONST(char *, device_get_nameunit(dev)),
	    0, LK_CANRECURSE);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.  Re-read the command
	 * register to verify the bits actually stuck.
	 */
#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
			(cmd & PCIM_ENA) == 0 ?
				"memory mapping & bus mastering" :
			(cmd & PCIM_CMD_MEMEN) == 0 ?
				"memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
			 		    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	/* Zero the PCI retry/TRDY timeouts (7811 lockup workaround). */
	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			    sizeof (*sc->sc_dma),
			    hifn_dmamap_cb, &sc->sc_dma_physaddr,
			    BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
					0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
			   hifn_intr, sc,
			   &sc->sc_intrhand, NULL)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	/* Probe how many sessions the on-board ram supports. */
	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's',
		sc->sc_maxses);

	/*
	 * NOTE(review): the device_printf above already ends with '\n',
	 * so this 795x PLL summary lands on a separate, unprefixed
	 * console line.  Other ports of this driver print the PLL info
	 * before the newline — confirm the intended output format.
	 */
	if (sc->sc_flags & HIFN_IS_7956)
		kprintf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	kprintf("\n");

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/* Latch the enable level; it selects which algorithms we offer. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* NB: 1 means the callout runs w/o Giant locked */
	callout_init_mp(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Error unwind: release resources in reverse acquisition order. */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	lockuninit(&sc->sc_lock);
	return (ENXIO);
}
639
640 /*
641 * Detach an interface that successfully probed.
642 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	/* Stop periodic work before tearing anything down. */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/* Drop out of the opencrypto framework. */
	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	/* Release the shared descriptor DMA area (see hifn_attach). */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	lockuninit(&sc->sc_lock);

	return (0);
}
685
686 /*
687 * Stop all chip I/O so that the kernel's probe routines don't
688 * get confused by errant DMAs when rebooting.
689 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	/* hifn_stop() is not implemented yet; shutdown is a no-op. */
	hifn_stop(device_get_softc(dev));
#endif
}
697
698 /*
699 * Device suspend routine. Stop the interface and save some PCI
700 * settings in case the BIOS doesn't restore them properly on
701 * resume.
702 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	/* PCI config-space save is not yet wired up. */
	int i;

	hifn_stop(sc);
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif
	/* Only the suspended flag is maintained for now. */
	sc->sc_suspended = 1;

	return (0);
}
722
723 /*
724 * Device resume routine. Restore some PCI settings in case the BIOS
725 * doesn't, re-enable busmastering, and restart the interface if
726 * appropriate.
727 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	/* PCI config-space restore is not yet wired up. */
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	/* Only the suspended flag is maintained for now. */
	sc->sc_suspended = 0;

	return (0);
}
755
/*
 * Bring up the public key and RNG units: pick the entropy harvest
 * hook, reset the public key engine (non-7811 parts), enable the RNG
 * and its polling callout, and unmask the public-key-done interrupt.
 * Returns 0 on success, 1 if the public key unit fails to reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Wait up to 100ms for the self-clearing reset bit. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

#ifndef HIFN_NO_RNG
	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First RNG read is discarded (see hifn_rng). */
		sc->sc_rngfirst = 1;
		/* Poll roughly 100 times a second, at least once per tick. */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		/* NB: 1 means the callout runs w/o Giant locked */
		callout_init_mp(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
827
828 #ifndef HIFN_NO_RNG
/*
 * RNG polling callout: read entropy words from the chip, hand them to
 * the harvest hook (rndtest or the kernel pool), and reschedule at
 * sc_rnghz.  On a 7811 FIFO underflow the callout is NOT rescheduled,
 * permanently disabling RNG collection.
 */
static void
hifn_rng(void *vsc)
{
	/*
	 * NOTE(review): RANDOM_BITS is not referenced in this function —
	 * it looks like a leftover from an older harvest API.
	 */
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811 exposes a FIFO; drain up to 5 word-pairs per poll. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
					num, sizeof (num));
		}
	} else {
		/* Other parts: one 32-bit word per poll. */
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
				num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}
875 #endif
876
877 static void
hifn_puc_wait(struct hifn_softc * sc)878 hifn_puc_wait(struct hifn_softc *sc)
879 {
880 int i;
881 int reg = HIFN_0_PUCTRL;
882
883 if (sc->sc_flags & HIFN_IS_7956) {
884 reg = HIFN_0_PUCTRL2;
885 }
886
887 for (i = 5000; i > 0; i--) {
888 DELAY(1);
889 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
890 break;
891 }
892 if (!i)
893 device_printf(sc->sc_dev, "proc unit did not reset\n");
894 }
895
896 /*
897 * Reset the processing unit.
898 */
899 static void
hifn_reset_puc(struct hifn_softc * sc)900 hifn_reset_puc(struct hifn_softc *sc)
901 {
902 int reg = HIFN_0_PUCTRL;
903
904 if (sc->sc_flags & HIFN_IS_7956) {
905 reg = HIFN_0_PUCTRL2;
906 }
907
908 /* Reset processing unit */
909 WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
910 hifn_puc_wait(sc);
911 }
912
913 /*
914 * Set the Retry and TRDY registers; note that we set them to
915 * zero because the 7811 locks up when forced to retry (section
916 * 3.6 of "Specification Update SU-0014-04". Not clear if we
917 * should do this for all Hifn parts, but it doesn't seem to hurt.
918 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	/* NB: RETRY only responds to 8-bit reads/writes */
	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
	/*
	 * NOTE(review): this write uses a 4-byte width even though the
	 * comment above (and other ports of this driver) use single-byte
	 * accesses for these config registers.  Confirm the intended
	 * access width for HIFN_TRDY_TIMEOUT.
	 */
	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
}
926
927 /*
928 * Resets the board. Values in the regesters are left as is
929 * from the reset (i.e. initial values are assigned elsewhere).
930 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero. 0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* Host-side descriptor rings are stale after a reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1 second for context RAM init. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			kprintf(": cram init timeout\n");
	} else {
		/* set up DMA configuration register #2 */
		/* turn off all PK and BAR0 swaps */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		    (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		    (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}
}
988
989 static u_int32_t
hifn_next_signature(u_int32_t a,u_int cnt)990 hifn_next_signature(u_int32_t a, u_int cnt)
991 {
992 int i;
993 u_int32_t v;
994
995 for (i = 0; i < cnt; i++) {
996
997 /* get the parity */
998 v = a & 0x80080125;
999 v ^= v >> 16;
1000 v ^= v >> 8;
1001 v ^= v >> 4;
1002 v ^= v >> 2;
1003 v ^= v >> 1;
1004
1005 a = (v & 1) ^ (a << 1);
1006 }
1007
1008 return a;
1009 }
1010
/*
 * Map of PCI vendor/device pairs to the 13-byte "card id" used by
 * hifn_enable_crypto() as the unlock key.  Every entry in this table
 * uses the all-zeros (public) key.
 */
struct pci2id {
	u_short pci_vendor;	/* PCI vendor ID */
	u_short pci_prod;	/* PCI product (device) ID */
	char card_id[13];	/* unlock key bytes for this card */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
1059
1060 /*
 * Checks to see if crypto is already enabled. If crypto isn't enabled,
1062 * "hifn_enable_crypto" is called to enable it. The check is important,
1063 * as enabling crypto twice will lock the board.
1064 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* locate the unlock key ("card id") for this vendor/device pair */
	for (i = 0; i < NELEM(pci2id); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* save registers modified during the unlock sequence */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock. Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/* enter unlock mode and fetch the LFSR seed */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* clock the 13 key bytes through the signature generator */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* re-read the encryption level to see whether the unlock took */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
			    "locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
			    "successfully!\n");
	}
#endif

report:
	/* restore the configuration saved at entry */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		/*
		 * NOTE(review): no trailing newline -- presumably this is
		 * continued by the caller's attach banner; confirm.
		 */
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}
1161
1162 /*
1163 * Give initial values to the registers listed in the "Register Space"
1164 * section of the HIFN Software Development reference manual.
1165 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four DMA channels and
	 * acknowledge (write-1-to-clear) every pending status bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* select which interrupt causes we care about */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		    | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* ack any destination-overrun status, then enable descriptor polling */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1249
1250 /*
1251 * The maximum number of sessions supported by the card
1252 * is dependent on the amount of context ram, which
1253 * encryption algorithms are enabled, and how compression
1254 * is configured. This should be configured before this
1255 * routine is called.
1256 */
1257 static void
hifn_sessions(struct hifn_softc * sc)1258 hifn_sessions(struct hifn_softc *sc)
1259 {
1260 u_int32_t pucnfg;
1261 int ctxsize;
1262
1263 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1264
1265 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1266 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1267 ctxsize = 128;
1268 else
1269 ctxsize = 512;
1270 /*
1271 * 7955/7956 has internal context memory of 32K
1272 */
1273 if (sc->sc_flags & HIFN_IS_7956)
1274 sc->sc_maxses = 32768 / ctxsize;
1275 else
1276 sc->sc_maxses = 1 +
1277 ((sc->sc_ramsize - 32768) / ctxsize);
1278 } else
1279 sc->sc_maxses = sc->sc_ramsize / 16384;
1280
1281 if (sc->sc_maxses > 2048)
1282 sc->sc_maxses = 2048;
1283 }
1284
1285 /*
1286 * Determine ram type (sram or dram). Board should be just out of a reset
1287 * state when this is called.
1288 */
1289 static int
hifn_ramtype(struct hifn_softc * sc)1290 hifn_ramtype(struct hifn_softc *sc)
1291 {
1292 u_int8_t data[8], dataexpect[8];
1293 int i;
1294
1295 for (i = 0; i < sizeof(data); i++)
1296 data[i] = dataexpect[i] = 0x55;
1297 if (hifn_writeramaddr(sc, 0, data))
1298 return (-1);
1299 if (hifn_readramaddr(sc, 0, data))
1300 return (-1);
1301 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1302 sc->sc_drammodel = 1;
1303 return (0);
1304 }
1305
1306 for (i = 0; i < sizeof(data); i++)
1307 data[i] = dataexpect[i] = 0xaa;
1308 if (hifn_writeramaddr(sc, 0, data))
1309 return (-1);
1310 if (hifn_readramaddr(sc, 0, data))
1311 return (-1);
1312 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1313 sc->sc_drammodel = 1;
1314 return (0);
1315 }
1316
1317 return (0);
1318 }
1319
1320 #define HIFN_SRAM_MAX (32 << 20)
1321 #define HIFN_SRAM_STEP_SIZE 16384
1322 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1323
1324 static int
hifn_sramsize(struct hifn_softc * sc)1325 hifn_sramsize(struct hifn_softc *sc)
1326 {
1327 u_int32_t a;
1328 u_int8_t data[8];
1329 u_int8_t dataexpect[sizeof(data)];
1330 int32_t i;
1331
1332 for (i = 0; i < sizeof(data); i++)
1333 data[i] = dataexpect[i] = i ^ 0x5a;
1334
1335 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1336 a = i * HIFN_SRAM_STEP_SIZE;
1337 bcopy(&i, data, sizeof(i));
1338 hifn_writeramaddr(sc, a, data);
1339 }
1340
1341 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1342 a = i * HIFN_SRAM_STEP_SIZE;
1343 bcopy(&i, dataexpect, sizeof(i));
1344 if (hifn_readramaddr(sc, a, data) < 0)
1345 return (0);
1346 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1347 return (0);
1348 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1349 }
1350
1351 return (0);
1352 }
1353
1354 /*
1355 * XXX For dram boards, one should really try all of the
1356 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1357 * is already set up correctly.
1358 */
1359 static int
hifn_dramsize(struct hifn_softc * sc)1360 hifn_dramsize(struct hifn_softc *sc)
1361 {
1362 u_int32_t cnfg;
1363
1364 if (sc->sc_flags & HIFN_IS_7956) {
1365 /*
1366 * 7955/7956 have a fixed internal ram of only 32K.
1367 */
1368 sc->sc_ramsize = 32768;
1369 } else {
1370 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1371 HIFN_PUCNFG_DRAMMASK;
1372 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1373 }
1374 return (0);
1375 }
1376
/*
 * Allocate one slot in each of the four descriptor rings (command,
 * source, destination, result) for a synchronous test operation.
 * When a ring's index has reached the jump descriptor at the end,
 * mark that descriptor valid and wrap back to slot 0.  The indices
 * are returned through *cmdp/*srcp/*dstp/*resp, and each ring's "k"
 * index is advanced in step with its "i" index.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* command ring */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* source ring */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* destination ring */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* result ring */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1422
1423 static int
hifn_writeramaddr(struct hifn_softc * sc,int addr,u_int8_t * data)1424 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1425 {
1426 struct hifn_dma *dma = sc->sc_dma;
1427 hifn_base_command_t wc;
1428 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1429 int r, cmdi, resi, srci, dsti;
1430
1431 wc.masks = htole16(3 << 13);
1432 wc.session_num = htole16(addr >> 14);
1433 wc.total_source_count = htole16(8);
1434 wc.total_dest_count = htole16(addr & 0x3fff);
1435
1436 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1437
1438 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1439 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1440 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1441
1442 /* build write command */
1443 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1444 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1445 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1446
1447 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1448 + offsetof(struct hifn_dma, test_src));
1449 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1450 + offsetof(struct hifn_dma, test_dst));
1451
1452 dma->cmdr[cmdi].l = htole32(16 | masks);
1453 dma->srcr[srci].l = htole32(8 | masks);
1454 dma->dstr[dsti].l = htole32(4 | masks);
1455 dma->resr[resi].l = htole32(4 | masks);
1456
1457 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1458 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1459
1460 for (r = 10000; r >= 0; r--) {
1461 DELAY(10);
1462 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1463 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1464 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1465 break;
1466 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1467 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1468 }
1469 if (r == 0) {
1470 device_printf(sc->sc_dev, "writeramaddr -- "
1471 "result[%d](addr %d) still valid\n", resi, addr);
1472 r = -1;
1473 return (-1);
1474 } else
1475 r = 0;
1476
1477 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1478 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1479 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1480
1481 return (r);
1482 }
1483
1484 static int
hifn_readramaddr(struct hifn_softc * sc,int addr,u_int8_t * data)1485 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1486 {
1487 struct hifn_dma *dma = sc->sc_dma;
1488 hifn_base_command_t rc;
1489 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1490 int r, cmdi, srci, dsti, resi;
1491
1492 rc.masks = htole16(2 << 13);
1493 rc.session_num = htole16(addr >> 14);
1494 rc.total_source_count = htole16(addr & 0x3fff);
1495 rc.total_dest_count = htole16(8);
1496
1497 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1498
1499 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1500 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1501 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1502
1503 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1504 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1505
1506 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1507 offsetof(struct hifn_dma, test_src));
1508 dma->test_src = 0;
1509 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1510 offsetof(struct hifn_dma, test_dst));
1511 dma->test_dst = 0;
1512 dma->cmdr[cmdi].l = htole32(8 | masks);
1513 dma->srcr[srci].l = htole32(8 | masks);
1514 dma->dstr[dsti].l = htole32(8 | masks);
1515 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1516
1517 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1518 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1519
1520 for (r = 10000; r >= 0; r--) {
1521 DELAY(10);
1522 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1523 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1524 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1525 break;
1526 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1527 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1528 }
1529 if (r == 0) {
1530 device_printf(sc->sc_dev, "readramaddr -- "
1531 "result[%d](addr %d) still valid\n", resi, addr);
1532 r = -1;
1533 } else {
1534 r = 0;
1535 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1536 }
1537
1538 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1539 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1540 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1541
1542 return (r);
1543 }
1544
1545 /*
1546 * Initialize the descriptor rings.
1547 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/* the extra descriptor at the end of each ring jumps back to slot 0 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));

	/* reset usage counts and producer/consumer indices */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1577
1578 /*
1579 * Writes out the raw command buffer space. Returns the
1580 * command buffer size.
1581 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/* base command: source/destination byte counts, low 16 bits */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		/* slop tail is replaced by one 32-bit staging word */
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;

	/* high bits of the lengths go in the session_num field */
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* optional MAC sub-command */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* optional crypt sub-command */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* key material follows the sub-commands when a new key is loaded */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* repeat the key to fill the 256-byte RC4 key area */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* IV follows the key material when a new IV is supplied */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* pure pass-through commands still need 8 bytes of padding */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	/* return the number of command-buffer bytes written */
	return (buf_pos - buf);
/* NOTE(review): matching #define MIN presumably appears earlier in the file */
#undef MIN
}
1697
1698 static int
hifn_dmamap_aligned(struct hifn_operand * op)1699 hifn_dmamap_aligned(struct hifn_operand *op)
1700 {
1701 int i;
1702
1703 for (i = 0; i < op->nsegs; i++) {
1704 if (op->segs[i].ds_addr & 3)
1705 return (0);
1706 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1707 return (0);
1708 }
1709 return (1);
1710 }
1711
1712 static __inline int
hifn_dmamap_dstwrap(struct hifn_softc * sc,int idx)1713 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1714 {
1715 struct hifn_dma *dma = sc->sc_dma;
1716
1717 if (++idx == HIFN_D_DST_RSIZE) {
1718 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1719 HIFN_D_MASKDONEIRQ);
1720 HIFN_DSTR_SYNC(sc, idx,
1721 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1722 idx = 0;
1723 }
1724 return (idx);
1725 }
1726
/*
 * Fill the destination descriptor ring from the operand's DMA segments.
 * All but the last segment are queued as-is; the last descriptor either
 * covers the final segment directly (no slop) or is redirected to the
 * per-command 32-bit slop buffer so the hardware never writes a partial
 * word into the caller's buffer.  Returns the updated ring index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* queue every segment except the last */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		/* aligned: final descriptor points at the last segment */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* final descriptor lands in the shared slop word instead */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* queue whatever part of the last segment is whole words */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1780
1781 static __inline int
hifn_dmamap_srcwrap(struct hifn_softc * sc,int idx)1782 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1783 {
1784 struct hifn_dma *dma = sc->sc_dma;
1785
1786 if (++idx == HIFN_D_SRC_RSIZE) {
1787 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1788 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1789 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1790 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1791 idx = 0;
1792 }
1793 return (idx);
1794 }
1795
1796 static int
hifn_dmamap_load_src(struct hifn_softc * sc,struct hifn_command * cmd)1797 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1798 {
1799 struct hifn_dma *dma = sc->sc_dma;
1800 struct hifn_operand *src = &cmd->src;
1801 int idx, i;
1802 u_int32_t last = 0;
1803
1804 idx = dma->srci;
1805 for (i = 0; i < src->nsegs; i++) {
1806 if (i == src->nsegs - 1)
1807 last = HIFN_D_LAST;
1808
1809 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1810 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1811 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1812 HIFN_SRCR_SYNC(sc, idx,
1813 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1814
1815 idx = hifn_dmamap_srcwrap(sc, idx);
1816 }
1817 dma->srci = idx;
1818 dma->srcu += src->nsegs;
1819 return (idx);
1820 }
1821
1822 static void
hifn_op_cb(void * arg,bus_dma_segment_t * seg,int nsegs,bus_size_t mapsize,int error)1823 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1824 {
1825 struct hifn_operand *op = arg;
1826
1827 KASSERT(nsegs <= MAX_SCATTER,
1828 ("hifn_op_cb: too many DMA segments (%u > %u) "
1829 "returned when mapping operand", nsegs, MAX_SCATTER));
1830 op->mapsize = mapsize;
1831 op->nsegs = nsegs;
1832 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1833 }
1834
1835 static int
hifn_crypto(struct hifn_softc * sc,struct hifn_command * cmd,struct cryptop * crp,int hint)1836 hifn_crypto(
1837 struct hifn_softc *sc,
1838 struct hifn_command *cmd,
1839 struct cryptop *crp,
1840 int hint)
1841 {
1842 struct hifn_dma *dma = sc->sc_dma;
1843 u_int32_t cmdlen, csr;
1844 int cmdi, resi, err = 0;
1845
1846 /*
1847 * need 1 cmd, and 1 res
1848 *
1849 * NB: check this first since it's easy.
1850 */
1851 HIFN_LOCK(sc);
1852 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1853 (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1854 #ifdef HIFN_DEBUG
1855 if (hifn_debug) {
1856 device_printf(sc->sc_dev,
1857 "cmd/result exhaustion, cmdu %u resu %u\n",
1858 dma->cmdu, dma->resu);
1859 }
1860 #endif
1861 hifnstats.hst_nomem_cr++;
1862 HIFN_UNLOCK(sc);
1863 return (ERESTART);
1864 }
1865
1866 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1867 hifnstats.hst_nomem_map++;
1868 HIFN_UNLOCK(sc);
1869 return (ENOMEM);
1870 }
1871
1872 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1873 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1874 cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1875 hifnstats.hst_nomem_load++;
1876 err = ENOMEM;
1877 goto err_srcmap1;
1878 }
1879 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1880 #if 0
1881 cmd->src_io->uio_segflg = UIO_USERSPACE;
1882 #endif
1883 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1884 cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1885 hifnstats.hst_nomem_load++;
1886 err = ENOMEM;
1887 goto err_srcmap1;
1888 }
1889 } else {
1890 err = EINVAL;
1891 goto err_srcmap1;
1892 }
1893
1894 if (hifn_dmamap_aligned(&cmd->src)) {
1895 cmd->sloplen = cmd->src_mapsize & 3;
1896 cmd->dst = cmd->src;
1897 } else {
1898 if (crp->crp_flags & CRYPTO_F_IOV) {
1899 err = EINVAL;
1900 goto err_srcmap;
1901 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1902 int totlen, len;
1903 struct mbuf *m, *m0, *mlast;
1904
1905 KASSERT(cmd->dst_m == cmd->src_m,
1906 ("hifn_crypto: dst_m initialized improperly"));
1907 hifnstats.hst_unaligned++;
1908 /*
1909 * Source is not aligned on a longword boundary.
1910 * Copy the data to insure alignment. If we fail
1911 * to allocate mbufs or clusters while doing this
1912 * we return ERESTART so the operation is requeued
1913 * at the crypto later, but only if there are
1914 * ops already posted to the hardware; otherwise we
1915 * have no guarantee that we'll be re-entered.
1916 */
1917 totlen = cmd->src_mapsize;
1918 if (cmd->src_m->m_flags & M_PKTHDR) {
1919 len = MHLEN;
1920 MGETHDR(m0, M_NOWAIT, MT_DATA);
1921 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) {
1922 m_free(m0);
1923 m0 = NULL;
1924 }
1925 } else {
1926 len = MLEN;
1927 MGET(m0, M_NOWAIT, MT_DATA);
1928 }
1929 if (m0 == NULL) {
1930 hifnstats.hst_nomem_mbuf++;
1931 err = dma->cmdu ? ERESTART : ENOMEM;
1932 goto err_srcmap;
1933 }
1934 if (totlen >= MINCLSIZE) {
1935 MCLGET(m0, M_NOWAIT);
1936 if ((m0->m_flags & M_EXT) == 0) {
1937 hifnstats.hst_nomem_mcl++;
1938 err = dma->cmdu ? ERESTART : ENOMEM;
1939 m_freem(m0);
1940 goto err_srcmap;
1941 }
1942 len = MCLBYTES;
1943 }
1944 totlen -= len;
1945 m0->m_pkthdr.len = m0->m_len = len;
1946 mlast = m0;
1947
1948 while (totlen > 0) {
1949 MGET(m, M_NOWAIT, MT_DATA);
1950 if (m == NULL) {
1951 hifnstats.hst_nomem_mbuf++;
1952 err = dma->cmdu ? ERESTART : ENOMEM;
1953 m_freem(m0);
1954 goto err_srcmap;
1955 }
1956 len = MLEN;
1957 if (totlen >= MINCLSIZE) {
1958 MCLGET(m, M_NOWAIT);
1959 if ((m->m_flags & M_EXT) == 0) {
1960 hifnstats.hst_nomem_mcl++;
1961 err = dma->cmdu ? ERESTART : ENOMEM;
1962 mlast->m_next = m;
1963 m_freem(m0);
1964 goto err_srcmap;
1965 }
1966 len = MCLBYTES;
1967 }
1968
1969 m->m_len = len;
1970 m0->m_pkthdr.len += len;
1971 totlen -= len;
1972
1973 mlast->m_next = m;
1974 mlast = m;
1975 }
1976 cmd->dst_m = m0;
1977 }
1978 }
1979
1980 if (cmd->dst_map == NULL) {
1981 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1982 hifnstats.hst_nomem_map++;
1983 err = ENOMEM;
1984 goto err_srcmap;
1985 }
1986 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1987 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1988 cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1989 hifnstats.hst_nomem_map++;
1990 err = ENOMEM;
1991 goto err_dstmap1;
1992 }
1993 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1994 #if 0
1995 cmd->dst_io->uio_segflg |= UIO_USERSPACE;
1996 #endif
1997 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1998 cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1999 hifnstats.hst_nomem_load++;
2000 err = ENOMEM;
2001 goto err_dstmap1;
2002 }
2003 }
2004 }
2005
2006 #ifdef HIFN_DEBUG
2007 if (hifn_debug) {
2008 device_printf(sc->sc_dev,
2009 "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
2010 READ_REG_1(sc, HIFN_1_DMA_CSR),
2011 READ_REG_1(sc, HIFN_1_DMA_IER),
2012 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
2013 cmd->src_nsegs, cmd->dst_nsegs);
2014 }
2015 #endif
2016
2017 if (cmd->src_map == cmd->dst_map) {
2018 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2019 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2020 } else {
2021 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2022 BUS_DMASYNC_PREWRITE);
2023 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2024 BUS_DMASYNC_PREREAD);
2025 }
2026
2027 /*
2028 * need N src, and N dst
2029 */
2030 if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
2031 (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
2032 #ifdef HIFN_DEBUG
2033 if (hifn_debug) {
2034 device_printf(sc->sc_dev,
2035 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
2036 dma->srcu, cmd->src_nsegs,
2037 dma->dstu, cmd->dst_nsegs);
2038 }
2039 #endif
2040 hifnstats.hst_nomem_sd++;
2041 err = ERESTART;
2042 goto err_dstmap;
2043 }
2044
2045 if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2046 dma->cmdi = 0;
2047 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2048 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2049 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2050 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2051 }
2052 cmdi = dma->cmdi++;
2053 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2054 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2055
2056 /* .p for command/result already set */
2057 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2058 HIFN_D_MASKDONEIRQ);
2059 HIFN_CMDR_SYNC(sc, cmdi,
2060 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2061 dma->cmdu++;
2062
2063 /*
2064 * We don't worry about missing an interrupt (which a "command wait"
2065 * interrupt salvages us from), unless there is more than one command
2066 * in the queue.
2067 */
2068 if (dma->cmdu > 1) {
2069 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2070 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2071 }
2072
2073 hifnstats.hst_ipackets++;
2074 hifnstats.hst_ibytes += cmd->src_mapsize;
2075
2076 hifn_dmamap_load_src(sc, cmd);
2077
2078 /*
2079 * Unlike other descriptors, we don't mask done interrupt from
2080 * result descriptor.
2081 */
2082 #ifdef HIFN_DEBUG
2083 if (hifn_debug)
2084 kprintf("load res\n");
2085 #endif
2086 if (dma->resi == HIFN_D_RES_RSIZE) {
2087 dma->resi = 0;
2088 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2089 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2090 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2091 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2092 }
2093 resi = dma->resi++;
2094 KASSERT(dma->hifn_commands[resi] == NULL,
2095 ("hifn_crypto: command slot %u busy", resi));
2096 dma->hifn_commands[resi] = cmd;
2097 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2098 if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2099 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2100 HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2101 sc->sc_curbatch++;
2102 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2103 hifnstats.hst_maxbatch = sc->sc_curbatch;
2104 hifnstats.hst_totbatch++;
2105 } else {
2106 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2107 HIFN_D_VALID | HIFN_D_LAST);
2108 sc->sc_curbatch = 0;
2109 }
2110 HIFN_RESR_SYNC(sc, resi,
2111 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2112 dma->resu++;
2113
2114 if (cmd->sloplen)
2115 cmd->slopidx = resi;
2116
2117 hifn_dmamap_load_dst(sc, cmd);
2118
2119 csr = 0;
2120 if (sc->sc_c_busy == 0) {
2121 csr |= HIFN_DMACSR_C_CTRL_ENA;
2122 sc->sc_c_busy = 1;
2123 }
2124 if (sc->sc_s_busy == 0) {
2125 csr |= HIFN_DMACSR_S_CTRL_ENA;
2126 sc->sc_s_busy = 1;
2127 }
2128 if (sc->sc_r_busy == 0) {
2129 csr |= HIFN_DMACSR_R_CTRL_ENA;
2130 sc->sc_r_busy = 1;
2131 }
2132 if (sc->sc_d_busy == 0) {
2133 csr |= HIFN_DMACSR_D_CTRL_ENA;
2134 sc->sc_d_busy = 1;
2135 }
2136 if (csr)
2137 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2138
2139 #ifdef HIFN_DEBUG
2140 if (hifn_debug) {
2141 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2142 READ_REG_1(sc, HIFN_1_DMA_CSR),
2143 READ_REG_1(sc, HIFN_1_DMA_IER));
2144 }
2145 #endif
2146
2147 sc->sc_active = 5;
2148 HIFN_UNLOCK(sc);
2149 KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2150 return (err); /* success */
2151
2152 err_dstmap:
2153 if (cmd->src_map != cmd->dst_map)
2154 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2155 err_dstmap1:
2156 if (cmd->src_map != cmd->dst_map)
2157 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2158 err_srcmap:
2159 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2160 if (cmd->src_m != cmd->dst_m)
2161 m_freem(cmd->dst_m);
2162 }
2163 bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2164 err_srcmap1:
2165 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2166 HIFN_UNLOCK(sc);
2167 return (err);
2168 }
2169
2170 static void
hifn_tick(void * vsc)2171 hifn_tick(void* vsc)
2172 {
2173 struct hifn_softc *sc = vsc;
2174
2175 HIFN_LOCK(sc);
2176 if (sc->sc_active == 0) {
2177 struct hifn_dma *dma = sc->sc_dma;
2178 u_int32_t r = 0;
2179
2180 if (dma->cmdu == 0 && sc->sc_c_busy) {
2181 sc->sc_c_busy = 0;
2182 r |= HIFN_DMACSR_C_CTRL_DIS;
2183 }
2184 if (dma->srcu == 0 && sc->sc_s_busy) {
2185 sc->sc_s_busy = 0;
2186 r |= HIFN_DMACSR_S_CTRL_DIS;
2187 }
2188 if (dma->dstu == 0 && sc->sc_d_busy) {
2189 sc->sc_d_busy = 0;
2190 r |= HIFN_DMACSR_D_CTRL_DIS;
2191 }
2192 if (dma->resu == 0 && sc->sc_r_busy) {
2193 sc->sc_r_busy = 0;
2194 r |= HIFN_DMACSR_R_CTRL_DIS;
2195 }
2196 if (r)
2197 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2198 } else
2199 sc->sc_active--;
2200 HIFN_UNLOCK(sc);
2201 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2202 }
2203
/*
 * Interrupt handler: acknowledge DMA-unit interrupt sources, recover
 * from chip aborts by resetting, then reap completed descriptors from
 * the result, source and command rings and deliver finished requests
 * via hifn_callback().
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		hifnstats.hst_noirq++;
		return;
	}

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* Acknowledge only the interrupt sources we have enabled. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* Any DMA abort is fatal to in-flight work: salvage and reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Descriptor still owned by the chip; stop here. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the jump descriptor, not a result. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
			    ("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest begins 12 bytes into the result buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		/* Ring has RSIZE real slots plus the jump slot; wrap past it. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		/* Ring space freed up; unblock the opencrypto queue. */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
2357
2358 /*
2359 * Allocate a new 'session' and return an encoded session id. 'sidp'
2360 * contains our registration id, and should contain an encoded session
2361 * id on successful allocation.
2362 */
2363 static int
hifn_newsession(device_t dev,u_int32_t * sidp,struct cryptoini * cri)2364 hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2365 {
2366 struct cryptoini *c;
2367 struct hifn_softc *sc = device_get_softc(dev);
2368 int mac = 0, cry = 0, sesn;
2369 struct hifn_session *ses = NULL;
2370
2371 KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2372 if (sidp == NULL || cri == NULL || sc == NULL)
2373 return (EINVAL);
2374
2375 HIFN_LOCK(sc);
2376 if (sc->sc_sessions == NULL) {
2377 ses = sc->sc_sessions = (struct hifn_session *)kmalloc(
2378 sizeof(*ses), M_DEVBUF, M_NOWAIT);
2379 if (ses == NULL) {
2380 HIFN_UNLOCK(sc);
2381 return (ENOMEM);
2382 }
2383 sesn = 0;
2384 sc->sc_nsessions = 1;
2385 } else {
2386 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2387 if (!sc->sc_sessions[sesn].hs_used) {
2388 ses = &sc->sc_sessions[sesn];
2389 break;
2390 }
2391 }
2392
2393 if (ses == NULL) {
2394 sesn = sc->sc_nsessions;
2395 ses = (struct hifn_session *)kmalloc((sesn + 1) *
2396 sizeof(*ses), M_DEVBUF, M_NOWAIT);
2397 if (ses == NULL) {
2398 HIFN_UNLOCK(sc);
2399 return (ENOMEM);
2400 }
2401 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2402 bzero(sc->sc_sessions, sesn * sizeof(*ses));
2403 kfree(sc->sc_sessions, M_DEVBUF);
2404 sc->sc_sessions = ses;
2405 ses = &sc->sc_sessions[sesn];
2406 sc->sc_nsessions++;
2407 }
2408 }
2409 HIFN_UNLOCK(sc);
2410
2411 bzero(ses, sizeof(*ses));
2412 ses->hs_used = 1;
2413
2414 for (c = cri; c != NULL; c = c->cri_next) {
2415 switch (c->cri_alg) {
2416 case CRYPTO_MD5:
2417 case CRYPTO_SHA1:
2418 case CRYPTO_MD5_HMAC:
2419 case CRYPTO_SHA1_HMAC:
2420 if (mac)
2421 return (EINVAL);
2422 mac = 1;
2423 ses->hs_mlen = c->cri_mlen;
2424 if (ses->hs_mlen == 0) {
2425 switch (c->cri_alg) {
2426 case CRYPTO_MD5:
2427 case CRYPTO_MD5_HMAC:
2428 ses->hs_mlen = 16;
2429 break;
2430 case CRYPTO_SHA1:
2431 case CRYPTO_SHA1_HMAC:
2432 ses->hs_mlen = 20;
2433 break;
2434 }
2435 }
2436 break;
2437 case CRYPTO_DES_CBC:
2438 case CRYPTO_3DES_CBC:
2439 case CRYPTO_AES_CBC:
2440 /* XXX this may read fewer, does it matter? */
2441 read_random(ses->hs_iv,
2442 (c->cri_alg == CRYPTO_AES_CBC ?
2443 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH),
2444 0);
2445 /*FALLTHROUGH*/
2446 case CRYPTO_ARC4:
2447 if (cry)
2448 return (EINVAL);
2449 cry = 1;
2450 break;
2451 default:
2452 return (EINVAL);
2453 }
2454 }
2455 if (mac == 0 && cry == 0)
2456 return (EINVAL);
2457
2458 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2459
2460 return (0);
2461 }
2462
2463 /*
2464 * Deallocate a session.
2465 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2466 * XXX to blow away any keys already stored there.
2467 */
2468 #define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
2469
2470 static int
hifn_freesession(device_t dev,u_int64_t tid)2471 hifn_freesession(device_t dev, u_int64_t tid)
2472 {
2473 struct hifn_softc *sc = device_get_softc(dev);
2474 int session, error;
2475 u_int32_t sid = CRYPTO_SESID2LID(tid);
2476
2477 KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2478 if (sc == NULL)
2479 return (EINVAL);
2480
2481 HIFN_LOCK(sc);
2482 session = HIFN_SESSION(sid);
2483 if (session < sc->sc_nsessions) {
2484 bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2485 error = 0;
2486 } else
2487 error = EINVAL;
2488 HIFN_UNLOCK(sc);
2489
2490 return (error);
2491 }
2492
/*
 * opencrypto "process" entry point: validate the request, translate the
 * descriptor chain (cipher and/or MAC) into a hifn_command, then hand
 * it to hifn_crypto() for DMA dispatch.  Returns 0 on dispatch, ERESTART
 * when ring resources are exhausted (caller requeues), or completes the
 * crp with an error via crypto_done().
 */
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}

	cmd = kmalloc(sizeof(struct hifn_command), M_DEVBUF, M_INTWAIT | M_ZERO);

	/* Operate in place: src and dst start out as the same buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Sort the (at most two) descriptors into maccrd/enccrd.  The
	 * chip only supports MAC-then-decrypt and encrypt-then-MAC
	 * orderings when both are requested.
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd1->crd_alg == CRYPTO_MD5 ||
                     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			/* MAC first, then decrypt. */
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd2->crd_alg == CRYPTO_MD5 ||
                     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			/* Encrypt first, then MAC. */
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			/* AES uses a 16-byte IV, DES/3DES use 8 bytes. */
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Encrypt: use caller's IV or the session IV... */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* ...and write it into the buffer unless present. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					crypto_copyback(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: IV comes from caller or from the buffer. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else {
					crypto_copydata(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			}
		}

		/*
		 * NOTE(review): the conditional NEW_KEY below is redundant
		 * since the mask is set unconditionally two lines later --
		 * the key is always downloaded.
		 */
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
                       break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			/* Copy the HMAC key and zero-pad to full length. */
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		/* Dispatched; completion happens in hifn_callback(). */
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		kfree(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		kfree(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	/* Report the failure back to the opencrypto framework. */
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}
2734
/*
 * DMA abort recovery: walk the outstanding result-ring entries,
 * completing those the chip already finished and failing the rest,
 * then reset and reinitialize the board.  Called from hifn_intr()
 * with the softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest begins 12 bytes into the result buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Still owned by the chip: tear down and fail the crp. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				/* Driver-allocated dst chain replaces the src mbuf. */
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			kfree(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Bring the hardware back to a clean state. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2807
/*
 * Complete one finished request: sync/unload its DMA maps, fix up the
 * destination mbuf chain, copy back slop bytes and the MAC digest,
 * save the next CBC IV into the session, and hand the crp back to the
 * opencrypto framework.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate dst chain was allocated; trim its
			 * lengths to the payload size and substitute it
			 * for the caller's buffer.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy the trailing partial word out of the slop area. */
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reap finished destination-ring descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * For CBC encryption, save the last ciphertext block as the
	 * session IV for the next request.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	if (macbuf != NULL) {
		/* Deliver the MAC digest to the caller's inject offset. */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                        int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	kfree(cmd, M_DEVBUF);
	crypto_done(crp);
}
2908
2909 /*
2910 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2911 * and Group 1 registers; avoid conditions that could create
2912 * burst writes by doing a read in between the writes.
2913 *
2914 * NB: The read we interpose is always to the same register;
2915 * we do this because reading from an arbitrary (e.g. last)
2916 * register may not always work.
2917 */
2918 static void
hifn_write_reg_0(struct hifn_softc * sc,bus_size_t reg,u_int32_t val)2919 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2920 {
2921 if (sc->sc_flags & HIFN_IS_7811) {
2922 if (sc->sc_bar0_lastreg == reg - 4)
2923 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2924 sc->sc_bar0_lastreg = reg;
2925 }
2926 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2927 }
2928
2929 static void
hifn_write_reg_1(struct hifn_softc * sc,bus_size_t reg,u_int32_t val)2930 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2931 {
2932 if (sc->sc_flags & HIFN_IS_7811) {
2933 if (sc->sc_bar1_lastreg == reg - 4)
2934 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2935 sc->sc_bar1_lastreg = reg;
2936 }
2937 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2938 }
2939