/*	$NetBSD: mvxpsec.c,v 1.1 2015/06/03 04:20:02 hsuenaga Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Cryptographic Engine and Security Accelerator (MVXPSEC)
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/evcnt.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/callout.h>
#include <sys/pool.h>
#include <sys/cprng.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/atomic.h>
#include <sys/sha1.h>
#include <sys/md5.h>

#include <uvm/uvm_extern.h>

#include <crypto/rijndael/rijndael.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <net/net_stats.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <netipsec/esp_var.h>

#include <arm/cpufunc.h>
#include <arm/marvell/mvsocvar.h>
#include <arm/marvell/armadaxpreg.h>
#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvxpsecreg.h>
#include <dev/marvell/mvxpsecvar.h>

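/*
 * In DEBUG builds the wrappers below compile every function out of line
 * and with external linkage, so each one keeps a symbol visible to the
 * debugger (assumed intent of the noinline/extern definitions).
 */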
#ifdef DEBUG
#define STATIC __attribute__ ((noinline)) extern
#define _STATIC __attribute__ ((noinline)) extern
#define INLINE __attribute__ ((noinline)) extern
#define _INLINE __attribute__ ((noinline)) extern
#else
#define STATIC static
#define _STATIC __attribute__ ((unused)) static
#define INLINE static inline
#define _INLINE __attribute__ ((unused)) static inline
#endif

/*
 * IRQ and SRAM spaces for each unit
 * XXX: move to attach_args
 */
struct {
	int		err_int;
} mvxpsec_config[] = {
	{ .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
	{ .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
};
#define MVXPSEC_ERR_INT(sc) \
    mvxpsec_config[device_unit((sc)->sc_dev)].err_int

/*
 * AES
 */
#define MAXBC				(128/32)
#define MAXKC				(256/32)
#define MAXROUNDS			14
STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
    uint8_t[MAXROUNDS+1][4][MAXBC]);
STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);

/*
 * device driver autoconf interface
 */
STATIC int mvxpsec_match(device_t, cfdata_t, void *);
STATIC void mvxpsec_attach(device_t, device_t, void *);
STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);

/*
 * register setup
 */
STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);

/*
 * timer(callout) interface
 *
 * XXX: callout is not MP safe...
 */
STATIC void mvxpsec_timer(void *);

/*
 * interrupt interface
 */
STATIC int mvxpsec_intr(void *);
INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
STATIC int mvxpsec_eintr(void *);
STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);

/*
 * memory allocators and VM management
 */
STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
    paddr_t, int);
STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);

/*
 * Low-level DMA interface
 */
STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
    struct marvell_attach_args *);
INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
_INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
    uint32_t, uint32_t, uint32_t);
INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
    struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);

/*
 * High-level DMA interface
 */
INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
    mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
    mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
    mvxpsec_dma_ring *);
INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
    mvxpsec_dma_ring *);
INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
    mvxpsec_dma_ring *);
INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);

/*
 * Session management interface (OpenCrypto)
 */
#define MVXPSEC_SESSION(sid)	((sid) & 0x0fffffff)
#define MVXPSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
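/*
 * e.g. MVXPSEC_SID(1, 5) == 0x10000005: the opencrypto driver id goes in
 * the top 4 bits, the per-driver session number in the low 28 bits, and
 * MVXPSEC_SESSION() recovers the session number again.
 */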
/* pool management */
STATIC int mvxpsec_session_ctor(void *, void *, int);
STATIC void mvxpsec_session_dtor(void *, void *);
STATIC int mvxpsec_packet_ctor(void *, void *, int);
STATIC void mvxpsec_packet_dtor(void *, void *);

/* session management */
STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
INLINE void mvxpsec_session_unref(struct mvxpsec_session *);

/* packet management */
STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);

/* session header management */
STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);

/* packet queue management */
INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);

/* opencrypto operation */
INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);

/* payload data management */
INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
_STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);

/* key pre-computation */
STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);

/* crypto operation management */
INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);

/*
 * parameter converters
 */
INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
INLINE uint32_t mvxpsec_aesklen(int klen);

/*
 * string formatters
 */
_STATIC const char *s_ctrlreg(uint32_t);
_STATIC const char *s_winreg(uint32_t);
_STATIC const char *s_errreg(uint32_t);
_STATIC const char *s_xpsecintr(uint32_t);
_STATIC const char *s_ctlalg(uint32_t);
_STATIC const char *s_xpsec_op(uint32_t);
_STATIC const char *s_xpsec_enc(uint32_t);
_STATIC const char *s_xpsec_mac(uint32_t);
_STATIC const char *s_xpsec_frag(uint32_t);

/*
 * debugging support
 */
#ifdef MVXPSEC_DEBUG
_STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
_STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
_STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
_STATIC void mvxpsec_dump_data(const char *, void *, size_t);

_STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
_STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
_STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);

_STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
_STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
_STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
#endif

/*
 * global configurations, params, work spaces, ...
 *
 * XXX: use sysctl for global configurations
 */
/* waiting for device */
static int mvxpsec_wait_interval = 10;		/* usec */
static int mvxpsec_wait_retry = 100;		/* times = wait for 1 [msec] */
#ifdef MVXPSEC_DEBUG
static uint32_t mvxpsec_debug = MVXPSEC_DEBUG;	/* debug level */
#endif

/*
 * Register accessors
 */
#define MVXPSEC_WRITE(sc, off, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
#define MVXPSEC_READ(sc, off) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))

/*
 * device driver autoconf interface
 */
CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
    mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);

STATIC int
mvxpsec_match(device_t dev, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	uint32_t tag;
	int window;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	switch (mva->mva_unit) {
	case 0:
		tag = ARMADAXP_TAG_CRYPT0;
		break;
	case 1:
		tag = ARMADAXP_TAG_CRYPT1;
		break;
	default:
		aprint_error_dev(dev,
		    "unit %d is not supported\n", mva->mva_unit);
		return 0;
	}

	window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
	if (window >= nwindow) {
		aprint_error_dev(dev,
		    "Security Accelerator SRAM is not configured.\n");
		return 0;
	}

	return 1;
}

STATIC void
mvxpsec_attach(device_t parent, device_t self, void *aux)
{
	struct marvell_attach_args *mva = aux;
	struct mvxpsec_softc *sc = device_private(self);
	int v;
	int i;

	sc->sc_dev = self;

	aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
	aprint_naive("\n");
#ifdef MVXPSEC_MULTI_PACKET
	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
#else
	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
#endif
	aprint_normal_dev(sc->sc_dev,
	    "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);

	/* mutex */
	mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);

	/* Packet queue */
	SIMPLEQ_INIT(&sc->sc_wait_queue);
	SIMPLEQ_INIT(&sc->sc_run_queue);
	SLIST_INIT(&sc->sc_free_list);
	sc->sc_wait_qlen = 0;
#ifdef MVXPSEC_MULTI_PACKET
	sc->sc_wait_qlimit = 16;
#else
	sc->sc_wait_qlimit = 0;
#endif
	sc->sc_free_qlen = 0;

	/* Timer */
	callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
	callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);

	/* I/O */
	sc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}

	/* DMA */
	sc->sc_dmat = mva->mva_dmat;
	if (mvxpsec_init_dma(sc, mva) < 0)
		return;

	/* SRAM */
	if (mvxpsec_init_sram(sc) < 0)
		return;

	/* Registers */
	mvxpsec_wininit(sc, mva->mva_tags);

	/* INTR */
	MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
	MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
	sc->sc_done_ih =
	    marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
	/* XXX: should pass the error IRQ via mva */
	sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
	    IPL_NET, mvxpsec_eintr, sc);
	aprint_normal_dev(self,
	    "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));

	/* Initialize TDMA (It's enabled here, but waiting for SA) */
	if (mvxpsec_dma_wait(sc) < 0)
		panic("%s: DMA DEVICE not responding\n", __func__);
	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
	v  = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
	v |= MV_TDMA_CONTROL_ENABLE;
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);

	/* Initialize SA */
	if (mvxpsec_acc_wait(sc) < 0)
		panic("%s: MVXPSEC not responding\n", __func__);
	v  = MVXPSEC_READ(sc, MV_ACC_CONFIG);
	v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
	v |= MV_ACC_CONFIG_MULT_PKT;
	v |= MV_ACC_CONFIG_WAIT_TDMA;
	v |= MV_ACC_CONFIG_ACT_TDMA;
	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);

	/* Session */
	sc->sc_session_pool =
	    pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
	    "mvxpsecpl", NULL, IPL_NET,
	    mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
	pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
	pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
	sc->sc_last_session = NULL;

	/* Packet */
	sc->sc_packet_pool =
	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
	    "mvxpsec_pktpl", NULL, IPL_NET,
	    mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
	pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
	pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);

	/* Register to EVCNT framework */
	mvxpsec_evcnt_attach(sc);

	/* Register to Opencrypto */
	for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
		sc->sc_sessions[i] = NULL;
	}
	if (mvxpsec_register(sc))
		panic("cannot initialize OpenCrypto module.\n");

	return;
}

STATIC void
mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
{
	struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;

	evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Main Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Auth Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "DES Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "Crypto Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "SA Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
	evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");

	evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "New-Session");
	evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Free-Session");

	evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Packet-OK");
	evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Packet-ERR");

	evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
	evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
	evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Queue-Full");
	evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Max-Dispatch");
	evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Max-Done");
}

/*
 * Register setup
 */
STATIC int mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t size, reg;
	int window, target, attr, rv, i;

	/* disable all windows */
	for (window = 0; window < MV_TDMA_NWINDOW; window++)
	{
		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
	}

	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			aprint_error_dev(sc->sc_dev,
			    "can't remap window %d\n", window);
			continue;
		}

		reg  = MV_TDMA_BAR_BASE(base);
		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);

		reg  = MV_TDMA_ATTR_TARGET(target);
		reg |= MV_TDMA_ATTR_ATTR(attr);
		reg |= MV_TDMA_ATTR_SIZE(size);
		reg |= MV_TDMA_ATTR_ENABLE;
		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);

		window++;
	}

	return 0;
}

/*
 * Timer handling
 */
STATIC void
mvxpsec_timer(void *aux)
{
	struct mvxpsec_softc *sc = aux;
	struct mvxpsec_packet *mv_p;
	uint32_t reg;
	int ndone;
	int refill;
	int s;

	/* IPL_SOFTCLOCK */

	log(LOG_ERR, "%s: device timeout.\n", __func__);
#ifdef MVXPSEC_DEBUG
	mvxpsec_dump_reg(sc);
#endif

	s = splnet();
	/* stop security accelerator */
	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);

	/* stop TDMA */
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);

	/* cleanup packet queue */
	mutex_enter(&sc->sc_queue_mtx);
	ndone = 0;
	while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);

		mv_p->crp->crp_etype = EINVAL;
		mvxpsec_done_packet(mv_p);
		ndone++;
	}
	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
	sc->sc_flags &= ~HW_RUNNING;
	refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
	mutex_exit(&sc->sc_queue_mtx);

	/* reenable TDMA */
	if (mvxpsec_dma_wait(sc) < 0)
		panic("%s: failed to reset DMA DEVICE. give up.", __func__);
	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
	reg  = MV_TDMA_DEFAULT_CONTROL;
	reg |= MV_TDMA_CONTROL_ENABLE;
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);

	if (mvxpsec_acc_wait(sc) < 0)
		panic("%s: failed to reset MVXPSEC. give up.", __func__);
	reg  = MV_ACC_CONFIG_MULT_PKT;
	reg |= MV_ACC_CONFIG_WAIT_TDMA;
	reg |= MV_ACC_CONFIG_ACT_TDMA;
	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);

	if (refill) {
		mutex_enter(&sc->sc_queue_mtx);
		mvxpsec_dispatch_queue(sc);
		mutex_exit(&sc->sc_queue_mtx);
	}

	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
	splx(s);
}

/*
 * DMA handling
 */

/*
 * Allocate kernel devmem and DMA safe memory with the bus_dma API,
 * used for DMA descriptors.
 *
 * If phys != 0, assume it already is DMA safe memory and bypass the
 * allocator.
 */
STATIC struct mvxpsec_devmem *
mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
{
	struct mvxpsec_devmem *devmem;
	bus_dma_segment_t seg;
	int rseg;
	int err;

	if (sc == NULL)
		return NULL;

	devmem = kmem_alloc(sizeof(*devmem), KM_NOSLEEP);
	if (devmem == NULL) {
		aprint_error_dev(sc->sc_dev, "can't alloc kmem\n");
		return NULL;
	}

	devmem->size = size;

	if (phys) {
		seg.ds_addr = phys;
		seg.ds_len = devmem->size;
		rseg = 1;
		err = 0;
	}
	else {
		err = bus_dmamem_alloc(sc->sc_dmat,
		    devmem->size, PAGE_SIZE, 0,
		    &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
	}
	if (err) {
		aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
		goto fail_kmem_free;
	}

	err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	     devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
		goto fail_dmamem_free;
	}

	err = bus_dmamap_create(sc->sc_dmat,
	    size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
	if (err) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		goto fail_unmap;
	}

	err = bus_dmamap_load(sc->sc_dmat,
	    devmem->map, devmem->kva, devmem->size, NULL,
	    BUS_DMA_NOWAIT);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		   "can't load DMA buffer VA:%p PA:0x%08x\n",
		    devmem->kva, (int)seg.ds_addr);
		goto fail_destroy;
	}

	return devmem;

fail_destroy:
	bus_dmamap_destroy(sc->sc_dmat, devmem->map);
fail_unmap:
	bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
fail_dmamem_free:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_kmem_free:
	kmem_free(devmem, sizeof(*devmem));

	return NULL;
}

/*
 * Get DMA Descriptor from (DMA safe) descriptor pool.
 */
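/*
 * Note: the ring holds at most sc_desc_ring_size - 1 descriptors; cons
 * starts one slot behind prod (see mvxpsec_init_dma()) and the ring is
 * full when prod catches up to cons again.
 */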
INLINE struct mvxpsec_descriptor_handle *
mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
{
	struct mvxpsec_descriptor_handle *entry;

	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
		return NULL;

	entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
	sc->sc_desc_ring_prod++;
	if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
		sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;

	return entry;
}

/*
 * Put DMA Descriptor back to the descriptor pool.
 */
_INLINE void
mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
    struct mvxpsec_descriptor_handle *dh)
{
	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	sc->sc_desc_ring_cons++;
	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;

	return;
}

/*
 * Setup DMA Descriptor
 * copy from 'src' to 'dst' by 'size' bytes.
 * 'src' or 'dst' must be SRAM address.
 */
INLINE void
mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
    uint32_t dst, uint32_t src, uint32_t size)
{
	struct mvxpsec_descriptor *desc;

	desc = (struct mvxpsec_descriptor *)dh->_desc;

	desc->tdma_dst = dst;
	desc->tdma_src = src;
	desc->tdma_word0 = size;
	if (size != 0)
		desc->tdma_word0 |= MV_TDMA_CNT_OWN;
	/* size == 0 is owned by ACC, not TDMA */

#ifdef MVXPSEC_DEBUG
	mvxpsec_dump_dmaq(dh);
#endif

}

/*
 * Concatenate two DMA descriptors
 */
INLINE void
mvxpsec_dma_cat(struct mvxpsec_softc *sc,
    struct mvxpsec_descriptor_handle *dh1,
    struct mvxpsec_descriptor_handle *dh2)
{
	((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
	MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
}

/*
 * Schedule DMA Copy
 */
INLINE int
mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
    uint32_t dst, uint32_t src, uint32_t size)
{
	struct mvxpsec_descriptor_handle *dh;

	dh = mvxpsec_dma_getdesc(sc);
	if (dh == NULL) {
		log(LOG_ERR, "%s: descriptor full\n", __func__);
		return -1;
	}

	mvxpsec_dma_setup(dh, dst, src, size);
	if (r->dma_head == NULL) {
		r->dma_head = dh;
		r->dma_last = dh;
		r->dma_size = 1;
	}
	else {
		mvxpsec_dma_cat(sc, r->dma_last, dh);
		r->dma_last = dh;
		r->dma_size++;
	}

	return 0;
}

INLINE int
mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
    uint32_t dst, uint32_t src, uint32_t size)
{
	if (size == 0) /* size 0 is reserved for the ACC-activate descriptor */
		return 0;

	return mvxpsec_dma_copy0(sc, r, dst, src, size);
}

/*
 * Schedule ACC Activate
 */
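/*
 * This queues the single zero-length descriptor: TDMA does not own a
 * size == 0 descriptor (see mvxpsec_dma_setup()), so the transfer stops
 * there and control passes to the security accelerator.
 */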
INLINE int
mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
{
	return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
}

/*
 * Finalize DMA setup
 */
INLINE void
mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
{
	struct mvxpsec_descriptor_handle *dh;

	dh = r->dma_last;
	((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
	MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
}

/*
 * Free entire DMA ring
 */
INLINE void
mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
{
	sc->sc_desc_ring_cons += r->dma_size;
	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
	r->dma_head = NULL;
	r->dma_last = NULL;
	r->dma_size = 0;
}

/*
 * create DMA descriptor chain for the packet
 */
INLINE int
mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_session *mv_s = mv_p->mv_s;
	uint32_t src, dst, len;
	uint32_t pkt_off, pkt_off_r;
	int err;
	int i;

	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	/*
	 * set offset for mem->device copy
	 *
	 * typical packet image:
	 *
	 *   enc_ivoff
	 *   mac_off
	 *   |
	 *   |    enc_off
	 *   |    |
	 *   v    v
	 *   +----+--------...
	 *   |IV  |DATA
	 *   +----+--------...
	 */
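	/*
	 * pkt_off becomes the lowest of mac_off, enc_ivoff (unless the IV
	 * lives outside the payload) and enc_off; the bytes in front of it
	 * are never copied to the SRAM payload area.
	 */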
	pkt_off = 0;
	if (mv_p->mac_off > 0)
		pkt_off = mv_p->mac_off;
	if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
		pkt_off = mv_p->enc_ivoff;
	if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
		pkt_off = mv_p->enc_off;
	pkt_off_r = pkt_off;

	/* make DMA descriptors to copy packet header: DRAM -> SRAM */
	dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
	src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
	len = sizeof(mv_p->pkt_header);
	err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
	if (__predict_false(err))
		return err;

	/*
	 * make DMA descriptors to copy session header: DRAM -> SRAM
	 * we can reuse session header on SRAM if session is not changed.
	 */
	if (sc->sc_last_session != mv_s) {
		dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
		src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
		len = sizeof(mv_s->session_header);
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
		sc->sc_last_session = mv_s;
	}

	/* make DMA descriptor to copy payload data: DRAM -> SRAM */
	dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
		src = mv_p->data_map->dm_segs[i].ds_addr;
		len = mv_p->data_map->dm_segs[i].ds_len;
		if (pkt_off) {
			if (len <= pkt_off) {
				/* ignore the segment */
				dst += len;
				pkt_off -= len;
				continue;
			}
			/* copy from the middle of the segment */
			dst += pkt_off;
			src += pkt_off;
			len -= pkt_off;
			pkt_off = 0;
		}
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
		dst += len;
	}

	/* make special descriptor to activate security accelerator */
	err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
	if (__predict_false(err))
		return err;

	/* make DMA descriptors to copy payload: SRAM -> DRAM */
	src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
		dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
		len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
		if (pkt_off_r) {
			if (len <= pkt_off_r) {
				/* ignore the segment */
				src += len;
				pkt_off_r -= len;
				continue;
			}
			/* copy from the middle of the segment */
			src += pkt_off_r;
			dst += pkt_off_r;
			len -= pkt_off_r;
			pkt_off_r = 0;
		}
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
		src += len;
	}
	KASSERT(pkt_off == 0);
	KASSERT(pkt_off_r == 0);

	/*
	 * make DMA descriptors to copy packet header: SRAM->DRAM
	 * if IV is present in the payload, no need to copy.
	 */
	if (mv_p->flags & CRP_EXT_IV) {
		dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
		src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
		len = sizeof(mv_p->pkt_header);
		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
		if (__predict_false(err))
			return err;
	}

	return 0;
}

INLINE int
mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
{
	/* sync packet header */
	bus_dmamap_sync(sc->sc_dmat,
	    mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
	    BUS_DMASYNC_PREWRITE);

#ifdef MVXPSEC_DEBUG
	/* sync session header */
	if (mvxpsec_debug != 0) {
		struct mvxpsec_session *mv_s = mv_p->mv_s;

		/* only debug code touches the session header after newsession */
		bus_dmamap_sync(sc->sc_dmat,
		    mv_s->session_header_map,
		    0, sizeof(mv_s->session_header),
		    BUS_DMASYNC_PREWRITE);
	}
#endif

	/* sync packet buffer */
	bus_dmamap_sync(sc->sc_dmat,
	    mv_p->data_map, 0, mv_p->data_len,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize MVXPSEC Internal SRAM
 *
 * - must be called after DMA initialization.
 * - make VM mapping for SRAM area on MBus.
 */
STATIC int
mvxpsec_init_sram(struct mvxpsec_softc *sc)
{
	uint32_t tag, target, attr, base, size;
	vaddr_t va;
	int window;

	switch (sc->sc_dev->dv_unit) {
	case 0:
		tag = ARMADAXP_TAG_CRYPT0;
		break;
	case 1:
		tag = ARMADAXP_TAG_CRYPT1;
		break;
	default:
		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
		return -1;
	}

	window = mvsoc_target(tag, &target, &attr, &base, &size);
	if (window >= nwindow) {
		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
		return -1;
	}

	if (sizeof(struct mvxpsec_crypt_sram) > size) {
		aprint_error_dev(sc->sc_dev,
		    "SRAM data structure exceeds SRAM window size.\n");
		return -1;
	}

	aprint_normal_dev(sc->sc_dev,
	    "internal SRAM window at 0x%08x-0x%08x",
	    base, base + size - 1);
	sc->sc_sram_pa = base;

	/* get vmspace to read/write device internal SRAM */
	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0) {
		aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
		sc->sc_sram_va = NULL;
		aprint_normal("\n");
		return 0;
	}
	/* XXX: not working; PMAP_NOCACHE seems to have no effect? */
	pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
	pmap_update(pmap_kernel());
	sc->sc_sram_va = (void *)va;
	aprint_normal(" va %p\n", sc->sc_sram_va);
	memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);

	return 0;
}

/*
 * Initialize TDMA engine.
 */
STATIC int
mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
{
	struct mvxpsec_descriptor_handle *dh;
	uint8_t *va;
	paddr_t pa;
	off_t va_off, pa_off;
	int i, n, seg, ndh;

	/* Initialize the device's control parameters (still disabled) */
	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);

	/* Init Software DMA Handlers */
	sc->sc_devmem_desc =
	    mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
	if (sc->sc_devmem_desc == NULL)
		panic("Cannot allocate memory\n");
	ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
	    * MVXPSEC_DMA_DESC_PAGES;
	sc->sc_desc_ring =
	    kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
	        KM_NOSLEEP);
	if (sc->sc_desc_ring == NULL)
		panic("Cannot allocate memory\n");
	aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
	    ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);

	ndh = 0;
	for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
		va = devmem_va(sc->sc_devmem_desc);
		pa = devmem_pa(sc->sc_devmem_desc, seg);
		n = devmem_palen(sc->sc_devmem_desc, seg) /
		       	sizeof(struct mvxpsec_descriptor);
		va_off = (PAGE_SIZE * seg);
		pa_off = 0;
		for (i = 0; i < n; i++) {
			dh = &sc->sc_desc_ring[ndh];
			dh->map = devmem_map(sc->sc_devmem_desc);
			dh->off = va_off + pa_off;
			dh->_desc = (void *)(va + va_off + pa_off);
			dh->phys_addr = pa + pa_off;
			pa_off += sizeof(struct mvxpsec_descriptor);
			ndh++;
		}
	}
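	/*
	 * start with an empty ring: cons is parked one slot behind prod,
	 * so at most ndh - 1 descriptors can be handed out before
	 * mvxpsec_dma_getdesc() reports the ring full.
	 */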
	sc->sc_desc_ring_size = ndh;
	sc->sc_desc_ring_prod = 0;
	sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;

	return 0;
}

/*
 * Wait for the TDMA controller to become idle
 */
INLINE int
mvxpsec_dma_wait(struct mvxpsec_softc *sc)
{
	int retry = 0;

	while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
		delay(mvxpsec_wait_interval);
		if (retry++ >= mvxpsec_wait_retry)
			return -1;
	}
	return 0;
}

/*
 * Wait for the Security Accelerator to become idle
 */
INLINE int
mvxpsec_acc_wait(struct mvxpsec_softc *sc)
{
	int retry = 0;

	while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
		delay(mvxpsec_wait_interval);
		if (++retry >= mvxpsec_wait_retry)
			return -1;
	}
	return 0;
}

/*
 * Interrupt handler entry point
 *
 * registered with the kernel via marvell_intr_establish()
 */
int
mvxpsec_intr(void *arg)
{
	struct mvxpsec_softc *sc = arg;
	uint32_t v;

	/* IPL_NET */
	while ((v = mvxpsec_intr_ack(sc)) != 0) {
		mvxpsec_intr_cnt(sc, v);
		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
#ifdef MVXPSEC_DEBUG
		mvxpsec_dump_reg(sc);
#endif

		/* call high-level handlers */
		if (v & MVXPSEC_INT_ACCTDMA)
			mvxpsec_done(sc);
	}

	return 0;
}

INLINE void
mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
{
	struct mvxpsec_packet *mv_p;

	/* must be called with sc->sc_dma_mtx held */
	KASSERT(mutex_owned(&sc->sc_dma_mtx));

	/*
	 * there is only one intr for the run_queue;
	 * no one else touches sc_run_queue.
	 */
	SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
		mvxpsec_dma_free(sc, &mv_p->dma_ring);
}

/*
 * Acknowledge the interrupt
 *
 * read the cause bits, clear them, and return them.
 * NOTE: multiple cause bits may be returned at once.
 */
STATIC uint32_t
mvxpsec_intr_ack(struct mvxpsec_softc *sc)
{
	uint32_t reg;

	reg  = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
	reg &= MVXPSEC_DEFAULT_INT;
	MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
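	/* cause bits are cleared by writing 0, so ~reg clears only the
	 * bits handled here and leaves the others pending */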
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));

	return reg;
}

/*
 * TDMA error interrupt handler entry point
 *
 * registered with the kernel via marvell_intr_establish()
 */
int
mvxpsec_eintr(void *arg)
{
	struct mvxpsec_softc *sc = arg;
	uint32_t err;

	/* IPL_NET */
again:
	err = mvxpsec_eintr_ack(sc);
	if (err == 0)
		goto done;

	log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
	    s_errreg(err));
#ifdef MVXPSEC_DEBUG
	mvxpsec_dump_reg(sc);
#endif

	goto again;
done:
	return 0;
}

/*
 * Acknowledge the TDMA error interrupt
 *
 * read the cause bits, clear them, and return them.
 * NOTE: multiple cause bits may be returned at once.
 */
STATIC uint32_t
mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
{
	uint32_t reg;

	reg  = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
	reg &= MVXPSEC_DEFAULT_ERR;
	MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));

	return reg;
}

/*
 * Interrupt statistics
 *
 * these are NOT statistics of how many times the events 'occurred';
 * they ONLY count how many times the events were 'handled'.
 */
INLINE void
mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
{
	MVXPSEC_EVCNT_INCR(sc, intr_all);
	if (cause & MVXPSEC_INT_AUTH)
		MVXPSEC_EVCNT_INCR(sc, intr_auth);
	if (cause & MVXPSEC_INT_DES)
		MVXPSEC_EVCNT_INCR(sc, intr_des);
	if (cause & MVXPSEC_INT_AES_ENC)
		MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
	if (cause & MVXPSEC_INT_AES_DEC)
		MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
	if (cause & MVXPSEC_INT_ENC)
		MVXPSEC_EVCNT_INCR(sc, intr_enc);
	if (cause & MVXPSEC_INT_SA)
		MVXPSEC_EVCNT_INCR(sc, intr_sa);
	if (cause & MVXPSEC_INT_ACCTDMA)
		MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
	if (cause & MVXPSEC_INT_TDMA_COMP)
		MVXPSEC_EVCNT_INCR(sc, intr_comp);
	if (cause & MVXPSEC_INT_TDMA_OWN)
		MVXPSEC_EVCNT_INCR(sc, intr_own);
	if (cause & MVXPSEC_INT_ACCTDMA_CONT)
		MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
}

/*
 * Setup MVXPSEC header structure.
 *
 * the header contains the security accelerator's descriptor, the key
 * material of the ciphers, the IVs of the ciphers and MACs, ...
 *
 * the header is transferred to MVXPSEC internal SRAM by TDMA,
 * and parsed by the MVXPSEC H/W.
 */
STATIC int
mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
	int enc_start, enc_len, iv_offset;
	int mac_start, mac_len, mac_offset;

	/* offset -> device address */
	enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
	enc_len = mv_p->enc_len;
	if (mv_p->flags & CRP_EXT_IV)
		iv_offset = mv_p->enc_ivoff;
	else
		iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
	mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
	mac_len = mv_p->mac_len;
	mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);

	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "ENC from 0x%08x\n", enc_start);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "MAC from 0x%08x\n", mac_start);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "MAC to 0x%08x\n", mac_offset);
	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
	    "ENC IV at 0x%08x\n", iv_offset);

	/* setup device addresses in Security Accelerator Descriptors */
	desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
	desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
	if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
		desc->acc_enckey =
		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
	else
		desc->acc_enckey =
		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
	desc->acc_enciv =
	    MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);

	desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
	desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
	desc->acc_maciv =
	    MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
	        MVXPSEC_SRAM_MIV_OUT_DA);

	return 0;
}

/*
 * constructor of the session structure.
 *
 * this constructor will be called by the pool_cache framework.
 */
STATIC int
mvxpsec_session_ctor(void *arg, void *obj, int flags)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_session *mv_s = obj;

	/* pool is owned by softc */
	mv_s->sc = sc;

	/* Create and load DMA map for session header */
	mv_s->session_header_map = 0;
	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(mv_s->session_header), 1,
	    sizeof(mv_s->session_header), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &mv_s->session_header_map)) {
		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
	    &mv_s->session_header, sizeof(mv_s->session_header),
	    NULL, BUS_DMA_NOWAIT)) {
		log(LOG_ERR, "%s: cannot load header\n", __func__);
		goto fail;
	}

	return 0;
fail:
	if (mv_s->session_header_map)
		bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
	return ENOMEM;
}

/*
 * destructor of the session structure.
 *
 * this destructor will be called by the pool_cache framework.
 */
STATIC void
mvxpsec_session_dtor(void *arg, void *obj)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_session *mv_s = obj;

	if (mv_s->sc != sc)
		panic("inconsistent context\n");

	bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
}

/*
 * constructor of the packet structure.
 */
STATIC int
mvxpsec_packet_ctor(void *arg, void *obj, int flags)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_packet *mv_p = obj;

	mv_p->dma_ring.dma_head = NULL;
	mv_p->dma_ring.dma_last = NULL;
	mv_p->dma_ring.dma_size = 0;

	/* Create and load DMA map for packet header */
	mv_p->pkt_header_map = 0;
	if (bus_dmamap_create(sc->sc_dmat,
	    sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &mv_p->pkt_header_map)) {
		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
	    &mv_p->pkt_header, sizeof(mv_p->pkt_header),
	    NULL, BUS_DMA_NOWAIT)) {
		log(LOG_ERR, "%s: cannot load header\n", __func__);
		goto fail;
	}

	/* Create DMA map for packet data. */
	mv_p->data_map = 0;
	if (bus_dmamap_create(sc->sc_dmat,
	    MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
		goto fail;
	}

	return 0;
fail:
	if (mv_p->pkt_header_map)
		bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
	if (mv_p->data_map)
		bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
	return ENOMEM;
}

/*
 * destructor of the packet structure.
 */
STATIC void
mvxpsec_packet_dtor(void *arg, void *obj)
{
	struct mvxpsec_softc *sc = arg;
	struct mvxpsec_packet *mv_p = obj;

	mutex_enter(&sc->sc_dma_mtx);
	mvxpsec_dma_free(sc, &mv_p->dma_ring);
	mutex_exit(&sc->sc_dma_mtx);
	bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
	bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
}

/*
 * allocate a new session structure.
 */
STATIC struct mvxpsec_session *
mvxpsec_session_alloc(struct mvxpsec_softc *sc)
{
	struct mvxpsec_session *mv_s;

	mv_s = pool_cache_get(sc->sc_session_pool, 0);
	if (mv_s == NULL) {
		log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
		return NULL;
	}
	mv_s->refs = 1; /* 0 means the session is already invalid */
	mv_s->sflags = 0;

	return mv_s;
}

/*
 * deallocate session structure.
 */
STATIC void
mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
{
	struct mvxpsec_softc *sc = mv_s->sc;

	mv_s->sflags |= DELETED;
	mvxpsec_session_unref(mv_s);
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);

	return;
}

STATIC int
mvxpsec_session_ref(struct mvxpsec_session *mv_s)
{
	uint32_t refs;

	if (mv_s->sflags & DELETED) {
		log(LOG_ERR,
		    "%s: session is already deleted.\n", __func__);
		return -1;
	}

	refs = atomic_inc_32_nv(&mv_s->refs);
	if (refs == 1) {
		/*
		 * a session with refs == 0 is
		 * already invalidated. revert it.
		 * XXX: use CAS ?
		 */
		atomic_dec_32(&mv_s->refs);
		log(LOG_ERR,
		    "%s: session is already invalidated.\n", __func__);
		return -1;
	}

	return 0;
}

STATIC void
mvxpsec_session_unref(struct mvxpsec_session *mv_s)
{
	uint32_t refs;

	refs = atomic_dec_32_nv(&mv_s->refs);
	if (refs == 0)
		pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
}

/*
 * look up a session and check that it exists
 */
INLINE struct mvxpsec_session *
mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
{
	struct mvxpsec_session *mv_s;
	int session;

	/* must be called with sc->sc_session_mtx held */
	KASSERT(mutex_owned(&sc->sc_session_mtx));

	session = MVXPSEC_SESSION(sid);
	if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
		log(LOG_ERR, "%s: session number too large %d\n",
		    __func__, session);
		return NULL;
	}
	if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
		log(LOG_ERR, "%s: invalid session %d\n",
		    __func__, session);
		return NULL;
	}

	KASSERT(mv_s->sid == session);

	return mv_s;
}

/*
 * allocate a new packet structure.
 */
STATIC struct mvxpsec_packet *
mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
{
	struct mvxpsec_softc *sc = mv_s->sc;
	struct mvxpsec_packet *mv_p;

	/* must be called with sc->sc_queue_mtx held. */
	KASSERT(mutex_owned(&sc->sc_queue_mtx));
	/* must be called with sc->sc_session_mtx held. */
	KASSERT(mutex_owned(&sc->sc_session_mtx));

	if (mvxpsec_session_ref(mv_s) < 0) {
		log(LOG_ERR, "%s: invalid session.\n", __func__);
		return NULL;
	}

	if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
		sc->sc_free_qlen--;
	}
	else {
		mv_p = pool_cache_get(sc->sc_packet_pool, 0);
		if (mv_p == NULL) {
			log(LOG_ERR, "%s: cannot allocate memory\n",
			    __func__);
			mvxpsec_session_unref(mv_s);
			return NULL;
		}
	}
	mv_p->mv_s = mv_s;
	mv_p->flags = 0;
	mv_p->data_ptr = NULL;

	return mv_p;
}

/*
 * free packet structure.
 */
STATIC void
mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_session *mv_s = mv_p->mv_s;
	struct mvxpsec_softc *sc = mv_s->sc;

	/* must be called with sc->sc_queue_mtx held */
	KASSERT(mutex_owned(&sc->sc_queue_mtx));

	if (mv_p->dma_ring.dma_size != 0) {
		sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
	}
	mv_p->dma_ring.dma_head = NULL;
	mv_p->dma_ring.dma_last = NULL;
	mv_p->dma_ring.dma_size = 0;

	if (mv_p->data_map) {
		if (mv_p->flags & RDY_DATA) {
			bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
			mv_p->flags &= ~RDY_DATA;
		}
	}

	if (sc->sc_free_qlen > sc->sc_wait_qlimit)
		pool_cache_put(sc->sc_packet_pool, mv_p);
	else {
		SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
		sc->sc_free_qlen++;
	}
	mvxpsec_session_unref(mv_s);
}

INLINE void
mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
	struct mvxpsec_packet *last_packet;
	struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;

	/* must be called with sc->sc_queue_mtx held */
	KASSERT(mutex_owned(&sc->sc_queue_mtx));

	if (sc->sc_wait_qlen == 0) {
		SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
		sc->sc_wait_qlen++;
		mv_p->flags |= SETUP_DONE;
		return;
	}

	last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
	sc->sc_wait_qlen++;

	/* chain the DMA */
	cur_dma = mv_p->dma_ring.dma_head;
	prev_dma = last_packet->dma_ring.dma_last;
	mvxpsec_dma_cat(sc, prev_dma, cur_dma);
	mv_p->flags |= SETUP_DONE;
}

/*
 * called by interrupt handler
 */
STATIC int
mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
{
	struct mvxpsec_session *mv_s = mv_p->mv_s;
	struct mvxpsec_softc *sc = mv_s->sc;

	KASSERT((mv_p->flags & RDY_DATA));
	KASSERT((mv_p->flags & SETUP_DONE));

	/* unload data */
	bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
	    0, mv_p->data_len,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
	mv_p->flags &= ~RDY_DATA;

#ifdef MVXPSEC_DEBUG
	if (mvxpsec_debug != 0) {
		int s;

		bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
		    0, sizeof(mv_p->pkt_header),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
		    0, sizeof(mv_s->session_header),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
			char buf[1500];
			struct mbuf *m;
			struct uio *uio;
			size_t len;

			switch (mv_p->data_type) {
			case MVXPSEC_DATA_MBUF:
				m = mv_p->data_mbuf;
				len = m->m_pkthdr.len;
				if (len > sizeof(buf))
					len = sizeof(buf);
				m_copydata(m, 0, len, buf);
				break;
			case MVXPSEC_DATA_UIO:
				uio = mv_p->data_uio;
				len = uio->uio_resid;
				if (len > sizeof(buf))
					len = sizeof(buf);
				cuio_copydata(uio, 0, len, buf);
				break;
			default:
				len = 0;
			}
			if (len > 0)
				mvxpsec_dump_data(__func__, buf, len);
		}

		if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
			    "%s: session_descriptor:\n", __func__);
			mvxpsec_dump_packet_desc(__func__, mv_p);
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
			    "%s: session_data:\n", __func__);
			mvxpsec_dump_packet_data(__func__, mv_p);
		}

		if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
			    "%s: SRAM\n", __func__);
			mvxpsec_dump_sram(__func__, sc, 2000);
		}

		s = MVXPSEC_READ(sc, MV_ACC_STATUS);
		if (s & MV_ACC_STATUS_MAC_ERR) {
			MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
			    "%s: Message Authentication Failed.\n", __func__);
		}
	}
#endif

	/* copy back IV */
	if (mv_p->flags & CRP_EXT_IV) {
		memcpy(mv_p->ext_iv,
		    &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
		mv_p->ext_iv = NULL;
		mv_p->ext_ivlen = 0;
	}

	/* notify opencrypto */
	mv_p->crp->crp_etype = 0;
	crypto_done(mv_p->crp);
	mv_p->crp = NULL;

	/* unblock driver */
	mvxpsec_packet_dealloc(mv_p);
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);

	MVXPSEC_EVCNT_INCR(sc, packet_ok);

	return 0;
}
1797 
1798 
1799 /*
1800  * Opencrypto API registration
1801  */
1802 int
1803 mvxpsec_register(struct mvxpsec_softc *sc)
1804 {
1805 	int oplen = SRAM_PAYLOAD_SIZE;
1806 	int flags = 0;
1807 	int err;
1808 
1809 	sc->sc_nsessions = 0;
1810 	sc->sc_cid = crypto_get_driverid(0);
1811 	if (sc->sc_cid < 0) {
1812 		log(LOG_ERR,
1813 		    "%s: crypto_get_driverid() failed.\n", __func__);
1814 		err = EINVAL;
1815 		goto done;
1816 	}
1817 
1818 	/* Ciphers */
1819 	err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
1820 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1821 	if (err)
1822 		goto done;
1823 
1824 	err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
1825 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1826 	if (err)
1827 		goto done;
1828 
1829 	err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
1830 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1831 	if (err)
1832 		goto done;
1833 
1834 	/* MACs */
1835 	err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
1836 	    oplen, flags,
1837 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1838 	if (err)
1839 		goto done;
1840 
1841 	err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
1842 	    oplen, flags,
1843 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1844 	if (err)
1845 		goto done;
1846 
1847 #ifdef DEBUG
1848 	log(LOG_DEBUG,
1849 	    "%s: registered to opencrypto (max data = %d bytes)\n",
1850 	    device_xname(sc->sc_dev), oplen);
1851 #endif
1852 
1853 	err = 0;
1854 done:
1855 	return err;
1856 }
1857 
1858 /*
1859  * Create new opencrypto session
1860  *
1861  *   - register cipher key, mac key.
1862  *   - initialize mac internal state.
1863  */
1864 int
1865 mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1866 {
1867 	struct mvxpsec_softc *sc = arg;
1868 	struct mvxpsec_session *mv_s = NULL;
1869 	struct cryptoini *c;
1870 	static int hint = 0;
1871 	int session = -1;
1872 	int sid;
1873 	int err;
1874 	int i;
1875 
1876 	/* allocate driver session context */
1877 	mv_s = mvxpsec_session_alloc(sc);
1878 	if (mv_s == NULL)
1879 		return ENOMEM;
1880 
1881 	/*
1882 	 * lookup opencrypto session table
1883 	 *
1884 	 * sc_session_mtx is held from here on.
1885 	 */
1886 	mutex_enter(&sc->sc_session_mtx);
1887 	if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1888 		mutex_exit(&sc->sc_session_mtx);
1889 		log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1890 				__func__, MVXPSEC_MAX_SESSIONS);
1891 		mvxpsec_session_dealloc(mv_s);
1892 		return ENOMEM;
1893 	}
1894 	for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1895 		if (sc->sc_sessions[i])
1896 			continue;
1897 		session = i;
1898 		hint = session + 1;
1899 	       	break;
1900 	}
1901 	if (session < 0) {
1902 		for (i = 0; i < hint; i++) {
1903 			if (sc->sc_sessions[i])
1904 				continue;
1905 			session = i;
1906 			hint = session + 1;
1907 			break;
1908 		}
1909 		if (session < 0) {
1910 			mutex_exit(&sc->sc_session_mtx);
1911 			/* session full */
1912 			log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1913 				__func__, MVXPSEC_MAX_SESSIONS);
1914 			mvxpsec_session_dealloc(mv_s);
1915 			hint = 0;
1916 			return ENOMEM;
1917 		}
1918 	}
1919 	if (hint >= MVXPSEC_MAX_SESSIONS)
1920 		hint = 0;
1921 	sc->sc_nsessions++;
1922 	sc->sc_sessions[session] = mv_s;
1923 #ifdef DEBUG
1924 	log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1925 #endif
1926 
1927 	sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1928 	mv_s->sid = sid;
1929 
1930 	/* setup the session key ... */
1931 	for (c = cri; c; c = c->cri_next) {
1932 		switch (c->cri_alg) {
1933 		case CRYPTO_DES_CBC:
1934 		case CRYPTO_3DES_CBC:
1935 		case CRYPTO_AES_CBC:
1936 			/* key */
1937 			if (mvxpsec_key_precomp(c->cri_alg,
1938 			    c->cri_key, c->cri_klen,
1939 			    &mv_s->session_header.crp_key,
1940 			    &mv_s->session_header.crp_key_d)) {
1941 				log(LOG_ERR,
1942 				    "%s: Invalid cipher key for %s.\n",
1943 				    __func__, s_ctlalg(c->cri_alg));
1944 				err = EINVAL;
1945 				goto fail;
1946 			}
1947 			if (mv_s->sflags & RDY_CRP_KEY) {
1948 				log(LOG_WARNING,
1949 				    "%s: overwrite cipher: %s->%s.\n",
1950 				    __func__,
1951 				    s_ctlalg(mv_s->cipher_alg),
1952 				    s_ctlalg(c->cri_alg));
1953 			}
1954 			mv_s->sflags |= RDY_CRP_KEY;
1955 			mv_s->enc_klen = c->cri_klen;
1956 			mv_s->cipher_alg = c->cri_alg;
1957 			/* create per session IV (compatible with KAME IPsec) */
1958 			cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1959 			mv_s->sflags |= RDY_CRP_IV;
1960 			break;
1961 		case CRYPTO_SHA1_HMAC_96:
1962 		case CRYPTO_MD5_HMAC_96:
1963 			/* key */
1964 			if (mvxpsec_hmac_precomp(c->cri_alg,
1965 			    c->cri_key, c->cri_klen,
1966 			    (uint32_t *)&mv_s->session_header.miv_in,
1967 			    (uint32_t *)&mv_s->session_header.miv_out)) {
1968 				log(LOG_ERR,
1969 				    "%s: Invalid MAC key\n", __func__);
1970 				err = EINVAL;
1971 				goto fail;
1972 			}
1973 			if (mv_s->sflags & RDY_MAC_KEY ||
1974 			    mv_s->sflags & RDY_MAC_IV) {
1975 				log(LOG_ERR,
1976 				    "%s: overwrite HMAC: %s->%s.\n",
1977 				    __func__, s_ctlalg(mv_s->hmac_alg),
1978 				    s_ctlalg(c->cri_alg));
1979 			}
1980 			mv_s->sflags |= RDY_MAC_KEY;
1981 			mv_s->sflags |= RDY_MAC_IV;
1982 
1983 			mv_s->mac_klen = c->cri_klen;
1984 			mv_s->hmac_alg = c->cri_alg;
1985 			break;
1986 		default:
1987 			log(LOG_ERR, "%s: Unknown algorithm %d\n",
1988 			    __func__, c->cri_alg);
1989 			err = EINVAL;
1990 			goto fail;
1991 		}
1992 	}
1993 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1994 	    "H/W Crypto session (id:%u) added.\n", session);
1995 
1996 	*sidp = sid;
1997 	MVXPSEC_EVCNT_INCR(sc, session_new);
1998 	mutex_exit(&sc->sc_session_mtx);
1999 
2000 	/* sync session header (it's never touched after this point) */
2001 	bus_dmamap_sync(sc->sc_dmat,
2002 	    mv_s->session_header_map,
2003 	    0, sizeof(mv_s->session_header),
2004 	    BUS_DMASYNC_PREWRITE);
2005 
2006 	return 0;
2007 
2008 fail:
2009 	sc->sc_nsessions--;
2010 	sc->sc_sessions[session] = NULL;
2011 	hint = session;
2012 	if (mv_s)
2013 		mvxpsec_session_dealloc(mv_s);
2014 	log(LOG_WARNING,
2015 	    "%s: Failed to add H/W crypto session (id:%u): err=%d\n",
2016 	   __func__, session, err);
2017 
2018 	mutex_exit(&sc->sc_session_mtx);
2019 	return err;
2020 }
2021 
2022 /*
2023  * remove opencrypto session
2024  */
2025 int
2026 mvxpsec_freesession(void *arg, uint64_t tid)
2027 {
2028 	struct mvxpsec_softc *sc = arg;
2029 	struct mvxpsec_session *mv_s;
2030 	int session;
2031 	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
2032 
2033 	session = MVXPSEC_SESSION(sid);
2034 	if (session < 0 || session >= MVXPSEC_MAX_SESSIONS) {
2035 		log(LOG_ERR, "%s: invalid session (id:%u)\n",
2036 		    __func__, session);
2037 		return EINVAL;
2038 	}
2039 
2040 	mutex_enter(&sc->sc_session_mtx);
2041 	if ( (mv_s = sc->sc_sessions[session]) == NULL) {
2042 		mutex_exit(&sc->sc_session_mtx);
2043 #ifdef DEBUG
2044 		log(LOG_DEBUG, "%s: session %d already inactivated\n",
2045 		    __func__, session);
2046 #endif
2047 		return ENOENT;
2048 	}
2049 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2050 	    "%s: inactivate session %d\n", __func__, session);
2051 
2052 	/* inactivate mvxpsec session */
2053 	sc->sc_sessions[session] = NULL;
2054 	sc->sc_nsessions--;
2055 	sc->sc_last_session = NULL;
2056 	mutex_exit(&sc->sc_session_mtx);
2057 
2058 	KASSERT(sc->sc_nsessions >= 0);
2059 	KASSERT(mv_s->sid == sid);
2060 
2061 	mvxpsec_session_dealloc(mv_s);
2062 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2063 	    "H/W Crypto session (id: %d) deleted.\n", session);
2064 
2065 	/* force unblock opencrypto */
2066 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2067 
2068 	MVXPSEC_EVCNT_INCR(sc, session_free);
2069 
2070 	return 0;
2071 }
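
/*
 * Editor's sketch (assumptions flagged): the session-id round trip
 * assumed by mvxpsec_newsession()/mvxpsec_freesession() above.  The
 * real MVXPSEC_SID()/MVXPSEC_SESSION() macros live in mvxpsecvar.h;
 * the bit layout below is only a hypothetical illustration of the
 * encode/decode contract (unit in the high bits, slot in the low).
 */
#if 0
#include <stdint.h>
#include <assert.h>

#define EX_SID(unit, session)	(((uint32_t)(unit) << 16) | ((session) & 0xffff))
#define EX_SESSION(sid)		((int)((sid) & 0xffff))

int
main(void)
{
	uint32_t sid = EX_SID(1, 42);	/* unit 1, session slot 42 */

	/* freesession must recover the slot index from the sid alone */
	assert(EX_SESSION(sid) == 42);
	return 0;
}
#endif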
2072 
2073 /*
2074  * process data with existing session
2075  */
2076 int
2077 mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2078 {
2079 	struct mvxpsec_softc *sc = arg;
2080 	struct mvxpsec_session *mv_s;
2081 	struct mvxpsec_packet *mv_p;
2082 	int q_full;
2083 	int running;
2084 	int err;
2085 
2086 	mutex_enter(&sc->sc_queue_mtx);
2087 
2088 	/*
2089 	 * lookup session
2090 	 */
2091 	mutex_enter(&sc->sc_session_mtx);
2092 	mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2093 	if (__predict_false(mv_s == NULL)) {
2094 		err = EINVAL;
2095 		mv_p = NULL;
2096 		mutex_exit(&sc->sc_session_mtx);
2097 		goto fail;
2098 	}
2099 	mv_p = mvxpsec_packet_alloc(mv_s);
2100 	if (__predict_false(mv_p == NULL)) {
2101 		mutex_exit(&sc->sc_session_mtx);
2102 		mutex_exit(&sc->sc_queue_mtx);
2103 		return ERESTART; /* => queued in opencrypto layer */
2104 	}
2105 	mutex_exit(&sc->sc_session_mtx);
2106 
2107 	/*
2108 	 * check queue status
2109 	 */
2110 #ifdef MVXPSEC_MULTI_PACKET
2111 	q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2112 #else
2113 	q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2114 #endif
2115 	running = (sc->sc_flags & HW_RUNNING) ? 1 : 0;
2116 	if (q_full) {
2117 		/* input queue is full. */
2118 		if (!running && sc->sc_wait_qlen > 0)
2119 			mvxpsec_dispatch_queue(sc);
2120 		MVXPSEC_EVCNT_INCR(sc, queue_full);
2121 		mvxpsec_packet_dealloc(mv_p);
2122 		mutex_exit(&sc->sc_queue_mtx);
2123 		return ERESTART; /* => queued in opencrypto layer */
2124 	}
2125 
2126 	/*
2127 	 * Load and setup packet data
2128 	 */
2129 	err = mvxpsec_packet_setcrp(mv_p, crp);
2130 	if (__predict_false(err))
2131 		goto fail;
2132 
2133 	/*
2134 	 * Setup DMA descriptor chains
2135 	 */
2136 	mutex_enter(&sc->sc_dma_mtx);
2137 	err = mvxpsec_dma_copy_packet(sc, mv_p);
2138 	mutex_exit(&sc->sc_dma_mtx);
2139 	if (__predict_false(err))
2140 		goto fail;
2141 
2142 #ifdef MVXPSEC_DEBUG
2143 	mvxpsec_dump_packet(__func__, mv_p);
2144 #endif
2145 
2146 	/*
2147 	 * Sync/inval the data cache
2148 	 */
2149 	err = mvxpsec_dma_sync_packet(sc, mv_p);
2150 	if (__predict_false(err))
2151 		goto fail;
2152 
2153 	/*
2154 	 * Enqueue the packet
2155 	 */
2156 	MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2157 #ifdef MVXPSEC_MULTI_PACKET
2158 	mvxpsec_packet_enqueue(mv_p);
2159 	if (!running)
2160 		mvxpsec_dispatch_queue(sc);
2161 #else
2162 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2163 	sc->sc_wait_qlen++;
2164 	mv_p->flags |= SETUP_DONE;
2165 	if (!running)
2166 		mvxpsec_dispatch_queue(sc);
2167 #endif
2168 	mutex_exit(&sc->sc_queue_mtx);
2169 	return 0;
2170 
2171 fail:
2172 	/* Drop the incoming packet */
2173 	mvxpsec_drop(sc, crp, mv_p, err);
2174 	mutex_exit(&sc->sc_queue_mtx);
2175 	return 0;
2176 }
2177 
2178 /*
2179  * return completed packets to opencrypto (and the IP stack)
2180  */
2181 void
2182 mvxpsec_done(void *arg)
2183 {
2184 	struct mvxpsec_softc *sc = arg;
2185 	struct mvxpsec_packet *mv_p;
2186 	mvxpsec_queue_t ret_queue;
2187 	int ndone;
2188 
2189 	mutex_enter(&sc->sc_queue_mtx);
2190 
2191 	/* stop wdog timer */
2192 	callout_stop(&sc->sc_timeout);
2193 
2194 	/* refill MVXPSEC */
2195 	ret_queue = sc->sc_run_queue;
2196 	SIMPLEQ_INIT(&sc->sc_run_queue);
2197 	sc->sc_flags &= ~HW_RUNNING;
2198 	if (sc->sc_wait_qlen > 0)
2199 		mvxpsec_dispatch_queue(sc);
2200 
2201 	ndone = 0;
2202 	while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2203 		SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2204 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
2205 		mvxpsec_done_packet(mv_p);
2206 		ndone++;
2207 	}
2208 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2209 
2210 	mutex_exit(&sc->sc_queue_mtx);
2211 }
2212 
2213 /*
2214  * drop the packet
2215  */
2216 INLINE void
2217 mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2218     struct mvxpsec_packet *mv_p, int err)
2219 {
2220 	/* must be called with sc->sc_queue_mtx held */
2221 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2222 
2223 	if (mv_p)
2224 		mvxpsec_packet_dealloc(mv_p);
2225 	if (err < 0)
2226 		err = EINVAL;
2227 	crp->crp_etype = err;
2228 	crypto_done(crp);
2229 	MVXPSEC_EVCNT_INCR(sc, packet_err);
2230 
2231 	/* dispatch other packets in queue */
2232 	if (sc->sc_wait_qlen > 0 &&
2233 	    !(sc->sc_flags & HW_RUNNING))
2234 		mvxpsec_dispatch_queue(sc);
2235 
2236 	/* unblock driver for dropped packet */
2237 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2238 }
2239 
2240 /* move wait queue entry to run queue */
2241 STATIC int
2242 mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2243 {
2244 	struct mvxpsec_packet *mv_p;
2245 	paddr_t head;
2246 	int ndispatch = 0;
2247 
2248 	/* must be called with sc->sc_queue_mtx held */
2249 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2250 
2251 	/* check there is any task */
2252 	if (__predict_false(sc->sc_flags & HW_RUNNING)) {
2253 		log(LOG_WARNING,
2254 		    "%s: another packet already exists.\n", __func__);
2255 		return 0;
2256 	}
2257 	if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
2258 		log(LOG_WARNING,
2259 		    "%s: no waiting packet yet (qlen=%d).\n",
2260 		    __func__, sc->sc_wait_qlen);
2261 		return 0;
2262 	}
2263 
2264 	/* move queue */
2265 	sc->sc_run_queue = sc->sc_wait_queue;
2266 	sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */
2267 	SIMPLEQ_INIT(&sc->sc_wait_queue);
2268 	ndispatch = sc->sc_wait_qlen;
2269 	sc->sc_wait_qlen = 0;
2270 
2271 	/* get 1st DMA descriptor */
2272 	mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2273 	head = mv_p->dma_ring.dma_head->phys_addr;
2274 
2275 	/* terminate last DMA descriptor */
2276 	mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2277 	mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2278 
2279 	/* configure TDMA */
2280 	if (mvxpsec_dma_wait(sc) < 0) {
2281 		log(LOG_ERR, "%s: DMA DEVICE not responding", __func__);
2282 		callout_schedule(&sc->sc_timeout, hz);
2283 		return 0;
2284 	}
2285 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2286 
2287 	/* trigger ACC */
2288 	if (mvxpsec_acc_wait(sc) < 0) {
2289 		log(LOG_ERR, "%s: MVXPSEC not responding", __func__);
2290 		callout_schedule(&sc->sc_timeout, hz);
2291 		return 0;
2292 	}
2293 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2294 
2295 	MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2296 	MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2297 	callout_schedule(&sc->sc_timeout, hz);
2298 	return 0;
2299 }
2300 
2301 /*
2302  * process opencrypto operations (cryptop) for packets.
2303  */
2304 INLINE int
2305 mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2306 {
2307 	int ivlen;
2308 
2309 	KASSERT(mv_p->flags & RDY_DATA);
2310 
2311 	/* MAC & Ciphers: set data location and operation */
2312 	switch (crd->crd_alg) {
2313 	case CRYPTO_SHA1_HMAC_96:
2314 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2315 		/* fall through */
2316 	case CRYPTO_SHA1_HMAC:
2317 		mv_p->mac_dst = crd->crd_inject;
2318 		mv_p->mac_off = crd->crd_skip;
2319 		mv_p->mac_len = crd->crd_len;
2320 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2321 		    MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2322 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2323 		/* No more setup for MAC */
2324 		return 0;
2325 	case CRYPTO_MD5_HMAC_96:
2326 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2327 		/* fall through */
2328 	case CRYPTO_MD5_HMAC:
2329 		mv_p->mac_dst = crd->crd_inject;
2330 		mv_p->mac_off = crd->crd_skip;
2331 		mv_p->mac_len = crd->crd_len;
2332 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2333 		    MV_ACC_CRYPTO_MAC_HMAC_MD5);
2334 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2335 		/* No more setup for MAC */
2336 		return 0;
2337 	case CRYPTO_DES_CBC:
2338 		mv_p->enc_ivoff = crd->crd_inject;
2339 		mv_p->enc_off = crd->crd_skip;
2340 		mv_p->enc_len = crd->crd_len;
2341 		ivlen = 8;
2342 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2343 		    MV_ACC_CRYPTO_ENC_DES);
2344 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2345 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2346 		break;
2347 	case CRYPTO_3DES_CBC:
2348 		mv_p->enc_ivoff = crd->crd_inject;
2349 		mv_p->enc_off = crd->crd_skip;
2350 		mv_p->enc_len = crd->crd_len;
2351 		ivlen = 8;
2352 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2353 		    MV_ACC_CRYPTO_ENC_3DES);
2354 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2355 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2356 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2357 		break;
2358 	case CRYPTO_AES_CBC:
2359 		mv_p->enc_ivoff = crd->crd_inject;
2360 		mv_p->enc_off = crd->crd_skip;
2361 		mv_p->enc_len = crd->crd_len;
2362 		ivlen = 16;
2363 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2364 		    MV_ACC_CRYPTO_ENC_AES);
2365 		MV_ACC_CRYPTO_AES_KLEN_SET(
2366 		    mv_p->pkt_header.desc.acc_config,
2367 		   mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2368 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2369 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2370 		break;
2371 	default:
2372 		log(LOG_ERR, "%s: Unknown algorithm %d\n",
2373 		    __func__, crd->crd_alg);
2374 		return EINVAL;
2375 	}
2376 
2377 	/* Operations only for Cipher, not MAC */
2378 	if (crd->crd_flags & CRD_F_ENCRYPT) {
2379 		/* Ciphers: originate IV for encryption. */
2380 		mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2381 		mv_p->flags |= DIR_ENCRYPT;
2382 
2383 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2384 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2385 			mv_p->flags |= CRP_EXT_IV;
2386 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2387 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2388 		}
2389 		else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2390 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2391 			mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2392 		}
2393 		else {
2394 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2395 			mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2396 		}
2397 	}
2398 	else {
2399 		/* Ciphers: IV is loaded from crd_inject when it's present */
2400 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2401 		mv_p->flags |= DIR_DECRYPT;
2402 
2403 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2404 #ifdef MVXPSEC_DEBUG
2405 			if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2406 				MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2407 				    "EXPLICIT IV(Decrypt)\n");
2408 				mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2409 			}
2410 #endif
2411 			mv_p->flags |= CRP_EXT_IV;
2412 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2413 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2414 		}
2415 	}
2416 
2417 	KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2418 
2419 	return 0;
2420 }
2421 
2422 INLINE int
2423 mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2424 {
2425 	struct cryptop *crp = mv_p->crp;
2426 	struct cryptodesc *crd;
2427 	int err;
2428 
2429 	KASSERT(crp);
2430 
2431 	mvxpsec_packet_reset_op(mv_p);
2432 
2433 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2434 		err = mvxpsec_parse_crd(mv_p, crd);
2435 		if (err)
2436 			return err;
2437 	}
2438 
2439 	return 0;
2440 }
2441 
2442 INLINE int
2443 mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2444 {
2445 	int err = EINVAL;
2446 
2447 	/* register crp to the MVXPSEC packet */
2448 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2449 		err = mvxpsec_packet_setmbuf(mv_p,
2450 		    (struct mbuf *)crp->crp_buf);
2451 		mv_p->crp = crp;
2452 	}
2453 	else if (crp->crp_flags & CRYPTO_F_IOV) {
2454 		err = mvxpsec_packet_setuio(mv_p,
2455 		    (struct uio *)crp->crp_buf);
2456 		mv_p->crp = crp;
2457 	}
2458 	else {
2459 		err = mvxpsec_packet_setdata(mv_p,
2460 		    (struct mbuf *)crp->crp_buf, crp->crp_ilen);
2461 		mv_p->crp = crp;
2462 	}
2463 	if (__predict_false(err))
2464 		return err;
2465 
2466 	/* parse crp and setup MVXPSEC registers/descriptors */
2467 	err = mvxpsec_parse_crp(mv_p);
2468 	if (__predict_false(err))
2469 		return err;
2470 
2471 	/* fixup data offset to fit MVXPSEC internal SRAM */
2472 	err = mvxpsec_header_finalize(mv_p);
2473 	if (__predict_false(err))
2474 		return err;
2475 
2476 	return 0;
2477 }
2478 
2479 /*
2480  * load data for encrypt/decrypt/authentication
2481  * data is a raw kernel memory area.
2482  * data is raw kernel memory area.
2483  */
2484 STATIC int
2485 mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2486     void *data, uint32_t data_len)
2487 {
2488 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2489 	struct mvxpsec_softc *sc = mv_s->sc;
2490 
2491 	if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2492 	    NULL, BUS_DMA_NOWAIT)) {
2493 		log(LOG_ERR, "%s: cannot load data\n", __func__);
2494 		return -1;
2495 	}
2496 	mv_p->data_type = MVXPSEC_DATA_RAW;
2497 	mv_p->data_raw = data;
2498 	mv_p->data_len = data_len;
2499 	mv_p->flags |= RDY_DATA;
2500 
2501 	return 0;
2502 }
2503 
2504 /*
2505  * load data for encrypt/decrypt/authentication
2506  *
2507  * data is mbuf-based network data.
2508  */
2509 STATIC int
2510 mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2511 {
2512 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2513 	struct mvxpsec_softc *sc = mv_s->sc;
2514 	size_t pktlen = 0;
2515 
2516 	if (__predict_true(m->m_flags & M_PKTHDR))
2517 		pktlen = m->m_pkthdr.len;
2518 	else {
2519 		struct mbuf *mp = m;
2520 
2521 		while (mp != NULL) {
2522 			pktlen += mp->m_len;
2523 			mp = mp->m_next;
2524 		}
2525 	}
2526 	if (pktlen > SRAM_PAYLOAD_SIZE) {
2527 		extern percpu_t *espstat_percpu;
2528 		/*
2529 		 * XXX: layer violation. opencrypto knows our max packet
2530 		 * size from the crypto_register(9) API.
2531 		 */
2532 
2533 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2534 		log(LOG_ERR,
2535 		    "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2536 		    device_xname(sc->sc_dev),
2537 		    (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2538 		mv_p->data_type = MVXPSEC_DATA_NONE;
2539 		mv_p->data_mbuf = NULL;
2540 		return -1;
2541 	}
2542 
2543 	if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2544 	    BUS_DMA_NOWAIT)) {
2545 		mv_p->data_type = MVXPSEC_DATA_NONE;
2546 		mv_p->data_mbuf = NULL;
2547 		log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2548 		return -1;
2549 	}
2550 
2551 	/* set payload buffer */
2552 	mv_p->data_type = MVXPSEC_DATA_MBUF;
2553 	mv_p->data_mbuf = m;
2554 	if (m->m_flags & M_PKTHDR) {
2555 		mv_p->data_len = m->m_pkthdr.len;
2556 	}
2557 	else {
2558 		mv_p->data_len = 0;
2559 		while (m) {
2560 			mv_p->data_len += m->m_len;
2561 			m = m->m_next;
2562 		}
2563 	}
2564 	mv_p->flags |= RDY_DATA;
2565 
2566 	return 0;
2567 }
2568 
2569 STATIC int
2570 mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2571 {
2572 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2573 	struct mvxpsec_softc *sc = mv_s->sc;
2574 
2575 	if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2576 		extern percpu_t *espstat_percpu;
2577 		/*
2578 		 * XXX: layer violation. opencrypto knows our max packet
2579 		 * size from the crypto_register(9) API.
2580 		 */
2581 
2582 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2583 		log(LOG_ERR,
2584 		    "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2585 		    device_xname(sc->sc_dev),
2586 		    uio->uio_resid, SRAM_PAYLOAD_SIZE);
2587 		mv_p->data_type = MVXPSEC_DATA_NONE;
2588 		mv_p->data_mbuf = NULL;
2589 		return -1;
2590 	}
2591 
2592 	if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2593 	    BUS_DMA_NOWAIT)) {
2594 		mv_p->data_type = MVXPSEC_DATA_NONE;
2595 		mv_p->data_mbuf = NULL;
2596 		log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2597 		return -1;
2598 	}
2599 
2600 	/* set payload buffer */
2601 	mv_p->data_type = MVXPSEC_DATA_UIO;
2602 	mv_p->data_uio = uio;
2603 	mv_p->data_len = uio->uio_resid;
2604 	mv_p->flags |= RDY_DATA;
2605 
2606 	return 0;
2607 }
2608 
2609 STATIC int
2610 mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2611     int off, int len, void *cp)
2612 {
2613 	uint8_t *p;
2614 
2615 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2616 		p = (uint8_t *)mv_p->data_raw + off;
2617 		memcpy(cp, p, len);
2618 	}
2619 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2620 		m_copydata(mv_p->data_mbuf, off, len, cp);
2621 	}
2622 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2623 		cuio_copydata(mv_p->data_uio, off, len, cp);
2624 	}
2625 	else
2626 		return -1;
2627 
2628 	return 0;
2629 }
2630 
2631 STATIC int
2632 mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2633     int off, int len, void *cp)
2634 {
2635 	uint8_t *p;
2636 
2637 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2638 		p = (uint8_t *)mv_p->data_raw + off;
2639 		memcpy(p, cp, len);
2640 	}
2641 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2642 		m_copyback(mv_p->data_mbuf, off, len, cp);
2643 	}
2644 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2645 		cuio_copyback(mv_p->data_uio, off, len, cp);
2646 	}
2647 	else
2648 		return -1;
2649 
2650 	return 0;
2651 }
2652 
2653 /*
2654  * Set the cipher's initial vector for the packet.
2655  */
2656 STATIC int
2657 mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2658 {
2659 	uint8_t ivbuf[16];
2660 
2661 	KASSERT(ivlen == 8 || ivlen == 16);
2662 
2663 	if (iv == NULL) {
2664 	       	if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2665 			/* use per session IV (compatible with KAME IPsec) */
2666 			mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2667 			mv_p->flags |= RDY_CRP_IV;
2668 			return 0;
2669 		}
2670 		cprng_fast(ivbuf, ivlen);
2671 		iv = ivbuf;
2672 	}
2673 	memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2674 	if (mv_p->flags & CRP_EXT_IV) {
2675 		memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2676 		mv_p->ext_iv = iv;
2677 		mv_p->ext_ivlen = ivlen;
2678 	}
2679 	mv_p->flags |= RDY_CRP_IV;
2680 
2681 	return 0;
2682 }
2683 
2684 STATIC int
2685 mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2686 {
2687 	mvxpsec_packet_rdata(mv_p, off, ivlen,
2688 	    &mv_p->pkt_header.crp_iv_work);
2689 	mv_p->flags |= RDY_CRP_IV;
2690 
2691 	return 0;
2692 }
2693 
2694 /*
2695  * set an encryption or decryption key for the session
2696  *
2697  * Input key material is big endian.
2698  */
2699 STATIC int
2700 mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2701     void *key_encrypt, void *key_decrypt)
2702 {
2703 	uint32_t *kp = keymat;
2704 	uint32_t *ekp = key_encrypt;
2705 	uint32_t *dkp = key_decrypt;
2706 	int i;
2707 
2708 	switch (alg) {
2709 	case CRYPTO_DES_CBC:
2710 		if (kbitlen < 64 || (kbitlen % 8) != 0) {
2711 			log(LOG_WARNING,
2712 			    "mvxpsec: invalid DES keylen %d\n", kbitlen);
2713 			return EINVAL;
2714 		}
2715 		for (i = 0; i < 2; i++)
2716 			dkp[i] = ekp[i] = kp[i];
2717 		for (; i < 8; i++)
2718 			dkp[i] = ekp[i] = 0;
2719 		break;
2720 	case CRYPTO_3DES_CBC:
2721 		if (kbitlen < 192 || (kbitlen % 8) != 0) {
2722 			log(LOG_WARNING,
2723 			    "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2724 			return EINVAL;
2725 		}
2726 		for (i = 0; i < 8; i++)
2727 			dkp[i] = ekp[i] = kp[i];
2728 		break;
2729 	case CRYPTO_AES_CBC:
2730 		if (kbitlen < 128) {
2731 			log(LOG_WARNING,
2732 			    "mvxpsec: invalid AES keylen %d\n", kbitlen);
2733 			return EINVAL;
2734 		}
2735 		else if (kbitlen < 192) {
2736 			/* AES-128 */
2737 			for (i = 0; i < 4; i++)
2738 				ekp[i] = kp[i];
2739 			for (; i < 8; i++)
2740 				ekp[i] = 0;
2741 		}
2742 	       	else if (kbitlen < 256) {
2743 			/* AES-192 */
2744 			for (i = 0; i < 6; i++)
2745 				ekp[i] = kp[i];
2746 			for (; i < 8; i++)
2747 				ekp[i] = 0;
2748 		}
2749 		else  {
2750 			/* AES-256 */
2751 			for (i = 0; i < 8; i++)
2752 				ekp[i] = kp[i];
2753 		}
2754 		/* make decryption key */
2755 		mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2756 		break;
2757 	default:
2758 		for (i = 0; i < 8; i++)
2759 			ekp[i] = dkp[i] = 0;
2760 		break;
2761 	}
2762 
2763 #ifdef MVXPSEC_DEBUG
2764 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2765 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2766 		    "%s: key registered\n", __func__);
2767 		mvxpsec_dump_data(__func__, ekp, 32);
2768 	}
2769 #endif
2770 
2771 	return 0;
2772 }
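
/*
 * Editor's sketch (standalone, not compiled into the driver): the key
 * packing rule implemented above.  The engine always consumes 8
 * 32-bit words per key; AES-128/192 keys occupy the first 4/6 words
 * and the remainder is zero-padded.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t kp[4] = {	/* example 128-bit key, big-endian words */
		0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
	};
	uint32_t ekp[8];
	int i;

	for (i = 0; i < 4; i++)		/* AES-128: 4 key words */
		ekp[i] = kp[i];
	for (; i < 8; i++)		/* zero-pad the rest */
		ekp[i] = 0;

	for (i = 0; i < 8; i++)
		printf("ekp[%d] = 0x%08x\n", i, ekp[i]);
	return 0;
}
#endif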
2773 
2774 /*
2775  * set MAC key to the session
2776  *
2777  * The MAC engine has no register for the key itself, but it has
2778  * inner and outer IV registers; software must compute both IVs
2779  * before enabling the engine.
2780  *
2781  * Each IV is the hash state after one block of ipad/opad, as
2782  * defined by the FIPS-198a (HMAC) standard.
2783  */
2784 STATIC int
2785 mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2786     void *iv_inner, void *iv_outer)
2787 {
2788 	SHA1_CTX sha1;
2789 	MD5_CTX md5;
2790 	uint8_t *key8 = key;
2791 	uint8_t kbuf[64];
2792 	uint8_t ipad[64];
2793 	uint8_t opad[64];
2794 	uint32_t *iv_in = iv_inner;
2795 	uint32_t *iv_out = iv_outer;
2796 	int kbytelen;
2797 	int i;
2798 #define HMAC_IPAD 0x36
2799 #define HMAC_OPAD 0x5c
2800 
2801 	kbytelen = kbitlen / 8;
2802 	KASSERT(kbitlen == kbytelen * 8);
2803 	if (kbytelen > 64) {
2804 		SHA1Init(&sha1);
2805 		SHA1Update(&sha1, key, kbytelen);
2806 		SHA1Final(kbuf, &sha1);
2807 		key8 = kbuf;
2808 		kbytelen = 20;	/* SHA1 digest length; zero-padded below */
2809 	}
2810 
2811 	/* make initial 64 oct. string */
2812 	switch (alg) {
2813 	case CRYPTO_SHA1_HMAC_96:
2814 	case CRYPTO_SHA1_HMAC:
2815 	case CRYPTO_MD5_HMAC_96:
2816 	case CRYPTO_MD5_HMAC:
2817 		for (i = 0; i < kbytelen; i++) {
2818 			ipad[i] = (key8[i] ^ HMAC_IPAD);
2819 			opad[i] = (key8[i] ^ HMAC_OPAD);
2820 		}
2821 		for (; i < 64; i++) {
2822 			ipad[i] = HMAC_IPAD;
2823 			opad[i] = HMAC_OPAD;
2824 		}
2825 		break;
2826 	default:
2827 		break;
2828 	}
2829 #ifdef MVXPSEC_DEBUG
2830 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2831 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2832 		    "%s: HMAC-KEY Pre-comp:\n", __func__);
2833 		mvxpsec_dump_data(__func__, key8, kbytelen);
2834 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2835 		    "%s: ipad:\n", __func__);
2836 		mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2837 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2838 		    "%s: opad:\n", __func__);
2839 		mvxpsec_dump_data(__func__, opad, sizeof(opad));
2840 	}
2841 #endif
2842 
2843 	/* make iv from string */
2844 	switch (alg) {
2845 	case CRYPTO_SHA1_HMAC_96:
2846 	case CRYPTO_SHA1_HMAC:
2847 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2848 		    "%s: Generate iv_in(SHA1)\n", __func__);
2849 		SHA1Init(&sha1);
2850 		SHA1Update(&sha1, ipad, 64);
2851 		/* XXX: private state... (LE) */
2852 		iv_in[0] = htobe32(sha1.state[0]);
2853 		iv_in[1] = htobe32(sha1.state[1]);
2854 		iv_in[2] = htobe32(sha1.state[2]);
2855 		iv_in[3] = htobe32(sha1.state[3]);
2856 		iv_in[4] = htobe32(sha1.state[4]);
2857 
2858 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2859 		    "%s: Generate iv_out(SHA1)\n", __func__);
2860 		SHA1Init(&sha1);
2861 		SHA1Update(&sha1, opad, 64);
2862 		/* XXX: private state... (LE) */
2863 		iv_out[0] = htobe32(sha1.state[0]);
2864 		iv_out[1] = htobe32(sha1.state[1]);
2865 		iv_out[2] = htobe32(sha1.state[2]);
2866 		iv_out[3] = htobe32(sha1.state[3]);
2867 		iv_out[4] = htobe32(sha1.state[4]);
2868 		break;
2869 	case CRYPTO_MD5_HMAC_96:
2870 	case CRYPTO_MD5_HMAC:
2871 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2872 		    "%s: Generate iv_in(MD5)\n", __func__);
2873 		MD5Init(&md5);
2874 		MD5Update(&md5, ipad, sizeof(ipad));
2875 		/* XXX: private state... (LE) */
2876 		iv_in[0] = htobe32(md5.state[0]);
2877 		iv_in[1] = htobe32(md5.state[1]);
2878 		iv_in[2] = htobe32(md5.state[2]);
2879 		iv_in[3] = htobe32(md5.state[3]);
2880 		iv_in[4] = 0;
2881 
2882 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2883 		    "%s: Generate iv_out(MD5)\n", __func__);
2884 		MD5Init(&md5);
2885 		MD5Update(&md5, opad, sizeof(opad));
2886 		/* XXX: private state... (LE) */
2887 		iv_out[0] = htobe32(md5.state[0]);
2888 		iv_out[1] = htobe32(md5.state[1]);
2889 		iv_out[2] = htobe32(md5.state[2]);
2890 		iv_out[3] = htobe32(md5.state[3]);
2891 		iv_out[4] = 0;
2892 		break;
2893 	default:
2894 		break;
2895 	}
2896 
2897 #ifdef MVXPSEC_DEBUG
2898 	if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2899 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2900 		    "%s: HMAC IV-IN\n", __func__);
2901 		mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2902 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2903 		    "%s: HMAC IV-OUT\n", __func__);
2904 		mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2905 	}
2906 #endif
2907 
2908 	return 0;
2909 #undef HMAC_IPAD
2910 #undef HMAC_OPAD
2911 }
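
/*
 * Editor's sketch (standalone, not compiled into the driver) of the
 * FIPS-198a padding step performed above: the key is XORed into the
 * ipad/opad constants over one 64-byte block; hashing one block of
 * each yields the intermediate states that are loaded into the
 * engine's inner/outer IV registers.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint8_t key[16];		/* example 128-bit HMAC key */
	uint8_t ipad[64], opad[64];
	size_t i;

	memset(key, 0xaa, sizeof(key));
	memset(ipad, 0x36, sizeof(ipad));	/* HMAC_IPAD */
	memset(opad, 0x5c, sizeof(opad));	/* HMAC_OPAD */
	for (i = 0; i < sizeof(key); i++) {
		ipad[i] ^= key[i];
		opad[i] ^= key[i];
	}
	/* hash(ipad) -> IV-in, hash(opad) -> IV-out */
	printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
	return 0;
}
#endif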
2912 
2913 /*
2914  * AES support routines
2915  */
2916 static uint8_t AES_SBOX[256] = {
2917 	 99, 124, 119, 123, 242, 107, 111, 197,  48,   1, 103,  43, 254, 215,
2918        	171, 118, 202, 130, 201, 125, 250,  89,  71, 240, 173, 212, 162, 175,
2919        	156, 164, 114, 192, 183, 253, 147,  38,  54,  63, 247, 204,  52, 165,
2920        	229, 241, 113, 216,  49,  21,   4, 199,  35, 195,  24, 150,   5, 154,
2921        	  7,  18, 128, 226, 235,  39, 178, 117,   9, 131,  44,  26,  27, 110,
2922 	 90, 160,  82,  59, 214, 179,  41, 227,  47, 132,  83, 209,   0, 237,
2923        	 32, 252, 177,  91, 106, 203, 190,  57,  74,  76,  88, 207, 208, 239,
2924 	170, 251,  67,  77,  51, 133,  69, 249,   2, 127,  80,  60, 159, 168,
2925 	 81, 163,  64, 143, 146, 157,  56, 245, 188, 182, 218,  33,  16, 255,
2926 	243, 210, 205,  12,  19, 236,  95, 151,  68,  23, 196, 167, 126,  61,
2927        	100,  93,  25, 115,  96, 129,  79, 220,  34,  42, 144, 136,  70, 238,
2928        	184,  20, 222,  94,  11, 219, 224,  50,  58,  10,  73,   6,  36,  92,
2929        	194, 211, 172,  98, 145, 149, 228, 121, 231, 200,  55, 109, 141, 213,
2930       	 78, 169, 108,  86, 244, 234, 101, 122, 174,   8, 186, 120,  37,  46,
2931        	 28, 166, 180, 198, 232, 221, 116,  31,  75, 189, 139, 138, 112,  62,
2932 	181, 102,  72,   3, 246,  14,  97,  53,  87, 185, 134, 193,  29, 158,
2933        	225, 248, 152,  17, 105, 217, 142, 148, 155,  30, 135, 233, 206,  85,
2934       	 40, 223, 140, 161, 137,  13, 191, 230,  66, 104,  65, 153,  45,  15,
2935 	176,  84, 187,  22
2936 };
2937 
2938 static uint32_t AES_RCON[30] = {
2939 	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
2940        	0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
2941        	0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
2942 };
2943 
2944 STATIC int
2945 mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2946     uint8_t W[MAXROUNDS+1][4][MAXBC])
2947 {
2948 	int KC, BC, ROUNDS;
2949 	int i, j, t, rconpointer = 0;
2950 	uint8_t tk[4][MAXKC];
2951 
2952 	switch (keyBits) {
2953 	case 128:
2954 		ROUNDS = 10;
2955 		KC = 4;
2956 		break;
2957 	case 192:
2958 		ROUNDS = 12;
2959 		KC = 6;
2960 	       	break;
2961 	case 256:
2962 		ROUNDS = 14;
2963 	       	KC = 8;
2964 	       	break;
2965 	default:
2966 	       	return (-1);
2967 	}
2968 	BC = 4; /* 128 bits */
2969 
2970 	for(j = 0; j < KC; j++)
2971 		for(i = 0; i < 4; i++)
2972 			tk[i][j] = k[i][j];
2973 	t = 0;
2974 
2975 	/* copy values into round key array */
2976 	for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2977 		for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2978 
2979 	while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
2980 		/* calculate new values */
2981 		for(i = 0; i < 4; i++)
2982 			tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
2983 		tk[0][0] ^= AES_RCON[rconpointer++];
2984 
2985 		if (KC != 8)
2986 			for(j = 1; j < KC; j++)
2987 				for(i = 0; i < 4; i++)
2988 				       	tk[i][j] ^= tk[i][j-1];
2989 		else {
2990 			for(j = 1; j < KC/2; j++)
2991 				for(i = 0; i < 4; i++)
2992 				       	tk[i][j] ^= tk[i][j-1];
2993 			for(i = 0; i < 4; i++)
2994 			       	tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
2995 			for(j = KC/2 + 1; j < KC; j++)
2996 				for(i = 0; i < 4; i++)
2997 				       	tk[i][j] ^= tk[i][j-1];
2998 		}
2999 		/* copy values into round key array */
3000 		for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
3001 			for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
3002 	}
3003 
3004 	return 0;
3005 }
3006 
3007 STATIC int
3008 mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3009 {
3010 	uint8_t   W[MAXROUNDS+1][4][MAXBC];
3011 	uint8_t   k[4][MAXKC];
3012 	uint8_t   j;
3013 	int     i, rounds, KC;
3014 
3015 	if (expandedKey == NULL)
3016 		return -1;
3017 
3018 	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3019 		return -1;
3020 
3021 	if (keyMaterial == NULL)
3022 		return -1;
3023 
3024 	/* initialize key schedule: */
3025 	for (i=0; i<keyLen/8; i++) {
3026 		j = keyMaterial[i];
3027 		k[i % 4][i / 4] = j;
3028 	}
3029 
3030 	mv_aes_ksched(k, keyLen, W);
3031 	switch (keyLen) {
3032 	case 128:
3033 		rounds = 10;
3034 		KC = 4;
3035 		break;
3036 	case 192:
3037 		rounds = 12;
3038 		KC = 6;
3039 		break;
3040 	case 256:
3041 		rounds = 14;
3042 		KC = 8;
3043 		break;
3044 	default:
3045 		return -1;
3046 	}
3047 
3048 	for(i=0; i<MAXBC; i++)
3049 		for(j=0; j<4; j++)
3050 			expandedKey[i*4+j] = W[rounds][j][i];
3051 	for(; i<KC; i++)
3052 		for(j=0; j<4; j++)
3053 			expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3054 
3055 	return 0;
3056 }
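
/*
 * Editor's usage sketch for the helpers above: mv_aes_deckey() runs
 * the AES key schedule and hands back the final round key(s) -- the
 * "equivalent inverse key" the engine needs to start decryption.
 * keyLen is in bits; only 128/192/256 are accepted.
 */
#if 0
	uint8_t ekey[32];	/* raw key material, zero-padded */
	uint8_t dkey[32];	/* receives the last round key */

	if (mv_aes_deckey(dkey, ekey, 128) != 0)
		log(LOG_ERR, "bad AES key length\n");
#endif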
3057 
3058 /*
3059  * Clear cipher/mac operation state
3060  */
3061 INLINE void
3062 mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3063 {
3064 	mv_p->pkt_header.desc.acc_config = 0;
3065 	mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3066 	mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3067 }
3068 
3069 /*
3070  * update MVXPSEC operation order
3071  */
3072 INLINE void
3073 mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3074 {
3075 	struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3076 	uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3077 
3078 	KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3079 	KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3080 
3081 	if (cur_op == 0)
3082 		acc_desc->acc_config |= op;
3083 	else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3084 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3085 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3086 		/* MAC then ENC (= decryption) */
3087 	}
3088 	else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3089 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3090 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3091 		/* ENC then MAC (= encryption) */
3092 	}
3093 	else {
3094 		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
3095 		    __func__,
3096 		    (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication");
3097 	}
3098 }
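
/*
 * Editor's sketch of the call pattern the helper above expects: the
 * order of the crypto descriptors determines the combined operation
 * (ENC then MAC for outbound/encryption, MAC then ENC for inbound).
 */
#if 0
	/* outbound ESP: encrypt first, then authenticate */
	mvxpsec_packet_reset_op(mv_p);
	mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
	mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
	/* acc_config now selects MV_ACC_CRYPTO_OP_ENCMAC */
#endif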
3099 
3100 /*
3101  * Parameter Conversions
3102  */
3103 INLINE uint32_t
3104 mvxpsec_alg2acc(uint32_t alg)
3105 {
3106 	uint32_t reg;
3107 
3108 	switch (alg) {
3109 	case CRYPTO_DES_CBC:
3110 		reg = MV_ACC_CRYPTO_ENC_DES;
3111 		reg |= MV_ACC_CRYPTO_CBC;
3112 		break;
3113 	case CRYPTO_3DES_CBC:
3114 		reg = MV_ACC_CRYPTO_ENC_3DES;
3115 		reg |= MV_ACC_CRYPTO_3DES_EDE;
3116 		reg |= MV_ACC_CRYPTO_CBC;
3117 		break;
3118 	case CRYPTO_AES_CBC:
3119 		reg = MV_ACC_CRYPTO_ENC_AES;
3120 		reg |= MV_ACC_CRYPTO_CBC;
3121 		break;
3122 	case CRYPTO_SHA1_HMAC_96:
3123 		reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3124 		reg |= MV_ACC_CRYPTO_MAC_96;
3125 		break;
3126 	case CRYPTO_MD5_HMAC_96:
3127 		reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3128 		reg |= MV_ACC_CRYPTO_MAC_96;
3129 		break;
3130 	default:
3131 		reg = 0;
3132 		break;
3133 	}
3134 
3135 	return reg;
3136 }
3137 
3138 INLINE uint32_t
3139 mvxpsec_aesklen(int klen)
3140 {
3141 	if (klen < 128)
3142 		return 0;
3143 	else if (klen < 192)
3144 		return MV_ACC_CRYPTO_AES_KLEN_128;
3145 	else if (klen < 256)
3146 		return MV_ACC_CRYPTO_AES_KLEN_192;
3147 	else
3148 		return MV_ACC_CRYPTO_AES_KLEN_256;
3149 
3150 	return 0;
3151 }
3152 
3153 /*
3154  * String Conversions
3155  */
3156 STATIC const char *
3157 s_errreg(uint32_t v)
3158 {
3159 	static char buf[80];
3160 
3161 	snprintf(buf, sizeof(buf),
3162 	    "%sMiss %sDoubleHit %sBothHit %sDataError",
3163 	    (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3164 	    (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3165 	    (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3166 	    (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3167 
3168 	return (const char *)buf;
3169 }
3170 
3171 STATIC const char *
3172 s_winreg(uint32_t v)
3173 {
3174 	static char buf[80];
3175 
3176 	snprintf(buf, sizeof(buf),
3177 	    "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3178 	    (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3179 	    MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3180 	    MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3181 
3182 	return (const char *)buf;
3183 }
3184 
3185 STATIC const char *
3186 s_ctrlreg(uint32_t reg)
3187 {
3188 	static char buf[80];
3189 
3190 	snprintf(buf, sizeof(buf),
3191 	    "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3192 	    (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3193 	    (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3194 	    MV_TDMA_CONTROL_GET_DST_BURST(reg),
3195 	    MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3196 	    (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3197 	    (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3198 	    (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3199 	    (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3200 
3201 	return (const char *)buf;
3202 }
3203 
3204 _STATIC const char *
3205 s_xpsecintr(uint32_t v)
3206 {
3207 	static char buf[160];
3208 
3209 	snprintf(buf, sizeof(buf),
3210 	    "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3211 	    "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3212 	    (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3213 	    (v & MVXPSEC_INT_DES) ? "+" : "-",
3214 	    (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3215 	    (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3216 	    (v & MVXPSEC_INT_ENC) ? "+" : "-",
3217 	    (v & MVXPSEC_INT_SA) ? "+" : "-",
3218 	    (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3219 	    (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3220 	    (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3221 	    (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3222 
3223 	return (const char *)buf;
3224 }
3225 
3226 STATIC const char *
3227 s_ctlalg(uint32_t alg)
3228 {
3229 	switch (alg) {
3230 	case CRYPTO_SHA1_HMAC_96:
3231 		return "HMAC-SHA1-96";
3232 	case CRYPTO_SHA1_HMAC:
3233 		return "HMAC-SHA1";
3234 	case CRYPTO_SHA1:
3235 		return "SHA1";
3236 	case CRYPTO_MD5_HMAC_96:
3237 		return "HMAC-MD5-96";
3238 	case CRYPTO_MD5_HMAC:
3239 		return "HMAC-MD5";
3240 	case CRYPTO_MD5:
3241 		return "MD5";
3242 	case CRYPTO_DES_CBC:
3243 		return "DES-CBC";
3244 	case CRYPTO_3DES_CBC:
3245 		return "3DES-CBC";
3246 	case CRYPTO_AES_CBC:
3247 		return "AES-CBC";
3248 	default:
3249 		break;
3250 	}
3251 
3252 	return "Unknown";
3253 }
3254 
3255 STATIC const char *
3256 s_xpsec_op(uint32_t reg)
3257 {
3258 	reg &= MV_ACC_CRYPTO_OP_MASK;
3259 	switch (reg) {
3260 	case MV_ACC_CRYPTO_OP_ENC:
3261 		return "ENC";
3262 	case MV_ACC_CRYPTO_OP_MAC:
3263 		return "MAC";
3264 	case MV_ACC_CRYPTO_OP_ENCMAC:
3265 		return "ENC-MAC";
3266 	case MV_ACC_CRYPTO_OP_MACENC:
3267 		return "MAC-ENC";
3268 	default:
3269 		break;
3270 	}
3271 
3272 	return "Unknown";
3273 
3274 }
3275 
3276 STATIC const char *
3277 s_xpsec_enc(uint32_t alg)
3278 {
3279 	alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3280 	switch (alg) {
3281 	case MV_ACC_CRYPTO_ENC_DES:
3282 		return "DES";
3283 	case MV_ACC_CRYPTO_ENC_3DES:
3284 		return "3DES";
3285 	case MV_ACC_CRYPTO_ENC_AES:
3286 		return "AES";
3287 	default:
3288 		break;
3289 	}
3290 
3291 	return "Unknown";
3292 }
3293 
3294 STATIC const char *
3295 s_xpsec_mac(uint32_t alg)
3296 {
3297 	alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3298 	switch (alg) {
3299 	case MV_ACC_CRYPTO_MAC_NONE:
3300 		return "Disabled";
3301 	case MV_ACC_CRYPTO_MAC_MD5:
3302 		return "MD5";
3303 	case MV_ACC_CRYPTO_MAC_SHA1:
3304 		return "SHA1";
3305 	case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3306 		return "HMAC-MD5";
3307 	case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3308 		return "HMAC-SHA1";
3309 	default:
3310 		break;
3311 	}
3312 
3313 	return "Unknown";
3314 }
3315 
3316 STATIC const char *
3317 s_xpsec_frag(uint32_t frag)
3318 {
3319 	frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3320 	switch (frag) {
3321 	case MV_ACC_CRYPTO_NOFRAG:
3322 		return "NoFragment";
3323 	case MV_ACC_CRYPTO_FRAG_FIRST:
3324 		return "FirstFragment";
3325 	case MV_ACC_CRYPTO_FRAG_MID:
3326 		return "MiddleFragment";
3327 	case MV_ACC_CRYPTO_FRAG_LAST:
3328 		return "LastFragment";
3329 	default:
3330 		break;
3331 	}
3332 
3333 	return "Unknown";
3334 }
3335 
3336 #ifdef MVXPSEC_DEBUG
3337 void
3338 mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3339 {
3340 	uint32_t reg;
3341 	int i;
3342 
3343 	if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3344 		return;
3345 
3346 	printf("--- Interrupt Registers ---\n");
3347 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3348 	printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3349 	printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3350 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3351 	printf("MVXPSEC INT MASK: 0x%08x\n", reg);
3352 	printf("MVXPSEC INT MASK: %s\n", s_xpsecintr(reg));
3353 
3354 	printf("--- DMA Configuration Registers ---\n");
3355 	for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3356 		reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3357 		printf("TDMA BAR%d: 0x%08x\n", i, reg);
3358 		reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3359 		printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3360 		printf("  -> %s\n", s_winreg(reg));
3361 	}
3362 
3363 	printf("--- DMA Control Registers ---\n");
3364 
3365 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3366 	printf("TDMA CONTROL: 0x%08x\n", reg);
3367 	printf("  -> %s\n", s_ctrlreg(reg));
3368 
3369 	printf("--- DMA Current Command Descriptors ---\n");
3370 
3371 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3372 	printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3373 
3374 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3375 	printf("TDMA ERR MASK: 0x%08x\n", reg);
3376 
3377 	reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3378 	printf("TDMA DATA OWNER: %s\n",
3379 	    (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3380 	printf("TDMA DATA COUNT: %d(0x%x)\n",
3381 	    (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3382 
3383 	reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3384 	printf("TDMA DATA SRC: 0x%08x\n", reg);
3385 
3386 	reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3387 	printf("TDMA DATA DST: 0x%08x\n", reg);
3388 
3389 	reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3390 	printf("TDMA DATA NXT: 0x%08x\n", reg);
3391 
3392 	reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3393 	printf("TDMA DATA CUR: 0x%08x\n", reg);
3394 
3395 	printf("--- ACC Command Register ---\n");
3396 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3397 	printf("ACC COMMAND: 0x%08x\n", reg);
3398 	printf("ACC: %sACT %sSTOP\n",
3399 	    (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3400 	    (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3401 
3402 	reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3403 	printf("ACC CONFIG: 0x%08x\n", reg);
3404 	reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3405 	printf("ACC DESC: 0x%08x\n", reg);
3406 
3407 	printf("--- DES Key Register ---\n");
3408 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3409 	printf("DES KEY0  Low: 0x%08x\n", reg);
3410 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3411 	printf("DES KEY0 High: 0x%08x\n", reg);
3412 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3413 	printf("DES KEY1  Low: 0x%08x\n", reg);
3414 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3415 	printf("DES KEY1 High: 0x%08x\n", reg);
3416 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3417 	printf("DES KEY2  Low: 0x%08x\n", reg);
3418 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3419 	printf("DES KEY2 High: 0x%08x\n", reg);
3420 
3421 	printf("--- AES Key Register ---\n");
3422 	for (i = 0; i < 8; i++) {
3423 		reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3424 		printf("AES ENC KEY COL%d: %08x\n", i, reg);
3425 	}
3426 	for (i = 0; i < 8; i++) {
3427 		reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3428 		printf("AES DEC KEY COL%d: %08x\n", i, reg);
3429 	}
3430 
3431 	return;
3432 }
3433 
3434 STATIC void
3435 mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3436 {
3437 	uint32_t reg;
3438 
3439 	if (sc->sc_sram_va == NULL)
3440 		return;
3441 
3442 	if (len == 0) {
3443 		printf("\n%s NO DATA(len=0)\n", name);
3444 		return;
3445 	}
3446 	else if (len > MV_ACC_SRAM_SIZE)
3447 		len = MV_ACC_SRAM_SIZE;
3448 
3449 	mutex_enter(&sc->sc_dma_mtx);
3450 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3451 	if (reg & MV_TDMA_CONTROL_ACT) {
3452 		printf("TDMA is active, cannot access SRAM\n");
3453 		mutex_exit(&sc->sc_dma_mtx);
3454 		return;
3455 	}
3456 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3457 	if (reg & MV_ACC_COMMAND_ACT) {
3458 		printf("SA is active, cannot access SRAM\n");
3459 		mutex_exit(&sc->sc_dma_mtx);
3460 		return;
3461 	}
3462 
3463 	printf("%s: dump SRAM, %zu bytes\n", name, len);
3464 	mvxpsec_dump_data(name, sc->sc_sram_va, len);
3465 	mutex_exit(&sc->sc_dma_mtx);
3466 	return;
3467 }
3468 
3469 
3470 _STATIC void
3471 mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3472 {
3473 	struct mvxpsec_descriptor *d =
3474            (struct mvxpsec_descriptor *)dh->_desc;
3475 
3476 	printf("--- DMA Command Descriptor ---\n");
3477 	printf("DESC: VA=%p PA=0x%08x\n",
3478 	    d, (uint32_t)dh->phys_addr);
3479 	printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3480 	printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3481 	printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3482 	printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3483 
3484 	return;
3485 }
3486 
3487 STATIC void
3488 mvxpsec_dump_data(const char *name, void *p, size_t len)
3489 {
3490 	uint8_t *data = p;
3491 	off_t off;
3492 
3493 	printf("%s: dump %p, %zu bytes", name, p, len);
3494 	if (p == NULL || len == 0) {
3495 		printf("\n%s: NO DATA\n", name);
3496 		return;
3497 	}
3498 	for (off = 0; off < len; off++) {
3499 		if ((off % 16) == 0) {
3500 			printf("\n%s: 0x%08x:", name, (uint32_t)off);
3501 		}
3502 		if ((off % 4) == 0) {
3503 			printf(" ");
3504 		}
3505 		printf("%02x", data[off]);
3506 	}
3507 	printf("\n");
3508 
3509 	return;
3510 }
3511 
3512 _STATIC void
3513 mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3514 {
3515 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3516 
3517 	printf("%s: packet_data:\n", name);
3518 	mvxpsec_dump_packet_data(name, mv_p);
3519 
3520 	printf("%s: SRAM:\n", name);
3521 	mvxpsec_dump_sram(name, sc, 2000);
3522 
3523 	printf("%s: packet_descriptor:\n", name);
3524 	mvxpsec_dump_packet_desc(name, mv_p);
3525 }
3526 
3527 _STATIC void
3528 mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3529 {
3530 	static char buf[1500];
3531 	int len;
3532 
3533 	if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3534 		struct mbuf *m;
3535 
3536 		m = mv_p->data.mbuf;
3537 		len = m->m_pkthdr.len;
3538 		if (len > sizeof(buf))
3539 			len = sizeof(buf);
3540 		m_copydata(m, 0, len, buf);
3541 	}
3542 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3543 		struct uio *uio;
3544 
3545 		uio = mv_p->data.uio;
3546 		len = uio->uio_resid;
3547 		if (len > sizeof(buf))
3548 			len = sizeof(buf);
3549 		cuio_copydata(uio, 0, len, buf);
3550 	}
3551 	else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3552 		len = mv_p->data_len;
3553 		if (len > sizeof(buf))
3554 			len = sizeof(buf);
3555 		memcpy(buf, mv_p->data.raw, len);
3556 	}
3557 	else
3558 		return;
3559 	mvxpsec_dump_data(name, buf, len);
3560 
3561 	return;
3562 }
3563 
3564 _STATIC void
3565 mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3566 {
3567 	uint32_t *words;
3568 
3569 	if (mv_p == NULL)
3570 		return;
3571 
3572 	words = &mv_p->pkt_header.desc.acc_desc_dword0;
3573 	mvxpsec_dump_acc_config(name, words[0]);
3574 	mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3575 	mvxpsec_dump_acc_enclen(name, words[2]);
3576 	mvxpsec_dump_acc_enckey(name, words[3]);
3577 	mvxpsec_dump_acc_enciv(name, words[4]);
3578 	mvxpsec_dump_acc_macsrc(name, words[5]);
3579 	mvxpsec_dump_acc_macdst(name, words[6]);
3580 	mvxpsec_dump_acc_maciv(name, words[7]);
3581 
3582 	return;
3583 }
3584 
3585 _STATIC void
3586 mvxpsec_dump_acc_config(const char *name, uint32_t w)
3587 {
3588 	/* SA: Dword 0 */
3589 	printf("%s: Dword0=0x%08x\n", name, w);
3590 	printf("%s:   OP = %s\n", name,
3591 	    s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3592 	printf("%s:   MAC = %s\n", name,
3593 	    s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3594 	printf("%s:   MAC_LEN = %s\n", name,
3595 	    w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3596 	printf("%s:   ENC = %s\n", name,
3597 	    s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3598 	printf("%s:   DIR = %s\n", name,
3599 	    w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3600 	printf("%s:   CHAIN = %s\n", name,
3601 	    w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3602 	printf("%s:   3DES = %s\n", name,
3603 	    w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3604 	printf("%s:   FRAGMENT = %s\n", name,
3605 	    s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3606 	return;
3607 }
3608 
3609 STATIC void
3610 mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3611 {
3612 	/* SA: Dword 1 */
3613 	printf("%s: Dword1=0x%08x\n", name, w);
3614 	printf("%s:   ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3615 	printf("%s:   ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3616 	printf("%s:   ENC RANGE = 0x%x - 0x%x\n", name,
3617 	    MV_ACC_DESC_GET_VAL_1(w),
3618 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3619 	return;
3620 }
3621 
3622 STATIC void
3623 mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3624 {
3625 	/* SA: Dword 2 */
3626 	printf("%s: Dword2=0x%08x\n", name, w);
3627 	printf("%s:   ENC LEN = %d\n", name,
3628 	    MV_ACC_DESC_GET_VAL_1(w));
3629 	return;
3630 }
3631 
3632 STATIC void
3633 mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3634 {
3635 	/* SA: Dword 3 */
3636 	printf("%s: Dword3=0x%08x\n", name, w);
3637 	printf("%s:   EKEY = 0x%x\n", name,
3638 	    MV_ACC_DESC_GET_VAL_1(w));
3639 	return;
3640 }
3641 
3642 STATIC void
3643 mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3644 {
3645 	/* SA: Dword 4 */
3646 	printf("%s: Dword4=0x%08x\n", name, w);
3647 	printf("%s:   EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3648 	printf("%s:   EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3649 	return;
3650 }
3651 
3652 STATIC void
3653 mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3654 {
3655 	/* SA: Dword 5 */
3656 	printf("%s: Dword5=0x%08x\n", name, w);
3657 	printf("%s:   MAC_SRC = 0x%x\n", name,
3658 	    MV_ACC_DESC_GET_VAL_1(w));
3659 	printf("%s:   MAC_TOTAL_LEN = %d\n", name,
3660 	    MV_ACC_DESC_GET_VAL_3(w));
3661 	printf("%s:   MAC_RANGE = 0x%0x - 0x%0x\n", name,
3662 	    MV_ACC_DESC_GET_VAL_1(w),
3663 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3664 	return;
3665 }
3666 
3667 STATIC void
3668 mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3669 {
3670 	/* SA: Dword 6 */
3671 	printf("%s: Dword6=0x%08x\n", name, w);
3672 	printf("%s:   MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3673 	printf("%s:   MAC_BLOCK_LEN = %d\n", name,
3674 	    MV_ACC_DESC_GET_VAL_2(w));
3675 	return;
3676 }
3677 
3678 STATIC void
3679 mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3680 {
3681 	/* SA: Dword 7 */
3682 	printf("%s: Dword7=0x%08x\n", name, w);
3683 	printf("%s:   MAC_INNER_IV = 0x%x\n", name,
3684 	    MV_ACC_DESC_GET_VAL_1(w));
3685 	printf("%s:   MAC_OUTER_IV = 0x%x\n", name,
3686 	    MV_ACC_DESC_GET_VAL_2(w));
3687 	return;
3688 }
3689 #endif
3690 
3691