/* $FreeBSD: src/sys/dev/ubsec/ubsecvar.h,v 1.2.2.5 2003/06/04 17:56:59 sam Exp $ */
/* $DragonFly: src/sys/dev/crypto/ubsec/ubsecvar.h,v 1.2 2003/06/17 04:28:32 dillon Exp $ */
/*	$OpenBSD: ubsecvar.h,v 1.35 2002/09/24 18:33:26 jason Exp $	*/

/*
 * Copyright (c) 2000 Theo de Raadt
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

/* Maximum queue length */
#ifndef UBS_MAX_NQUEUE
#define UBS_MAX_NQUEUE		60
#endif

#define	UBS_MAX_SCATTER		64	/* Maximum scatter/gather depth */

#ifndef UBS_MAX_AGGR
#define	UBS_MAX_AGGR		5	/* Maximum aggregation count */
#endif

#define	UBSEC_CARD(sid)		(((sid) & 0xf0000000) >> 28)
#define	UBSEC_SESSION(sid)	( (sid) & 0x0fffffff)
#define	UBSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
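/*
 * Example (illustrative values only): UBSEC_SID(2, 7) packs card 2 and
 * session 7 into 0x20000007; UBSEC_CARD() recovers the card index from
 * the top 4 bits and UBSEC_SESSION() the session number from the low
 * 28 bits.
 */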

#define UBS_DEF_RTY		0xff	/* PCI Retry Timeout */
#define UBS_DEF_TOUT		0xff	/* PCI TRDY Timeout */
#define UBS_DEF_CACHELINE	0x01	/* Cache Line setting */

#ifdef _KERNEL

struct ubsec_dma_alloc {
	u_int32_t		dma_paddr;
	caddr_t			dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_dma_segment_t	dma_seg;
	bus_size_t		dma_size;
	int			dma_nseg;
};

struct ubsec_q2 {
	SIMPLEQ_ENTRY(ubsec_q2)		q_next;
	struct ubsec_dma_alloc		q_mcr;
	struct ubsec_dma_alloc		q_ctx;
	u_int				q_type;
};

struct ubsec_q2_rng {
	struct ubsec_q2			rng_q;
	struct ubsec_dma_alloc		rng_buf;
	int				rng_used;
};

/* C = (M ^ E) mod N */
#define	UBS_MODEXP_PAR_M	0
#define	UBS_MODEXP_PAR_E	1
#define	UBS_MODEXP_PAR_N	2
#define	UBS_MODEXP_PAR_C	3
struct ubsec_q2_modexp {
	struct ubsec_q2			me_q;
	struct cryptkop *		me_krp;
	struct ubsec_dma_alloc		me_M;
	struct ubsec_dma_alloc		me_E;
	struct ubsec_dma_alloc		me_C;
	struct ubsec_dma_alloc		me_epb;
	int				me_modbits;
	int				me_shiftbits;
	int				me_normbits;
};
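/*
 * Note: me_M, me_E and me_C are the DMA shadows of the operands in
 * C = (M ^ E) mod N above, and me_epb is the exponent parameter block
 * handed to the chip.  me_modbits is the modulus width requested by the
 * caller; me_shiftbits and me_normbits presumably record the shift used
 * when operands are normalized in software on parts that lack the
 * UBS_FLAGS_HWNORM capability.
 */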

#define	UBS_RSAPRIV_PAR_P	0
#define	UBS_RSAPRIV_PAR_Q	1
#define	UBS_RSAPRIV_PAR_DP	2
#define	UBS_RSAPRIV_PAR_DQ	3
#define	UBS_RSAPRIV_PAR_PINV	4
#define	UBS_RSAPRIV_PAR_MSGIN	5
#define	UBS_RSAPRIV_PAR_MSGOUT	6
struct ubsec_q2_rsapriv {
	struct ubsec_q2			rpr_q;
	struct cryptkop *		rpr_krp;
	struct ubsec_dma_alloc		rpr_msgin;
	struct ubsec_dma_alloc		rpr_msgout;
};
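/*
 * Note: the UBS_RSAPRIV_PAR_* indices name the CRT components of an RSA
 * private-key operation (p, q, dP, dQ and the CRT coefficient) together
 * with the input and output messages; rpr_msgin and rpr_msgout are the
 * DMA buffers for the latter two.
 */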

#define	UBSEC_RNG_BUFSIZ	16		/* measured in 32bit words */

struct ubsec_dmachunk {
	struct ubsec_mcr	d_mcr;
	struct ubsec_mcr_add	d_mcradd[UBS_MAX_AGGR-1];
	struct ubsec_pktbuf	d_sbuf[UBS_MAX_SCATTER-1];
	struct ubsec_pktbuf	d_dbuf[UBS_MAX_SCATTER-1];
	u_int32_t		d_macbuf[5];
	union {
		struct ubsec_pktctx_long	ctxl;
		struct ubsec_pktctx		ctx;
	} d_ctx;
};
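/*
 * Note: a ubsec_dmachunk is the DMA-visible image of one request: the
 * master command record (d_mcr) plus up to UBS_MAX_AGGR-1 additional
 * records for requests aggregated behind it, the source and destination
 * scatter/gather descriptors (d_sbuf/d_dbuf), space for the returned
 * authentication data (d_macbuf), and the packet context, long form on
 * devices flagged UBS_FLAGS_LONGCTX and short form otherwise.
 */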

struct ubsec_dma {
	SIMPLEQ_ENTRY(ubsec_dma)	d_next;
	struct ubsec_dmachunk		*d_dma;
	struct ubsec_dma_alloc		d_alloc;
};

#define	UBS_FLAGS_KEY		0x01		/* has key accelerator */
#define	UBS_FLAGS_LONGCTX	0x02		/* uses long ipsec ctx */
#define	UBS_FLAGS_BIGKEY	0x04		/* 2048bit keys */
#define	UBS_FLAGS_HWNORM	0x08		/* hardware normalization */
#define	UBS_FLAGS_RNG		0x10		/* hardware rng */

struct ubsec_operand {
	union {
		struct mbuf *m;
		struct uio *io;
	} u;
	bus_dmamap_t		map;
	bus_size_t		mapsize;
	int			nsegs;
	bus_dma_segment_t	segs[UBS_MAX_SCATTER];
};

struct ubsec_q {
	SIMPLEQ_ENTRY(ubsec_q)		q_next;
	int				q_nstacked_mcrs;
	struct ubsec_q			*q_stacked_mcr[UBS_MAX_AGGR-1];
	struct cryptop			*q_crp;
	struct ubsec_dma		*q_dma;

	struct ubsec_operand		q_src;
	struct ubsec_operand		q_dst;

	int				q_sesn;
	int				q_flags;
};
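/*
 * Note: q_stacked_mcr[] appears to let up to UBS_MAX_AGGR-1 further
 * requests be stacked behind a single MCR submission so that several
 * operations complete on one interrupt (see hst_totbatch/hst_maxbatch
 * below).
 */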

#define	q_src_m		q_src.u.m
#define	q_src_io	q_src.u.io
#define	q_src_map	q_src.map
#define	q_src_nsegs	q_src.nsegs
#define	q_src_segs	q_src.segs
#define	q_src_mapsize	q_src.mapsize

#define	q_dst_m		q_dst.u.m
#define	q_dst_io	q_dst.u.io
#define	q_dst_map	q_dst.map
#define	q_dst_nsegs	q_dst.nsegs
#define	q_dst_segs	q_dst.segs
#define	q_dst_mapsize	q_dst.mapsize

struct ubsec_softc {
	device_t		sc_dev;		/* device backpointer */
	struct resource		*sc_irq;
	void			*sc_ih;		/* interrupt handler cookie */
	bus_space_handle_t	sc_sh;		/* memory handle */
	bus_space_tag_t		sc_st;		/* memory tag */
	struct resource		*sc_sr;		/* memory resource */
	bus_dma_tag_t		sc_dmat;	/* dma tag */
	int			sc_flags;	/* device specific flags */
	int			sc_suspended;
	int			sc_needwakeup;	/* notify crypto layer */
	u_int32_t		sc_statmask;	/* interrupt status mask */
	int32_t			sc_cid;		/* crypto tag */
	SIMPLEQ_HEAD(,ubsec_q)	sc_queue;	/* packet queue, mcr1 */
	int			sc_nqueue;	/* count enqueued, mcr1 */
	SIMPLEQ_HEAD(,ubsec_q)	sc_qchip;	/* on chip, mcr1 */
	int			sc_nqchip;	/* count on chip, mcr1 */
	SIMPLEQ_HEAD(,ubsec_q)	sc_freequeue;	/* list of free queue elements */
	SIMPLEQ_HEAD(,ubsec_q2)	sc_queue2;	/* packet queue, mcr2 */
	int			sc_nqueue2;	/* count enqueued, mcr2 */
	SIMPLEQ_HEAD(,ubsec_q2)	sc_qchip2;	/* on chip, mcr2 */
	int			sc_nsessions;	/* # of sessions */
	struct ubsec_session	*sc_sessions;	/* sessions */
	struct callout		sc_rngto;	/* rng timeout */
	int			sc_rnghz;	/* rng poll time */
	struct ubsec_q2_rng	sc_rng;
	struct rndtest_state	*sc_rndtest;	/* RNG test state */
	void			(*sc_harvest)(struct rndtest_state *,
					void *, u_int);
	struct ubsec_dma	sc_dmaa[UBS_MAX_NQUEUE];
	struct ubsec_q		*sc_queuea[UBS_MAX_NQUEUE];
	SIMPLEQ_HEAD(,ubsec_q2)	sc_q2free;	/* free list */
};
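/*
 * Note: symmetric (MCR1) requests appear to move from sc_queue to
 * sc_qchip once handed to the device and back to sc_freequeue when they
 * complete, with sc_nqueue/sc_nqchip tracking the counts; key and RNG
 * (MCR2) work flows through sc_queue2/sc_qchip2 in the same way.
 */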

#define	UBSEC_QFLAGS_COPYOUTIV		0x1

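/*
 * Note: a session caches per-session state: the 3DES key as six 32-bit
 * words, what appear to be precomputed inner and outer HMAC digest
 * states (five 32-bit words each, i.e. MD5/SHA1 sized), and a 64-bit
 * [3]DES IV.
 */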
struct ubsec_session {
	u_int32_t	ses_used;
	u_int32_t	ses_deskey[6];		/* 3DES key */
	u_int32_t	ses_hminner[5];		/* hmac inner state */
	u_int32_t	ses_hmouter[5];		/* hmac outer state */
	u_int32_t	ses_iv[2];		/* [3]DES iv */
};
#endif /* _KERNEL */

struct ubsec_stats {
	u_int64_t hst_ibytes;
	u_int64_t hst_obytes;
	u_int32_t hst_ipackets;
	u_int32_t hst_opackets;
	u_int32_t hst_invalid;		/* invalid argument */
	u_int32_t hst_badsession;	/* invalid session id */
	u_int32_t hst_badflags;		/* flags indicate !(mbuf | uio) */
	u_int32_t hst_nodesc;		/* op submitted w/o descriptors */
	u_int32_t hst_badalg;		/* unsupported algorithm */
	u_int32_t hst_nomem;
	u_int32_t hst_queuefull;
	u_int32_t hst_dmaerr;
	u_int32_t hst_mcrerr;
	u_int32_t hst_nodmafree;
	u_int32_t hst_lenmismatch;	/* enc/auth lengths different */
	u_int32_t hst_skipmismatch;	/* enc part begins before auth part */
	u_int32_t hst_iovmisaligned;	/* iov op not aligned */
	u_int32_t hst_noirq;		/* IRQ for no reason */
	u_int32_t hst_unaligned;	/* unaligned src caused copy */
	u_int32_t hst_nomap;		/* bus_dmamap_create failed */
	u_int32_t hst_noload;		/* bus_dmamap_load_* failed */
	u_int32_t hst_nombuf;		/* MGET* failed */
	u_int32_t hst_nomcl;		/* MCLGET* failed */
	u_int32_t hst_totbatch;		/* ops submitted w/o interrupt */
	u_int32_t hst_maxbatch;		/* max ops submitted together */
	u_int32_t hst_maxqueue;		/* max ops queued for submission */
	u_int32_t hst_maxqchip;		/* max mcr1 ops out for processing */
	u_int32_t hst_mcr1full;		/* MCR1 too busy to take ops */
	u_int32_t hst_rng;		/* RNG requests */
	u_int32_t hst_modexp;		/* MOD EXP requests */
	u_int32_t hst_modexpcrt;	/* MOD EXP CRT requests */
};
256