xref: /freebsd/sys/crypto/ccp/ccp.h (revision 535af610)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#pragma once

/*
 * Keccak SHAKE128 (if supported by the device?) uses a 1344-bit block.
 * SHA3-224 has the next largest block size, at 1152 bits.  However, crypto(4)
 * doesn't support any SHA3 hash, so SHA2 is the constraint:
 */
#define CCP_HASH_MAX_BLOCK_SIZE	(SHA2_512_BLOCK_LEN)

#define CCP_AES_MAX_KEY_LEN	(AES_XTS_MAX_KEY)
#define CCP_MAX_CRYPTO_IV_LEN	32	/* GCM IV + GHASH context */

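/*
 * Upper bounds on the number of hardware command queues and LSB (local
 * storage block) regions tracked per device.
 */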
#define MAX_HW_QUEUES		5
#define MAX_LSB_REGIONS		8

#ifndef __must_check
#define __must_check __attribute__((__warn_unused_result__))
#endif

/*
 * Internal data structures.
 */
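/* SHA variant selected for hash operations submitted to the engine. */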
enum sha_version {
	SHA1,
#if 0
	SHA2_224,
#endif
	SHA2_256, SHA2_384, SHA2_512
};

/*
 * XXX: The hmac.res, gmac.final_block, and blkcipher.iv fields are
 * used by individual requests, meaning that sessions cannot have more
 * than a single request in flight at a time.
 */
struct ccp_session_hmac {
	const struct auth_hash *auth_hash;
	int hash_len;
	unsigned int auth_mode;
	char ipad[CCP_HASH_MAX_BLOCK_SIZE];
	char opad[CCP_HASH_MAX_BLOCK_SIZE];
	char res[CCP_HASH_MAX_BLOCK_SIZE];
};

struct ccp_session_gmac {
	int hash_len;
	char final_block[GMAC_BLOCK_LEN];
};

struct ccp_session_blkcipher {
	unsigned cipher_mode;
	unsigned cipher_type;
	unsigned key_len;
	char enckey[CCP_AES_MAX_KEY_LEN];
	char iv[CCP_MAX_CRYPTO_IV_LEN];
};

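/*
 * Per-session state.  'mode' selects which union member is meaningful:
 * 'hmac' for HMAC and AUTHENC sessions, 'gmac' for GCM sessions.  The
 * blkcipher state sits outside the union since the cipher modes
 * (BLKCIPHER, AUTHENC, GCM) need it alongside any authentication state.
 */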
struct ccp_session {
	bool active;
	int pending;
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	unsigned queue;
	union {
		struct ccp_session_hmac hmac;
		struct ccp_session_gmac gmac;
	};
	struct ccp_session_blkcipher blkcipher;
};

struct ccp_softc;
struct ccp_queue {
	struct mtx		cq_lock;
	unsigned		cq_qindex;
	struct ccp_softc	*cq_softc;

	/* Host memory and tracking structures for descriptor ring. */
	bus_dma_tag_t		ring_desc_tag;
	bus_dmamap_t		ring_desc_map;
	struct ccp_desc		*desc_ring;
	bus_addr_t		desc_ring_bus_addr;
	/* Callbacks and arguments ring; indices correspond to above ring. */
	struct ccp_completion_ctx *completions_ring;

	uint32_t		qcontrol;	/* Cached register value */
	unsigned		lsb_mask;	/* LSBs available to queue */
	int			private_lsb;	/* Reserved LSB #, or -1 */

	unsigned		cq_head;
	unsigned		cq_tail;
	unsigned		cq_acq_tail;

	bool			cq_waiting;	/* Thread waiting for space */

	struct sglist		*cq_sg_crp;
	struct sglist		*cq_sg_ulptx;
	struct sglist		*cq_sg_dst;
};

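/*
 * Completion context recorded alongside each submitted descriptor;
 * callback_fn is invoked (with callback_arg and the session) once the
 * corresponding descriptor has completed or failed.
 */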
struct ccp_completion_ctx {
	void (*callback_fn)(struct ccp_queue *qp, struct ccp_session *s,
	    void *arg, int error);
	void *callback_arg;
	struct ccp_session *session;
};

struct ccp_softc {
	device_t dev;
	int32_t cid;
	struct mtx lock;
	bool detaching;

	unsigned ring_size_order;

	/*
	 * Each command queue is either public or private; queues are private
	 * (PSP-only) by default.  The PSP grants the host access to some
	 * queues via the QMR (Queue Mask Register); set bits are
	 * host-accessible.
	 */
	uint8_t valid_queues;

	uint8_t hw_version;
	uint8_t num_queues;
	uint16_t hw_features;
	uint16_t num_lsb_entries;

	/* Primary BAR (RID 2) used for register access */
	bus_space_tag_t pci_bus_tag;
	bus_space_handle_t pci_bus_handle;
	int pci_resource_id;
	struct resource *pci_resource;

	/* Secondary BAR (RID 5) apparently used for MSI-X */
	int pci_resource_id_msix;
	struct resource *pci_resource_msix;

	/* Interrupt resources */
	void *intr_tag[2];
	struct resource *intr_res[2];
	unsigned intr_count;

	struct ccp_queue queues[MAX_HW_QUEUES];
};

/* Internal globals */
SYSCTL_DECL(_hw_ccp);
MALLOC_DECLARE(M_CCP);
extern bool g_debug_print;
extern struct ccp_softc *g_ccp_softc;

/*
 * Debug macros.
 */
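/*
 * Debug output is enabled at runtime via g_debug_print.  The literal "XXX "
 * tag on each message presumably makes the output easy to grep for.
 */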
#define DPRINTF(dev, ...)	do {				\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)

#if 0
#define INSECURE_DEBUG(dev, ...)	do {			\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)
#else
#define INSECURE_DEBUG(dev, ...)
#endif

/*
 * Internal hardware manipulation routines.
 */
int ccp_hw_attach(device_t dev);
void ccp_hw_detach(device_t dev);

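/*
 * Publish the queue's cached software tail to the hardware tail register so
 * the engine begins processing newly written descriptors.
 */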
void ccp_queue_write_tail(struct ccp_queue *qp);

#ifdef DDB
void db_ccp_show_hw(struct ccp_softc *sc);
void db_ccp_show_queue_hw(struct ccp_queue *qp);
#endif

/*
 * Internal hardware crypt-op submission routines.
 */
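/*
 * Each routine encodes one crypto(4) request as descriptors on the given
 * queue and returns 0 on success or an errno on failure; the __must_check
 * annotation forces callers to handle submission failure.
 */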
int ccp_authenc(struct ccp_queue *sc, struct ccp_session *s,
    struct cryptop *crp) __must_check;
int ccp_blkcipher(struct ccp_queue *sc, struct ccp_session *s,
    struct cryptop *crp) __must_check;
int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp)
    __must_check;
int ccp_hmac(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp)
    __must_check;

/*
 * Internal hardware TRNG read routine.
 */
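/*
 * Reads up to 'c' bytes of entropy from the device's TRNG into 'v' and
 * returns the number of bytes actually harvested (random(4) source read
 * signature).
 */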
u_int random_ccp_read(void *v, u_int c);

/* XXX */
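/*
 * Descriptor ring reservation helpers.  A sketch of the expected caller flow,
 * inferred from the names and the __must_check annotation (the 'ndesc' count
 * is hypothetical, for illustration only):
 *
 *	error = ccp_queue_acquire_reserve(qp, ndesc, M_NOWAIT);
 *	if (error != 0)
 *		return (error);
 *	... encode up to 'ndesc' descriptors on the ring ...
 *	ccp_queue_release(qp);		(submit)
 *
 * ccp_queue_abort() instead backs a reservation out without submitting.
 */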
int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
    __must_check;
void ccp_queue_abort(struct ccp_queue *qp);
void ccp_queue_release(struct ccp_queue *qp);

/*
 * Internal inline routines.
 */
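/*
 * Number of descriptors currently outstanding between the software head and
 * tail; the ring size is a power of two, so the difference can simply be
 * masked.
 */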
static inline unsigned
ccp_queue_get_active(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((qp->cq_tail - qp->cq_head) & ((1 << sc->ring_size_order) - 1));
}

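/*
 * Free descriptor slots remaining.  One slot is always left unused so that a
 * full ring can be distinguished from an empty one (head == tail).
 */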
static inline unsigned
ccp_queue_get_ring_space(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((1 << sc->ring_size_order) - ccp_queue_get_active(qp) - 1);
}