xref: /freebsd/sys/dev/qat_c2xxx/qat.c (revision 685dc743)
1 /* SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause */
2 /*	$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $	*/
3 
4 /*
5  * Copyright (c) 2019 Internet Initiative Japan, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
32  *
33  *   Redistribution and use in source and binary forms, with or without
34  *   modification, are permitted provided that the following conditions
35  *   are met:
36  *
37  *     * Redistributions of source code must retain the above copyright
38  *       notice, this list of conditions and the following disclaimer.
39  *     * Redistributions in binary form must reproduce the above copyright
40  *       notice, this list of conditions and the following disclaimer in
41  *       the documentation and/or other materials provided with the
42  *       distribution.
43  *     * Neither the name of Intel Corporation nor the names of its
44  *       contributors may be used to endorse or promote products derived
45  *       from this software without specific prior written permission.
46  *
47  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
48  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
49  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
50  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
51  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
57  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58  */
59 
60 #include <sys/cdefs.h>
61 #if 0
62 __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $");
63 #endif
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/bus.h>
68 #include <sys/cpu.h>
69 #include <sys/firmware.h>
70 #include <sys/kernel.h>
71 #include <sys/mbuf.h>
72 #include <sys/md5.h>
73 #include <sys/module.h>
74 #include <sys/mutex.h>
75 #include <sys/smp.h>
76 #include <sys/sysctl.h>
77 #include <sys/rman.h>
78 
79 #include <machine/bus.h>
80 
81 #include <opencrypto/cryptodev.h>
82 #include <opencrypto/xform.h>
83 
84 #include "cryptodev_if.h"
85 
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 
89 #include "qatreg.h"
90 #include "qatvar.h"
91 #include "qat_aevar.h"
92 
93 extern struct qat_hw qat_hw_c2xxx;
94 
95 #define PCI_VENDOR_INTEL			0x8086
96 #define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS	0x1f18
97 
98 static const struct qat_product {
99 	uint16_t qatp_vendor;
100 	uint16_t qatp_product;
101 	const char *qatp_name;
102 	enum qat_chip_type qatp_chip;
103 	const struct qat_hw *qatp_hw;
104 } qat_products[] = {
105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
106 	  "Intel C2000 QuickAssist PF",
107 	  QAT_CHIP_C2XXX, &qat_hw_c2xxx },
108 	{ 0, 0, NULL, 0, NULL },
109 };
110 
111 /* Hash Algorithm specific structure */
112 
113 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS standard 180-2 */
114 static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
115 	0x67, 0x45, 0x23, 0x01,
116 	0xef, 0xcd, 0xab, 0x89,
117 	0x98, 0xba, 0xdc, 0xfe,
118 	0x10, 0x32, 0x54, 0x76,
119 	0xc3, 0xd2, 0xe1, 0xf0
120 };
121 
122 /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS standard 180-2 */
123 static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
124 	0x6a, 0x09, 0xe6, 0x67,
125 	0xbb, 0x67, 0xae, 0x85,
126 	0x3c, 0x6e, 0xf3, 0x72,
127 	0xa5, 0x4f, 0xf5, 0x3a,
128 	0x51, 0x0e, 0x52, 0x7f,
129 	0x9b, 0x05, 0x68, 0x8c,
130 	0x1f, 0x83, 0xd9, 0xab,
131 	0x5b, 0xe0, 0xcd, 0x19
132 };
133 
134 /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS standard 180-2 */
135 static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
136 	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
137 	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
138 	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
139 	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
140 	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
141 	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
142 	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
143 	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
144 };
145 
146 /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS standard 180-2 */
147 static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
148 	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
149 	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
150 	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
151 	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
152 	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
153 	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
154 	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
155 	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
156 };
157 
158 static const struct qat_sym_hash_alg_info sha1_info = {
159 	.qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE,
160 	.qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE,
161 	.qshai_state_size = QAT_HASH_SHA1_STATE_SIZE,
162 	.qshai_init_state = sha1_initial_state,
163 	.qshai_sah = &auth_hash_hmac_sha1,
164 	.qshai_state_offset = 0,
165 	.qshai_state_word = 4,
166 };
167 
168 static const struct qat_sym_hash_alg_info sha256_info = {
169 	.qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE,
170 	.qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE,
171 	.qshai_state_size = QAT_HASH_SHA256_STATE_SIZE,
172 	.qshai_init_state = sha256_initial_state,
173 	.qshai_sah = &auth_hash_hmac_sha2_256,
174 	.qshai_state_offset = offsetof(SHA256_CTX, state),
175 	.qshai_state_word = 4,
176 };
177 
178 static const struct qat_sym_hash_alg_info sha384_info = {
179 	.qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE,
180 	.qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE,
181 	.qshai_state_size = QAT_HASH_SHA384_STATE_SIZE,
182 	.qshai_init_state = sha384_initial_state,
183 	.qshai_sah = &auth_hash_hmac_sha2_384,
184 	.qshai_state_offset = offsetof(SHA384_CTX, state),
185 	.qshai_state_word = 8,
186 };
187 
188 static const struct qat_sym_hash_alg_info sha512_info = {
189 	.qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE,
190 	.qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE,
191 	.qshai_state_size = QAT_HASH_SHA512_STATE_SIZE,
192 	.qshai_init_state = sha512_initial_state,
193 	.qshai_sah = &auth_hash_hmac_sha2_512,
194 	.qshai_state_offset = offsetof(SHA512_CTX, state),
195 	.qshai_state_word = 8,
196 };
197 
198 static const struct qat_sym_hash_alg_info aes_gcm_info = {
199 	.qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE,
200 	.qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE,
201 	.qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE,
202 	.qshai_sah = &auth_hash_nist_gmac_aes_128,
203 };
204 
205 /* Hash QAT specific structures */
206 
207 static const struct qat_sym_hash_qat_info sha1_config = {
208 	.qshqi_algo_enc = HW_AUTH_ALGO_SHA1,
209 	.qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE,
210 	.qshqi_state1_len = HW_SHA1_STATE1_SZ,
211 	.qshqi_state2_len = HW_SHA1_STATE2_SZ,
212 };
213 
214 static const struct qat_sym_hash_qat_info sha256_config = {
215 	.qshqi_algo_enc = HW_AUTH_ALGO_SHA256,
216 	.qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE,
217 	.qshqi_state1_len = HW_SHA256_STATE1_SZ,
218 	.qshqi_state2_len = HW_SHA256_STATE2_SZ
219 };
220 
221 static const struct qat_sym_hash_qat_info sha384_config = {
222 	.qshqi_algo_enc = HW_AUTH_ALGO_SHA384,
223 	.qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE,
224 	.qshqi_state1_len = HW_SHA384_STATE1_SZ,
225 	.qshqi_state2_len = HW_SHA384_STATE2_SZ
226 };
227 
228 static const struct qat_sym_hash_qat_info sha512_config = {
229 	.qshqi_algo_enc = HW_AUTH_ALGO_SHA512,
230 	.qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE,
231 	.qshqi_state1_len = HW_SHA512_STATE1_SZ,
232 	.qshqi_state2_len = HW_SHA512_STATE2_SZ
233 };
234 
235 static const struct qat_sym_hash_qat_info aes_gcm_config = {
236 	.qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128,
237 	.qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE,
238 	.qshqi_state1_len = HW_GALOIS_128_STATE1_SZ,
239 	.qshqi_state2_len =
240 	    HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ,
241 };
242 
243 static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
244 	[QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
245 	[QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
246 	[QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
247 	[QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
248 	[QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
249 };
250 
251 static const struct qat_product *qat_lookup(device_t);
252 static int	qat_probe(device_t);
253 static int	qat_attach(device_t);
254 static int	qat_init(device_t);
255 static int	qat_start(device_t);
256 static int	qat_detach(device_t);
257 
258 static int	qat_newsession(device_t dev, crypto_session_t cses,
259 		    const struct crypto_session_params *csp);
260 static void	qat_freesession(device_t dev, crypto_session_t cses);
261 
262 static int	qat_setup_msix_intr(struct qat_softc *);
263 
264 static void	qat_etr_init(struct qat_softc *);
265 static void	qat_etr_deinit(struct qat_softc *);
266 static void	qat_etr_bank_init(struct qat_softc *, int);
267 static void	qat_etr_bank_deinit(struct qat_softc *sc, int);
268 
269 static void	qat_etr_ap_bank_init(struct qat_softc *);
270 static void	qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
271 static void	qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
272 		    uint32_t, int);
273 static void	qat_etr_ap_bank_setup_ring(struct qat_softc *,
274 		    struct qat_ring *);
275 static int	qat_etr_verify_ring_size(uint32_t, uint32_t);
276 
277 static int	qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
278 		    struct qat_ring *);
279 static void	qat_etr_bank_intr(void *);
280 
281 static void	qat_arb_update(struct qat_softc *, struct qat_bank *);
282 
283 static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie(
284 		    struct qat_crypto_bank *);
285 static void	qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
286 		    struct qat_sym_cookie *);
287 static int	qat_crypto_setup_ring(struct qat_softc *,
288 		    struct qat_crypto_bank *);
289 static int	qat_crypto_bank_init(struct qat_softc *,
290 		    struct qat_crypto_bank *);
291 static int	qat_crypto_init(struct qat_softc *);
292 static void	qat_crypto_deinit(struct qat_softc *);
293 static int	qat_crypto_start(struct qat_softc *);
294 static void	qat_crypto_stop(struct qat_softc *);
295 static int	qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
296 
297 static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver");
298 
299 static const struct qat_product *
300 qat_lookup(device_t dev)
301 {
302 	const struct qat_product *qatp;
303 
304 	for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
305 		if (pci_get_vendor(dev) == qatp->qatp_vendor &&
306 		    pci_get_device(dev) == qatp->qatp_product)
307 			return qatp;
308 	}
309 	return NULL;
310 }
311 
312 static int
313 qat_probe(device_t dev)
314 {
315 	const struct qat_product *prod;
316 
317 	prod = qat_lookup(dev);
318 	if (prod != NULL) {
319 		device_set_desc(dev, prod->qatp_name);
320 		return BUS_PROBE_DEFAULT;
321 	}
322 	return ENXIO;
323 }
324 
325 static int
326 qat_attach(device_t dev)
327 {
328 	struct qat_softc *sc = device_get_softc(dev);
329 	const struct qat_product *qatp;
330 	int bar, count, error, i;
331 
332 	sc->sc_dev = dev;
333 	sc->sc_rev = pci_get_revid(dev);
334 	sc->sc_crypto.qcy_cid = -1;
335 
336 	qatp = qat_lookup(dev);
337 	memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
338 
339 	/* Determine active accelerators and engines */
340 	sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
341 	sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
342 
343 	sc->sc_accel_num = 0;
344 	for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
345 		if (sc->sc_accel_mask & (1 << i))
346 			sc->sc_accel_num++;
347 	}
348 	sc->sc_ae_num = 0;
349 	for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
350 		if (sc->sc_ae_mask & (1 << i))
351 			sc->sc_ae_num++;
352 	}
353 
354 	if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
355 		device_printf(sc->sc_dev, "couldn't find acceleration");
356 		goto fail;
357 	}
358 
359 	MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL);
360 	MPASS(sc->sc_ae_num <= MAX_NUM_AE);
361 
362 	/* Determine SKU and capabilities */
363 	sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
364 	sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
365 	sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
366 
367 	i = 0;
368 	if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
369 		MPASS(sc->sc_hw.qhw_sram_bar_id == 0);
370 		uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4);
371 		/* Skip SRAM BAR */
372 		i = (fusectl & FUSECTL_MASK) ? 1 : 0;
373 	}
374 	for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) {
375 		uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4);
376 		if (val == 0 || !PCI_BAR_MEM(val))
377 			continue;
378 
379 		sc->sc_rid[i] = PCIR_BAR(bar);
380 		sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
381 		    &sc->sc_rid[i], RF_ACTIVE);
382 		if (sc->sc_res[i] == NULL) {
383 			device_printf(dev, "couldn't map BAR %d\n", bar);
384 			goto fail;
385 		}
386 
387 		sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]);
388 		sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]);
389 
390 		i++;
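		/*
		 * A 64-bit memory BAR occupies two consecutive BAR
		 * registers, so skip the upper half before probing the
		 * next BAR.
		 */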
391 		if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64)
392 			bar++;
393 	}
394 
395 	pci_enable_busmaster(dev);
396 
397 	count = sc->sc_hw.qhw_num_banks + 1;
398 	if (pci_msix_count(dev) < count) {
399 		device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n",
400 		    pci_msix_count(dev), count);
401 		goto fail;
402 	}
403 	error = pci_alloc_msix(dev, &count);
404 	if (error != 0) {
405 		device_printf(dev, "failed to allocate MSI-X vectors\n");
406 		goto fail;
407 	}
408 
409 	error = qat_init(dev);
410 	if (error == 0)
411 		return 0;
412 
413 fail:
414 	qat_detach(dev);
415 	return ENXIO;
416 }
417 
418 static int
419 qat_init(device_t dev)
420 {
421 	struct qat_softc *sc = device_get_softc(dev);
422 	int error;
423 
424 	qat_etr_init(sc);
425 
426 	if (sc->sc_hw.qhw_init_admin_comms != NULL &&
427 	    (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
428 		device_printf(sc->sc_dev,
429 		    "Could not initialize admin comms: %d\n", error);
430 		return error;
431 	}
432 
433 	if (sc->sc_hw.qhw_init_arb != NULL &&
434 	    (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
435 		device_printf(sc->sc_dev,
436 		    "Could not initialize hw arbiter: %d\n", error);
437 		return error;
438 	}
439 
440 	error = qat_ae_init(sc);
441 	if (error) {
442 		device_printf(sc->sc_dev,
443 		    "Could not initialize Acceleration Engine: %d\n", error);
444 		return error;
445 	}
446 
447 	error = qat_aefw_load(sc);
448 	if (error) {
449 		device_printf(sc->sc_dev,
450 		    "Could not load firmware: %d\n", error);
451 		return error;
452 	}
453 
454 	error = qat_setup_msix_intr(sc);
455 	if (error) {
456 		device_printf(sc->sc_dev,
457 		    "Could not setup interrupts: %d\n", error);
458 		return error;
459 	}
460 
461 	sc->sc_hw.qhw_enable_intr(sc);
462 
463 	error = qat_crypto_init(sc);
464 	if (error) {
465 		device_printf(sc->sc_dev,
466 		    "Could not initialize service: %d\n", error);
467 		return error;
468 	}
469 
470 	if (sc->sc_hw.qhw_enable_error_correction != NULL)
471 		sc->sc_hw.qhw_enable_error_correction(sc);
472 
473 	if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
474 	    (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
475 		device_printf(sc->sc_dev,
476 		    "Could not initialize watchdog timer: %d\n", error);
477 		return error;
478 	}
479 
480 	error = qat_start(dev);
481 	if (error) {
482 		device_printf(sc->sc_dev,
483 		    "Could not start: %d\n", error);
484 		return error;
485 	}
486 
487 	return 0;
488 }
489 
490 static int
491 qat_start(device_t dev)
492 {
493 	struct qat_softc *sc = device_get_softc(dev);
494 	int error;
495 
496 	error = qat_ae_start(sc);
497 	if (error)
498 		return error;
499 
500 	if (sc->sc_hw.qhw_send_admin_init != NULL &&
501 	    (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
502 		return error;
503 	}
504 
505 	error = qat_crypto_start(sc);
506 	if (error)
507 		return error;
508 
509 	return 0;
510 }
511 
512 static int
513 qat_detach(device_t dev)
514 {
515 	struct qat_softc *sc;
516 	int bar, i;
517 
518 	sc = device_get_softc(dev);
519 
520 	qat_crypto_stop(sc);
521 	qat_crypto_deinit(sc);
522 	qat_aefw_unload(sc);
523 
524 	if (sc->sc_etr_banks != NULL) {
525 		for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
526 			struct qat_bank *qb = &sc->sc_etr_banks[i];
527 
528 			if (qb->qb_ih_cookie != NULL)
529 				(void)bus_teardown_intr(dev, qb->qb_ih,
530 				    qb->qb_ih_cookie);
531 			if (qb->qb_ih != NULL)
532 				(void)bus_release_resource(dev, SYS_RES_IRQ,
533 				    i + 1, qb->qb_ih);
534 		}
535 	}
536 	if (sc->sc_ih_cookie != NULL) {
537 		(void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie);
538 		sc->sc_ih_cookie = NULL;
539 	}
540 	if (sc->sc_ih != NULL) {
541 		(void)bus_release_resource(dev, SYS_RES_IRQ,
542 		    sc->sc_hw.qhw_num_banks + 1, sc->sc_ih);
543 		sc->sc_ih = NULL;
544 	}
545 	pci_release_msi(dev);
546 
547 	qat_etr_deinit(sc);
548 
549 	for (bar = 0; bar < MAX_BARS; bar++) {
550 		if (sc->sc_res[bar] != NULL) {
551 			(void)bus_release_resource(dev, SYS_RES_MEMORY,
552 			    sc->sc_rid[bar], sc->sc_res[bar]);
553 			sc->sc_res[bar] = NULL;
554 		}
555 	}
556 
557 	return 0;
558 }
559 
560 void *
561 qat_alloc_mem(size_t size)
562 {
563 	return (malloc(size, M_QAT, M_WAITOK | M_ZERO));
564 }
565 
566 void
567 qat_free_mem(void *ptr)
568 {
569 	free(ptr, M_QAT);
570 }
571 
572 static void
573 qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
574     int error)
575 {
576 	struct qat_dmamem *qdm;
577 
578 	if (error != 0)
579 		return;
580 
581 	KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
582 	qdm = arg;
583 	qdm->qdm_dma_seg = segs[0];
584 }
585 
586 int
587 qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
588     int nseg, bus_size_t size, bus_size_t alignment)
589 {
590 	int error;
591 
592 	KASSERT(qdm->qdm_dma_vaddr == NULL,
593 	    ("%s: DMA memory descriptor in use", __func__));
594 
595 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
596 	    alignment, 0, 		/* alignment, boundary */
597 	    BUS_SPACE_MAXADDR,		/* lowaddr */
598 	    BUS_SPACE_MAXADDR, 		/* highaddr */
599 	    NULL, NULL, 		/* filter, filterarg */
600 	    size,			/* maxsize */
601 	    nseg,			/* nsegments */
602 	    size,			/* maxsegsize */
603 	    BUS_DMA_COHERENT,		/* flags */
604 	    NULL, NULL,			/* lockfunc, lockarg */
605 	    &qdm->qdm_dma_tag);
606 	if (error != 0)
607 		return error;
608 
609 	error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr,
610 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
611 	    &qdm->qdm_dma_map);
612 	if (error != 0) {
613 		device_printf(sc->sc_dev,
614 		    "couldn't allocate dmamem, error = %d\n", error);
615 		goto fail_0;
616 	}
617 
618 	error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map,
619 	    qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm,
620 	    BUS_DMA_NOWAIT);
621 	if (error) {
622 		device_printf(sc->sc_dev,
623 		    "couldn't load dmamem map, error = %d\n", error);
624 		goto fail_1;
625 	}
626 
627 	return 0;
628 fail_1:
629 	bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map);
630 fail_0:
631 	bus_dma_tag_destroy(qdm->qdm_dma_tag);
632 	return error;
633 }
634 
635 void
636 qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
637 {
638 	if (qdm->qdm_dma_tag != NULL) {
639 		bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map);
640 		bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr,
641 		    qdm->qdm_dma_map);
642 		bus_dma_tag_destroy(qdm->qdm_dma_tag);
643 		explicit_bzero(qdm, sizeof(*qdm));
644 	}
645 }
646 
647 static int
648 qat_setup_msix_intr(struct qat_softc *sc)
649 {
650 	device_t dev;
651 	int error, i, rid;
652 
653 	dev = sc->sc_dev;
654 
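	/*
	 * Vector layout mirrors qat_attach(): one MSI-X vector per ring
	 * bank, followed by one final vector for the AE cluster interrupt.
	 */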
655 	for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) {
656 		struct qat_bank *qb = &sc->sc_etr_banks[i - 1];
657 
658 		rid = i;
659 		qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
660 		    RF_ACTIVE);
661 		if (qb->qb_ih == NULL) {
662 			device_printf(dev,
663 			    "failed to allocate bank intr resource\n");
664 			return ENXIO;
665 		}
666 		error = bus_setup_intr(dev, qb->qb_ih,
667 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb,
668 		    &qb->qb_ih_cookie);
669 		if (error != 0) {
670 			device_printf(dev, "failed to set up bank intr\n");
671 			return error;
672 		}
673 		error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus);
674 		if (error != 0)
675 			device_printf(dev, "failed to bind intr %d\n", i);
676 	}
677 
678 	rid = i;
679 	sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
680 	    RF_ACTIVE);
681 	if (sc->sc_ih == NULL)
682 		return ENXIO;
683 	error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE,
684 	    NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie);
685 
686 	return error;
687 }
688 
689 static void
690 qat_etr_init(struct qat_softc *sc)
691 {
692 	int i;
693 
694 	sc->sc_etr_banks = qat_alloc_mem(
695 	    sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
696 
697 	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
698 		qat_etr_bank_init(sc, i);
699 
700 	if (sc->sc_hw.qhw_num_ap_banks) {
701 		sc->sc_etr_ap_banks = qat_alloc_mem(
702 		    sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
703 		qat_etr_ap_bank_init(sc);
704 	}
705 }
706 
707 static void
708 qat_etr_deinit(struct qat_softc *sc)
709 {
710 	int i;
711 
712 	if (sc->sc_etr_banks != NULL) {
713 		for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
714 			qat_etr_bank_deinit(sc, i);
715 		qat_free_mem(sc->sc_etr_banks);
716 		sc->sc_etr_banks = NULL;
717 	}
718 	if (sc->sc_etr_ap_banks != NULL) {
719 		qat_free_mem(sc->sc_etr_ap_banks);
720 		sc->sc_etr_ap_banks = NULL;
721 	}
722 }
723 
724 static void
725 qat_etr_bank_init(struct qat_softc *sc, int bank)
726 {
727 	struct qat_bank *qb = &sc->sc_etr_banks[bank];
728 	int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
729 
730 	MPASS(bank < sc->sc_hw.qhw_num_banks);
731 
732 	mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF);
733 
734 	qb->qb_sc = sc;
735 	qb->qb_bank = bank;
736 	qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
737 
738 	/* Clean CSRs for all rings within the bank */
739 	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
740 		struct qat_ring *qr = &qb->qb_et_rings[i];
741 
742 		qat_etr_bank_ring_write_4(sc, bank, i,
743 		    ETR_RING_CONFIG, 0);
744 		qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
745 
746 		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
747 			qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
748 		} else if (sc->sc_hw.qhw_tx_rings_mask &
749 		    (1 << (i - tx_rx_gap))) {
750 			/* Share inflight counter with rx and tx */
751 			qr->qr_inflight =
752 			    qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
753 		}
754 	}
755 
756 	if (sc->sc_hw.qhw_init_etr_intr != NULL) {
757 		sc->sc_hw.qhw_init_etr_intr(sc, bank);
758 	} else {
759 		/* common code in qat 1.7 */
760 		qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
761 		    ETR_INT_REG_CLEAR_MASK);
762 		for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
763 		    ETR_RINGS_PER_INT_SRCSEL; i++) {
764 			qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
765 			    (i * ETR_INT_SRCSEL_NEXT_OFFSET),
766 			    ETR_INT_SRCSEL_MASK);
767 		}
768 	}
769 }
770 
771 static void
772 qat_etr_bank_deinit(struct qat_softc *sc, int bank)
773 {
774 	struct qat_bank *qb;
775 	struct qat_ring *qr;
776 	int i;
777 
778 	qb = &sc->sc_etr_banks[bank];
779 	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
780 		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
781 			qr = &qb->qb_et_rings[i];
782 			qat_free_mem(qr->qr_inflight);
783 		}
784 	}
785 }
786 
787 static void
788 qat_etr_ap_bank_init(struct qat_softc *sc)
789 {
790 	int ap_bank;
791 
792 	for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
793 		struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
794 
795 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
796 		    ETR_AP_NF_MASK_INIT);
797 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
798 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
799 		    ETR_AP_NE_MASK_INIT);
800 		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
801 
802 		memset(qab, 0, sizeof(*qab));
803 	}
804 }
805 
806 static void
807 qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
808 {
809 	if (set_mask)
810 		*ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
811 	else
812 		*ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
813 }
814 
815 static void
816 qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
817     uint32_t ring, int set_dest)
818 {
819 	uint32_t ae_mask;
820 	uint8_t mailbox, ae, nae;
821 	uint8_t *dest = (uint8_t *)ap_dest;
822 
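	/*
	 * The destination register is treated as an array of per-AE bytes;
	 * program one byte per enabled engine, up to ETR_MAX_AE_PER_MAILBOX.
	 */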
823 	mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
824 
825 	nae = 0;
826 	ae_mask = sc->sc_ae_mask;
827 	for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
828 		if ((ae_mask & (1 << ae)) == 0)
829 			continue;
830 
831 		if (set_dest) {
832 			dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
833 			    __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
834 			    ETR_AP_DEST_ENABLE;
835 		} else {
836 			dest[nae] = 0;
837 		}
838 		nae++;
839 		if (nae == ETR_MAX_AE_PER_MAILBOX)
840 			break;
841 	}
842 }
843 
844 static void
845 qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
846 {
847 	struct qat_ap_bank *qab;
848 	int ap_bank;
849 
850 	if (sc->sc_hw.qhw_num_ap_banks == 0)
851 		return;
852 
853 	ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
854 	MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks);
855 	qab = &sc->sc_etr_ap_banks[ap_bank];
856 
857 	if (qr->qr_cb == NULL) {
858 		qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
859 		if (!qab->qab_ne_dest) {
860 			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
861 			    qr->qr_ring, 1);
862 			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
863 			    qab->qab_ne_dest);
864 		}
865 	} else {
866 		qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
867 		if (!qab->qab_nf_dest) {
868 			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
869 			    qr->qr_ring, 1);
870 			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
871 			    qab->qab_nf_dest);
872 		}
873 	}
874 }
875 
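/*
 * Map a (message size, message count) pair to the hardware ring-size
 * encoding whose byte capacity matches exactly; fall back to the default
 * encoding when no exact match exists.
 */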
876 static int
877 qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
878 {
879 	int i = QAT_MIN_RING_SIZE;
880 
881 	for (; i <= QAT_MAX_RING_SIZE; i++)
882 		if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
883 			return i;
884 
885 	return QAT_DEFAULT_RING_SIZE;
886 }
887 
888 int
889 qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
890     uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
891     const char *name, struct qat_ring **rqr)
892 {
893 	struct qat_bank *qb;
894 	struct qat_ring *qr = NULL;
895 	int error;
896 	uint32_t ring_size_bytes, ring_config;
897 	uint64_t ring_base;
898 	uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
899 	uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
900 
901 	MPASS(bank < sc->sc_hw.qhw_num_banks);
902 
903 	/* Allocate a ring from specified bank */
904 	qb = &sc->sc_etr_banks[bank];
905 
906 	if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
907 		return EINVAL;
908 	if (qb->qb_allocated_rings & (1 << ring))
909 		return ENOENT;
910 	qr = &qb->qb_et_rings[ring];
911 	qb->qb_allocated_rings |= 1 << ring;
912 
913 	/* Initialize allocated ring */
914 	qr->qr_ring = ring;
915 	qr->qr_bank = bank;
916 	qr->qr_name = name;
917 	qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
918 	qr->qr_ring_mask = (1 << ring);
919 	qr->qr_cb = cb;
920 	qr->qr_cb_arg = cb_arg;
921 
922 	/* Setup the shadow variables */
923 	qr->qr_head = 0;
924 	qr->qr_tail = 0;
925 	qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
926 	qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
927 
928 	/*
929 	 * To make sure that the ring is aligned to the ring size, allocate
930 	 * at least 4k and then tell the user it is smaller.
931 	 */
932 	ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
933 	ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
934 	error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes,
935 	    ring_size_bytes);
936 	if (error)
937 		return error;
938 
939 	qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
940 	qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr;
941 
942 	memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
943 	    qr->qr_dma.qdm_dma_seg.ds_len);
944 
945 	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
946 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
947 
948 	if (cb == NULL) {
949 		ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
950 	} else {
951 		ring_config =
952 		    ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
953 	}
954 	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
955 
956 	ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
957 	qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
958 
959 	if (sc->sc_hw.qhw_init_arb != NULL)
960 		qat_arb_update(sc, qb);
961 
962 	mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF);
963 
964 	qat_etr_ap_bank_setup_ring(sc, qr);
965 
966 	if (cb != NULL) {
967 		uint32_t intr_mask;
968 
969 		qb->qb_intr_mask |= qr->qr_ring_mask;
970 		intr_mask = qb->qb_intr_mask;
971 
972 		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask);
973 		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
974 		    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
975 	}
976 
977 	*rqr = qr;
978 
979 	return 0;
980 }
981 
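/*
 * Reduce data modulo a power of two: equivalent to data % (1 << shift),
 * computed with shifts only.
 */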
982 static inline u_int
983 qat_modulo(u_int data, u_int shift)
984 {
985 	u_int div = data >> shift;
986 	u_int mult = div << shift;
987 	return data - mult;
988 }
989 
990 int
991 qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
992 {
993 	uint32_t inflight;
994 	uint32_t *addr;
995 
996 	mtx_lock(&qr->qr_ring_mtx);
997 
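	/*
	 * Reserve an in-flight slot first; if the ring is already at its
	 * maximum occupancy, back off and return ERESTART so the opencrypto
	 * framework can retry the request once qat_etr_ring_intr() has
	 * drained responses and called crypto_unblock().
	 */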
998 	inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1;
999 	if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
1000 		atomic_subtract_32(qr->qr_inflight, 1);
1001 		qr->qr_need_wakeup = true;
1002 		mtx_unlock(&qr->qr_ring_mtx);
1003 		counter_u64_add(sc->sc_ring_full_restarts, 1);
1004 		return ERESTART;
1005 	}
1006 
1007 	addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);
1008 
1009 	memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
1010 
1011 	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
1012 	    BUS_DMASYNC_PREWRITE);
1013 
1014 	qr->qr_tail = qat_modulo(qr->qr_tail +
1015 	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1016 	    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1017 
1018 	qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1019 	    ETR_RING_TAIL_OFFSET, qr->qr_tail);
1020 
1021 	mtx_unlock(&qr->qr_ring_mtx);
1022 
1023 	return 0;
1024 }
1025 
1026 static int
1027 qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
1028     struct qat_ring *qr)
1029 {
1030 	uint32_t *msg, nmsg = 0;
1031 	int handled = 0;
1032 	bool blocked = false;
1033 
1034 	mtx_lock(&qr->qr_ring_mtx);
1035 
1036 	msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1037 
1038 	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
1039 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1040 
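	/*
	 * Consume responses until the empty-entry signature is reached,
	 * restoring the signature in each slot so the slot reads as free
	 * on the next pass.
	 */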
1041 	while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) {
1042 		atomic_subtract_32(qr->qr_inflight, 1);
1043 
1044 		if (qr->qr_cb != NULL) {
1045 			mtx_unlock(&qr->qr_ring_mtx);
1046 			handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
1047 			mtx_lock(&qr->qr_ring_mtx);
1048 		}
1049 
1050 		atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG);
1051 
1052 		qr->qr_head = qat_modulo(qr->qr_head +
1053 		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1054 		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1055 		nmsg++;
1056 
1057 		msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1058 	}
1059 
1060 	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
1061 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1062 
1063 	if (nmsg > 0) {
1064 		qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1065 		    ETR_RING_HEAD_OFFSET, qr->qr_head);
1066 		if (qr->qr_need_wakeup) {
1067 			blocked = true;
1068 			qr->qr_need_wakeup = false;
1069 		}
1070 	}
1071 
1072 	mtx_unlock(&qr->qr_ring_mtx);
1073 
1074 	if (blocked)
1075 		crypto_unblock(sc->sc_crypto.qcy_cid, CRYPTO_SYMQ);
1076 
1077 	return handled;
1078 }
1079 
1080 static void
1081 qat_etr_bank_intr(void *arg)
1082 {
1083 	struct qat_bank *qb = arg;
1084 	struct qat_softc *sc = qb->qb_sc;
1085 	uint32_t estat;
1086 	int i;
1087 
1088 	mtx_lock(&qb->qb_bank_mtx);
1089 
1090 	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);
1091 
1092 	/* Now handle all the responses */
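	/*
	 * ETR_E_STAT reports per-ring empty status, so its complement,
	 * masked by qb_intr_mask, identifies the rings with pending
	 * responses.
	 */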
1093 	estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
1094 	estat &= qb->qb_intr_mask;
1095 
1096 	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
1097 	    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
1098 
1099 	mtx_unlock(&qb->qb_bank_mtx);
1100 
1101 	while ((i = ffs(estat)) != 0) {
1102 		struct qat_ring *qr = &qb->qb_et_rings[--i];
1103 		estat &= ~(1 << i);
1104 		(void)qat_etr_ring_intr(sc, qb, qr);
1105 	}
1106 }
1107 
1108 void
1109 qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
1110 {
1111 
1112 	qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
1113 	    qb->qb_allocated_rings & 0xff);
1114 }
1115 
1116 static struct qat_sym_cookie *
1117 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
1118 {
1119 	struct qat_sym_cookie *qsc;
1120 
1121 	mtx_lock(&qcb->qcb_bank_mtx);
1122 
1123 	if (qcb->qcb_symck_free_count == 0) {
1124 		mtx_unlock(&qcb->qcb_bank_mtx);
1125 		return NULL;
1126 	}
1127 
1128 	qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
1129 
1130 	mtx_unlock(&qcb->qcb_bank_mtx);
1131 
1132 	return qsc;
1133 }
1134 
1135 static void
1136 qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb,
1137     struct qat_sym_cookie *qsc)
1138 {
1139 	explicit_bzero(qsc->qsc_iv_buf, EALG_MAX_BLOCK_LEN);
1140 	explicit_bzero(qsc->qsc_auth_res, QAT_SYM_HASH_BUFFER_LEN);
1141 
1142 	mtx_lock(&qcb->qcb_bank_mtx);
1143 	qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
1144 	mtx_unlock(&qcb->qcb_bank_mtx);
1145 }
1146 
1147 void
1148 qat_memcpy_htobe64(void *dst, const void *src, size_t len)
1149 {
1150 	uint64_t *dst0 = dst;
1151 	const uint64_t *src0 = src;
1152 	size_t i;
1153 
1154 	MPASS(len % sizeof(*dst0) == 0);
1155 
1156 	for (i = 0; i < len / sizeof(*dst0); i++)
1157 		*(dst0 + i) = htobe64(*(src0 + i));
1158 }
1159 
1160 void
1161 qat_memcpy_htobe32(void *dst, const void *src, size_t len)
1162 {
1163 	uint32_t *dst0 = dst;
1164 	const uint32_t *src0 = src;
1165 	size_t i;
1166 
1167 	MPASS(len % sizeof(*dst0) == 0);
1168 
1169 	for (i = 0; i < len / sizeof(*dst0); i++)
1170 		*(dst0 + i) = htobe32(*(src0 + i));
1171 }
1172 
1173 void
1174 qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
1175 {
1176 	switch (wordbyte) {
1177 	case 4:
1178 		qat_memcpy_htobe32(dst, src, len);
1179 		break;
1180 	case 8:
1181 		qat_memcpy_htobe64(dst, src, len);
1182 		break;
1183 	default:
1184 		panic("invalid word size %u", wordbyte);
1185 	}
1186 }
1187 
1188 void
1189 qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc,
1190     const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
1191     uint8_t *state)
1192 {
1193 	uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
1194 	char zeros[AES_BLOCK_LEN];
1195 	int rounds;
1196 
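	/*
	 * The GHASH hash subkey H is defined as the AES encryption of an
	 * all-zero block under the session key (NIST SP 800-38D); that is
	 * the only state precomputed here.
	 */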
1197 	memset(zeros, 0, sizeof(zeros));
1198 	rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
1199 	rijndaelEncrypt(ks, rounds, zeros, state);
1200 	explicit_bzero(ks, sizeof(ks));
1201 }
1202 
1203 void
1204 qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc,
1205     const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
1206     uint8_t *state1, uint8_t *state2)
1207 {
1208 	union authctx ctx;
1209 	const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah;
1210 	uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
1211 	uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
1212 	uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
1213 
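	/*
	 * Precompute the inner and outer HMAC partial hashes (key XOR ipad,
	 * key XOR opad) and store them big-endian, word by word, as state1
	 * and state2.
	 */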
1214 	hmac_init_ipad(sah, key, klen, &ctx);
1215 	qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size,
1216 	    state_word);
1217 	hmac_init_opad(sah, key, klen, &ctx);
1218 	qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size,
1219 	    state_word);
1220 	explicit_bzero(&ctx, sizeof(ctx));
1221 }
1222 
1223 static enum hw_cipher_algo
1224 qat_aes_cipher_algo(int klen)
1225 {
1226 	switch (klen) {
1227 	case HW_AES_128_KEY_SZ:
1228 		return HW_CIPHER_ALGO_AES128;
1229 	case HW_AES_192_KEY_SZ:
1230 		return HW_CIPHER_ALGO_AES192;
1231 	case HW_AES_256_KEY_SZ:
1232 		return HW_CIPHER_ALGO_AES256;
1233 	default:
1234 		panic("invalid key length %d", klen);
1235 	}
1236 }
1237 
1238 uint16_t
1239 qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc,
1240     const struct qat_session *qs)
1241 {
1242 	enum hw_cipher_algo algo;
1243 	enum hw_cipher_dir dir;
1244 	enum hw_cipher_convert key_convert;
1245 	enum hw_cipher_mode mode;
1246 
1247 	dir = desc->qcd_cipher_dir;
1248 	key_convert = HW_CIPHER_NO_CONVERT;
1249 	mode = qs->qs_cipher_mode;
1250 	switch (mode) {
1251 	case HW_CIPHER_CBC_MODE:
1252 	case HW_CIPHER_XTS_MODE:
1253 		algo = qs->qs_cipher_algo;
1254 
1255 		/*
1256 		 * AES decrypt key needs to be reversed.
1257 		 * Instead of reversing the key at session registration,
1258 		 * it is reversed on the fly by setting the KEY_CONVERT
1259 		 * bit here.
1260 		 */
1261 		if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
1262 			key_convert = HW_CIPHER_KEY_CONVERT;
1263 		break;
1264 	case HW_CIPHER_CTR_MODE:
1265 		algo = qs->qs_cipher_algo;
1266 		dir = HW_CIPHER_ENCRYPT;
1267 		break;
1268 	default:
1269 		panic("unhandled cipher mode %d", mode);
1270 		break;
1271 	}
1272 
1273 	return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir);
1274 }
1275 
1276 uint16_t
1277 qat_crypto_load_auth_session(const struct qat_crypto_desc *desc,
1278     const struct qat_session *qs, const struct qat_sym_hash_def **hash_def)
1279 {
1280 	enum qat_sym_hash_algorithm algo;
1281 
1282 	switch (qs->qs_auth_algo) {
1283 	case HW_AUTH_ALGO_SHA1:
1284 		algo = QAT_SYM_HASH_SHA1;
1285 		break;
1286 	case HW_AUTH_ALGO_SHA256:
1287 		algo = QAT_SYM_HASH_SHA256;
1288 		break;
1289 	case HW_AUTH_ALGO_SHA384:
1290 		algo = QAT_SYM_HASH_SHA384;
1291 		break;
1292 	case HW_AUTH_ALGO_SHA512:
1293 		algo = QAT_SYM_HASH_SHA512;
1294 		break;
1295 	case HW_AUTH_ALGO_GALOIS_128:
1296 		algo = QAT_SYM_HASH_AES_GCM;
1297 		break;
1298 	default:
1299 		panic("unhandled auth algorithm %d", qs->qs_auth_algo);
1300 		break;
1301 	}
1302 	*hash_def = &qat_sym_hash_defs[algo];
1303 
1304 	return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode,
1305 	    (*hash_def)->qshd_qat->qshqi_algo_enc,
1306 	    (*hash_def)->qshd_alg->qshai_digest_len);
1307 }
1308 
1309 struct qat_crypto_load_cb_arg {
1310 	struct qat_session	*qs;
1311 	struct qat_sym_cookie	*qsc;
1312 	struct cryptop		*crp;
1313 	int			error;
1314 };
1315 
1316 static int
1317 qat_crypto_populate_buf_list(struct buffer_list_desc *buffers,
1318     bus_dma_segment_t *segs, int niseg, int noseg, int skip)
1319 {
1320 	struct flat_buffer_desc *flatbuf;
1321 	bus_addr_t addr;
1322 	bus_size_t len;
1323 	int iseg, oseg;
1324 
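	/*
	 * Append up to QAT_MAXSEG flat-buffer entries starting at index
	 * noseg, dropping the first `skip' bytes of the mapped segments.
	 */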
1325 	for (iseg = 0, oseg = noseg; iseg < niseg && oseg < QAT_MAXSEG;
1326 	    iseg++) {
1327 		addr = segs[iseg].ds_addr;
1328 		len = segs[iseg].ds_len;
1329 
1330 		if (skip > 0) {
1331 			if (skip < len) {
1332 				addr += skip;
1333 				len -= skip;
1334 				skip = 0;
1335 			} else {
1336 				skip -= len;
1337 				continue;
1338 			}
1339 		}
1340 
1341 		flatbuf = &buffers->flat_bufs[oseg++];
1342 		flatbuf->data_len_in_bytes = (uint32_t)len;
1343 		flatbuf->phy_buffer = (uint64_t)addr;
1344 	}
1345 	buffers->num_buffers = oseg;
1346 	return iseg < niseg ? E2BIG : 0;
1347 }
1348 
1349 static void
1350 qat_crypto_load_aadbuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
1351     int error)
1352 {
1353 	struct qat_crypto_load_cb_arg *arg;
1354 	struct qat_sym_cookie *qsc;
1355 
1356 	arg = _arg;
1357 	if (error != 0) {
1358 		arg->error = error;
1359 		return;
1360 	}
1361 
1362 	qsc = arg->qsc;
1363 	arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs,
1364 	    nseg, 0, 0);
1365 }
1366 
1367 static void
1368 qat_crypto_load_buf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
1369     int error)
1370 {
1371 	struct cryptop *crp;
1372 	struct qat_crypto_load_cb_arg *arg;
1373 	struct qat_session *qs;
1374 	struct qat_sym_cookie *qsc;
1375 	int noseg, skip;
1376 
1377 	arg = _arg;
1378 	if (error != 0) {
1379 		arg->error = error;
1380 		return;
1381 	}
1382 
1383 	crp = arg->crp;
1384 	qs = arg->qs;
1385 	qsc = arg->qsc;
1386 
1387 	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
1388 		/* AAD was handled in qat_crypto_load(). */
1389 		skip = crp->crp_payload_start;
1390 		noseg = 0;
1391 	} else if (crp->crp_aad == NULL && crp->crp_aad_length > 0) {
1392 		skip = crp->crp_aad_start;
1393 		noseg = 0;
1394 	} else {
1395 		skip = crp->crp_payload_start;
1396 		noseg = crp->crp_aad == NULL ?
1397 		    0 : qsc->qsc_buf_list.num_buffers;
1398 	}
1399 	arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs,
1400 	    nseg, noseg, skip);
1401 }
1402 
1403 static void
1404 qat_crypto_load_obuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
1405     int error)
1406 {
1407 	struct buffer_list_desc *ibufs, *obufs;
1408 	struct flat_buffer_desc *ibuf, *obuf;
1409 	struct cryptop *crp;
1410 	struct qat_crypto_load_cb_arg *arg;
1411 	struct qat_session *qs;
1412 	struct qat_sym_cookie *qsc;
1413 	int buflen, osegs, tocopy;
1414 
1415 	arg = _arg;
1416 	if (error != 0) {
1417 		arg->error = error;
1418 		return;
1419 	}
1420 
1421 	crp = arg->crp;
1422 	qs = arg->qs;
1423 	qsc = arg->qsc;
1424 
1425 	/*
1426 	 * The payload must start at the same offset in the output SG list as in
1427 	 * the input SG list.  Copy over SG entries from the input corresponding
1428 	 * to the AAD buffer.
1429 	 */
1430 	osegs = 0;
1431 	if (qs->qs_auth_algo != HW_AUTH_ALGO_GALOIS_128 &&
1432 	    crp->crp_aad_length > 0) {
1433 		tocopy = crp->crp_aad == NULL ?
1434 		    crp->crp_payload_start - crp->crp_aad_start :
1435 		    crp->crp_aad_length;
1436 
1437 		ibufs = &qsc->qsc_buf_list;
1438 		obufs = &qsc->qsc_obuf_list;
1439 		for (; osegs < ibufs->num_buffers && tocopy > 0; osegs++) {
1440 			ibuf = &ibufs->flat_bufs[osegs];
1441 			obuf = &obufs->flat_bufs[osegs];
1442 
1443 			obuf->phy_buffer = ibuf->phy_buffer;
1444 			buflen = imin(ibuf->data_len_in_bytes, tocopy);
1445 			obuf->data_len_in_bytes = buflen;
1446 			tocopy -= buflen;
1447 		}
1448 	}
1449 
1450 	arg->error = qat_crypto_populate_buf_list(&qsc->qsc_obuf_list, segs,
1451 	    nseg, osegs, crp->crp_payload_output_start);
1452 }
1453 
1454 static int
1455 qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc,
1456     struct qat_crypto_desc const *desc, struct cryptop *crp)
1457 {
1458 	struct qat_crypto_load_cb_arg arg;
1459 	int error;
1460 
1461 	crypto_read_iv(crp, qsc->qsc_iv_buf);
1462 
1463 	arg.crp = crp;
1464 	arg.qs = qs;
1465 	arg.qsc = qsc;
1466 	arg.error = 0;
1467 
1468 	error = 0;
1469 	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128 &&
1470 	    crp->crp_aad_length > 0) {
1471 		/*
1472 		 * The firmware expects AAD to be in a contiguous buffer and
1473 		 * padded to a multiple of 16 bytes.  To satisfy these
1474 		 * constraints we bounce the AAD into a per-request buffer.
1475 		 * There is a small limit on the AAD size so this is not too
1476 		 * onerous.
1477 		 */
1478 		memset(qsc->qsc_gcm_aad, 0, QAT_GCM_AAD_SIZE_MAX);
1479 		if (crp->crp_aad == NULL) {
1480 			crypto_copydata(crp, crp->crp_aad_start,
1481 			    crp->crp_aad_length, qsc->qsc_gcm_aad);
1482 		} else {
1483 			memcpy(qsc->qsc_gcm_aad, crp->crp_aad,
1484 			    crp->crp_aad_length);
1485 		}
1486 	} else if (crp->crp_aad != NULL) {
1487 		error = bus_dmamap_load(
1488 		    qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag,
1489 		    qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap,
1490 		    crp->crp_aad, crp->crp_aad_length,
1491 		    qat_crypto_load_aadbuf_cb, &arg, BUS_DMA_NOWAIT);
1492 		if (error == 0)
1493 			error = arg.error;
1494 	}
1495 	if (error == 0) {
1496 		error = bus_dmamap_load_crp_buffer(
1497 		    qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag,
1498 		    qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap,
1499 		    &crp->crp_buf, qat_crypto_load_buf_cb, &arg,
1500 		    BUS_DMA_NOWAIT);
1501 		if (error == 0)
1502 			error = arg.error;
1503 	}
1504 	if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
1505 		error = bus_dmamap_load_crp_buffer(
1506 		    qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag,
1507 		    qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap,
1508 		    &crp->crp_obuf, qat_crypto_load_obuf_cb, &arg,
1509 		    BUS_DMA_NOWAIT);
1510 		if (error == 0)
1511 			error = arg.error;
1512 	}
1513 	return error;
1514 }
1515 
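/*
 * Spread work across the crypto banks by mapping the current CPU ID onto
 * the available banks.
 */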
1516 static inline struct qat_crypto_bank *
1517 qat_crypto_select_bank(struct qat_crypto *qcy)
1518 {
1519 	u_int cpuid = PCPU_GET(cpuid);
1520 
1521 	return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
1522 }
1523 
1524 static int
1525 qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1526 {
1527 	char *name;
1528 	int bank, curname, error, i, j;
1529 
1530 	bank = qcb->qcb_bank;
1531 	curname = 0;
1532 
1533 	name = qcb->qcb_ring_names[curname++];
1534 	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
1535 	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1536 	    sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
1537 	    NULL, NULL, name, &qcb->qcb_sym_tx);
1538 	if (error)
1539 		return error;
1540 
1541 	name = qcb->qcb_ring_names[curname++];
1542 	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
1543 	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1544 	    sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
1545 	    qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
1546 	if (error)
1547 		return error;
1548 
1549 	for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1550 		struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
1551 		struct qat_sym_cookie *qsc;
1552 
1553 		error = qat_alloc_dmamem(sc, qdm, 1,
1554 		    sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN);
1555 		if (error)
1556 			return error;
1557 
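		/*
		 * Record device-visible addresses for the sub-buffers that
		 * are embedded in the cookie, derived from the cookie's own
		 * DMA segment.
		 */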
1558 		qsc = qdm->qdm_dma_vaddr;
1559 		qsc->qsc_self_dmamap = qdm->qdm_dma_map;
1560 		qsc->qsc_self_dma_tag = qdm->qdm_dma_tag;
1561 		qsc->qsc_bulk_req_params_buf_paddr =
1562 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1563 		    qsc_bulk_cookie.qsbc_req_params_buf);
1564 		qsc->qsc_buffer_list_desc_paddr =
1565 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1566 		    qsc_buf_list);
1567 		qsc->qsc_obuffer_list_desc_paddr =
1568 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1569 		    qsc_obuf_list);
1573 		qsc->qsc_iv_buf_paddr =
1574 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1575 		    qsc_iv_buf);
1576 		qsc->qsc_auth_res_paddr =
1577 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1578 		    qsc_auth_res);
1579 		qsc->qsc_gcm_aad_paddr =
1580 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1581 		    qsc_gcm_aad);
1582 		qsc->qsc_content_desc_paddr =
1583 		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1584 		    qsc_content_desc);
1585 		qcb->qcb_symck_free[i] = qsc;
1586 		qcb->qcb_symck_free_count++;
1587 
1588 		for (j = 0; j < QAT_SYM_DMA_COUNT; j++) {
1589 			error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
1590 			    1, 0, 		/* alignment, boundary */
1591 			    BUS_SPACE_MAXADDR,	/* lowaddr */
1592 			    BUS_SPACE_MAXADDR, 	/* highaddr */
1593 			    NULL, NULL, 	/* filter, filterarg */
1594 			    QAT_MAXLEN,		/* maxsize */
1595 			    QAT_MAXSEG,		/* nsegments */
1596 			    QAT_MAXLEN,		/* maxsegsize */
1597 			    BUS_DMA_COHERENT,	/* flags */
1598 			    NULL, NULL,		/* lockfunc, lockarg */
1599 			    &qsc->qsc_dma[j].qsd_dma_tag);
1600 			if (error != 0)
1601 				return error;
1602 			error = bus_dmamap_create(qsc->qsc_dma[j].qsd_dma_tag,
1603 			    BUS_DMA_COHERENT, &qsc->qsc_dma[j].qsd_dmamap);
1604 			if (error != 0)
1605 				return error;
1606 		}
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 static int
1613 qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1614 {
1615 	mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF);
1616 
1617 	return qat_crypto_setup_ring(sc, qcb);
1618 }
1619 
1620 static void
1621 qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1622 {
1623 	struct qat_dmamem *qdm;
1624 	struct qat_sym_cookie *qsc;
1625 	int i, j;
1626 
1627 	for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1628 		qdm = &qcb->qcb_symck_dmamems[i];
1629 		qsc = qcb->qcb_symck_free[i];
1630 		for (j = 0; j < QAT_SYM_DMA_COUNT; j++) {
1631 			bus_dmamap_destroy(qsc->qsc_dma[j].qsd_dma_tag,
1632 			    qsc->qsc_dma[j].qsd_dmamap);
1633 			bus_dma_tag_destroy(qsc->qsc_dma[j].qsd_dma_tag);
1634 		}
1635 		qat_free_dmamem(sc, qdm);
1636 	}
1637 	qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma);
1638 	qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma);
1639 
1640 	mtx_destroy(&qcb->qcb_bank_mtx);
1641 }
1642 
1643 static int
1644 qat_crypto_init(struct qat_softc *sc)
1645 {
1646 	struct qat_crypto *qcy = &sc->sc_crypto;
1647 	struct sysctl_ctx_list *ctx;
1648 	struct sysctl_oid *oid;
1649 	struct sysctl_oid_list *children;
1650 	int bank, error, num_banks;
1651 
1652 	qcy->qcy_sc = sc;
1653 
1654 	if (sc->sc_hw.qhw_init_arb != NULL)
1655 		num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks);
1656 	else
1657 		num_banks = sc->sc_ae_num;
1658 
1659 	qcy->qcy_num_banks = num_banks;
1660 
1661 	qcy->qcy_banks =
1662 	    qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
1663 
1664 	for (bank = 0; bank < num_banks; bank++) {
1665 		struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
1666 		qcb->qcb_bank = bank;
1667 		error = qat_crypto_bank_init(sc, qcb);
1668 		if (error)
1669 			return error;
1670 	}
1671 
1672 	mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF);
1673 
1674 	ctx = device_get_sysctl_ctx(sc->sc_dev);
1675 	oid = device_get_sysctl_tree(sc->sc_dev);
1676 	children = SYSCTL_CHILDREN(oid);
1677 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
1678 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
1679 	children = SYSCTL_CHILDREN(oid);
1680 
1681 	sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK);
1682 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts",
1683 	    CTLFLAG_RD, &sc->sc_gcm_aad_restarts,
1684 	    "GCM requests deferred due to AAD size change");
1685 	sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK);
1686 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates",
1687 	    CTLFLAG_RD, &sc->sc_gcm_aad_updates,
1688 	    "GCM requests that required session state update");
1689 	sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK);
1690 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full",
1691 	    CTLFLAG_RD, &sc->sc_ring_full_restarts,
1692 	    "Requests deferred due to in-flight max reached");
1693 	sc->sc_sym_alloc_failures = counter_u64_alloc(M_WAITOK);
1694 	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sym_alloc_failures",
1695 	    CTLFLAG_RD, &sc->sc_sym_alloc_failures,
1696 	    "Request allocation failures");
1697 
1698 	return 0;
1699 }
1700 
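/*
 * Release the statistics counters and undo qat_crypto_init(), tearing down
 * every bank that was allocated.
 */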
1701 static void
1702 qat_crypto_deinit(struct qat_softc *sc)
1703 {
1704 	struct qat_crypto *qcy = &sc->sc_crypto;
1705 	struct qat_crypto_bank *qcb;
1706 	int bank;
1707 
1708 	counter_u64_free(sc->sc_sym_alloc_failures);
1709 	counter_u64_free(sc->sc_ring_full_restarts);
1710 	counter_u64_free(sc->sc_gcm_aad_updates);
1711 	counter_u64_free(sc->sc_gcm_aad_restarts);
1712 
1713 	if (qcy->qcy_banks != NULL) {
1714 		for (bank = 0; bank < qcy->qcy_num_banks; bank++) {
1715 			qcb = &qcy->qcy_banks[bank];
1716 			qat_crypto_bank_deinit(sc, qcb);
1717 		}
1718 		qat_free_mem(qcy->qcy_banks);
1719 		mtx_destroy(&qcy->qcy_crypto_mtx);
1720 	}
1721 }
1722 
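/*
 * Register the device with the opencrypto framework as a hardware driver.
 */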
1723 static int
1724 qat_crypto_start(struct qat_softc *sc)
1725 {
1726 	struct qat_crypto *qcy;
1727 
1728 	qcy = &sc->sc_crypto;
1729 	qcy->qcy_cid = crypto_get_driverid(sc->sc_dev,
1730 	    sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE);
1731 	if (qcy->qcy_cid < 0) {
1732 		device_printf(sc->sc_dev,
1733 		    "could not get opencrypto driver id\n");
1734 		return ENOENT;
1735 	}
1736 
1737 	return 0;
1738 }
1739 
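/*
 * Unregister from the opencrypto framework if a driver ID was obtained.
 */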
1740 static void
1741 qat_crypto_stop(struct qat_softc *sc)
1742 {
1743 	struct qat_crypto *qcy;
1744 
1745 	qcy = &sc->sc_crypto;
1746 	if (qcy->qcy_cid >= 0)
1747 		(void)crypto_unregister_all(qcy->qcy_cid);
1748 }
1749 
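/*
 * Sync and unload one of a symmetric cookie's DMA maps once the device is
 * done with the associated buffer.
 */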
1750 static void
1751 qat_crypto_sym_dma_unload(struct qat_sym_cookie *qsc, enum qat_sym_dma i)
1752 {
1753 	bus_dmamap_sync(qsc->qsc_dma[i].qsd_dma_tag, qsc->qsc_dma[i].qsd_dmamap,
1754 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1755 	bus_dmamap_unload(qsc->qsc_dma[i].qsd_dma_tag,
1756 	    qsc->qsc_dma[i].qsd_dmamap);
1757 }
1758 
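/*
 * Completion handler for symmetric crypto responses.  Unload the request's
 * DMA buffers, verify or copy back the digest as required, release the
 * cookie, and complete the cryptop.  If the session was waiting for its
 * in-flight requests to drain, unblock the opencrypto queue.
 */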
1759 static int
1760 qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
1761 {
1762 	char icv[QAT_SYM_HASH_BUFFER_LEN];
1763 	struct qat_crypto_bank *qcb = arg;
1764 	struct qat_crypto *qcy;
1765 	struct qat_session *qs;
1766 	struct qat_sym_cookie *qsc;
1767 	struct qat_sym_bulk_cookie *qsbc;
1768 	struct cryptop *crp;
1769 	int error;
1770 	uint16_t auth_sz;
1771 	bool blocked;
1772 
1773 	qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
1774 
1775 	qsbc = &qsc->qsc_bulk_cookie;
1776 	qcy = qsbc->qsbc_crypto;
1777 	qs = qsbc->qsbc_session;
1778 	crp = qsbc->qsbc_cb_tag;
1779 
1780 	bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
1781 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1782 
1783 	if (crp->crp_aad != NULL)
1784 		qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_AADBUF);
1785 	qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_BUF);
1786 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1787 		qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF);
1788 
1789 	error = 0;
1790 	if ((auth_sz = qs->qs_auth_mlen) != 0) {
1791 		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
1792 			crypto_copydata(crp, crp->crp_digest_start,
1793 			    auth_sz, icv);
1794 			if (timingsafe_bcmp(icv, qsc->qsc_auth_res,
1795 			    auth_sz) != 0) {
1796 				error = EBADMSG;
1797 			}
1798 		} else {
1799 			crypto_copyback(crp, crp->crp_digest_start,
1800 			    auth_sz, qsc->qsc_auth_res);
1801 		}
1802 	}
1803 
1804 	qat_crypto_free_sym_cookie(qcb, qsc);
1805 
1806 	blocked = false;
1807 	mtx_lock(&qs->qs_session_mtx);
1808 	MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
1809 	qs->qs_inflight--;
1810 	if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) {
1811 		blocked = true;
1812 		qs->qs_need_wakeup = false;
1813 	}
1814 	mtx_unlock(&qs->qs_session_mtx);
1815 
1816 	crp->crp_etype = error;
1817 	crypto_done(crp);
1818 
1819 	if (blocked)
1820 		crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ);
1821 
1822 	return 1;
1823 }
1824 
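/*
 * Decide whether a proposed session can be handled in hardware, checking
 * the session mode, algorithms and IV lengths.  AES-XTS is rejected on
 * C2XXX (NanoQAT) parts.
 */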
1825 static int
1826 qat_probesession(device_t dev, const struct crypto_session_params *csp)
1827 {
1828 	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
1829 	    0)
1830 		return EINVAL;
1831 
1832 	if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
1833 	    qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) {
1834 		/*
1835 		 * AES-XTS is not supported by the NanoQAT.
1836 		 */
1837 		return EINVAL;
1838 	}
1839 
1840 	switch (csp->csp_mode) {
1841 	case CSP_MODE_CIPHER:
1842 		switch (csp->csp_cipher_alg) {
1843 		case CRYPTO_AES_CBC:
1844 		case CRYPTO_AES_ICM:
1845 			if (csp->csp_ivlen != AES_BLOCK_LEN)
1846 				return EINVAL;
1847 			break;
1848 		case CRYPTO_AES_XTS:
1849 			if (csp->csp_ivlen != AES_XTS_IV_LEN)
1850 				return EINVAL;
1851 			break;
1852 		default:
1853 			return EINVAL;
1854 		}
1855 		break;
1856 	case CSP_MODE_DIGEST:
1857 		switch (csp->csp_auth_alg) {
1858 		case CRYPTO_SHA1:
1859 		case CRYPTO_SHA1_HMAC:
1860 		case CRYPTO_SHA2_256:
1861 		case CRYPTO_SHA2_256_HMAC:
1862 		case CRYPTO_SHA2_384:
1863 		case CRYPTO_SHA2_384_HMAC:
1864 		case CRYPTO_SHA2_512:
1865 		case CRYPTO_SHA2_512_HMAC:
1866 			break;
1867 		case CRYPTO_AES_NIST_GMAC:
1868 			if (csp->csp_ivlen != AES_GCM_IV_LEN)
1869 				return EINVAL;
1870 			break;
1871 		default:
1872 			return EINVAL;
1873 		}
1874 		break;
1875 	case CSP_MODE_AEAD:
1876 		switch (csp->csp_cipher_alg) {
1877 		case CRYPTO_AES_NIST_GCM_16:
1878 			break;
1879 		default:
1880 			return EINVAL;
1881 		}
1882 		break;
1883 	case CSP_MODE_ETA:
1884 		switch (csp->csp_auth_alg) {
1885 		case CRYPTO_SHA1_HMAC:
1886 		case CRYPTO_SHA2_256_HMAC:
1887 		case CRYPTO_SHA2_384_HMAC:
1888 		case CRYPTO_SHA2_512_HMAC:
1889 			switch (csp->csp_cipher_alg) {
1890 			case CRYPTO_AES_CBC:
1891 			case CRYPTO_AES_ICM:
1892 				if (csp->csp_ivlen != AES_BLOCK_LEN)
1893 					return EINVAL;
1894 				break;
1895 			case CRYPTO_AES_XTS:
1896 				if (csp->csp_ivlen != AES_XTS_IV_LEN)
1897 					return EINVAL;
1898 				break;
1899 			default:
1900 				return EINVAL;
1901 			}
1902 			break;
1903 		default:
1904 			return EINVAL;
1905 		}
1906 		break;
1907 	default:
1908 		return EINVAL;
1909 	}
1910 
1911 	return CRYPTODEV_PROBE_HARDWARE;
1912 }
1913 
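/*
 * Create a session: allocate DMA memory for the paired decrypt/encrypt
 * content descriptors, record key material and the hardware algorithm and
 * mode selections, lay out the firmware slice chain for the requested
 * session mode, and have the hardware-specific code populate both
 * descriptors.
 */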
1914 static int
1915 qat_newsession(device_t dev, crypto_session_t cses,
1916     const struct crypto_session_params *csp)
1917 {
1918 	struct qat_crypto *qcy;
1919 	struct qat_dmamem *qdm;
1920 	struct qat_session *qs;
1921 	struct qat_softc *sc;
1922 	struct qat_crypto_desc *ddesc, *edesc;
1923 	int error, slices;
1924 
1925 	sc = device_get_softc(dev);
1926 	qs = crypto_get_driver_session(cses);
1927 	qcy = &sc->sc_crypto;
1928 
1929 	qdm = &qs->qs_desc_mem;
1930 	error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG,
1931 	    sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN);
1932 	if (error != 0)
1933 		return error;
1934 
1935 	mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF);
1936 	qs->qs_aad_length = -1;
1937 
1938 	qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr;
1939 	qs->qs_enc_desc = edesc = ddesc + 1;
1940 
1941 	ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
1942 	ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr +
1943 	    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1944 	edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
1945 	    sizeof(struct qat_crypto_desc);
1946 	edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr +
1947 	    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1948 
1949 	qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
1950 	qs->qs_inflight = 0;
1951 
1952 	qs->qs_cipher_key = csp->csp_cipher_key;
1953 	qs->qs_cipher_klen = csp->csp_cipher_klen;
1954 	qs->qs_auth_key = csp->csp_auth_key;
1955 	qs->qs_auth_klen = csp->csp_auth_klen;
1956 
1957 	switch (csp->csp_cipher_alg) {
1958 	case CRYPTO_AES_CBC:
1959 		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
1960 		qs->qs_cipher_mode = HW_CIPHER_CBC_MODE;
1961 		break;
1962 	case CRYPTO_AES_ICM:
1963 		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
1964 		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
1965 		break;
1966 	case CRYPTO_AES_XTS:
1967 		qs->qs_cipher_algo =
1968 		    qat_aes_cipher_algo(csp->csp_cipher_klen / 2);
1969 		qs->qs_cipher_mode = HW_CIPHER_XTS_MODE;
1970 		break;
1971 	case CRYPTO_AES_NIST_GCM_16:
1972 		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
1973 		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
1974 		qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
1975 		qs->qs_auth_mode = HW_AUTH_MODE1;
1976 		break;
1977 	case 0:
1978 		break;
1979 	default:
1980 		panic("%s: unhandled cipher algorithm %d", __func__,
1981 		    csp->csp_cipher_alg);
1982 	}
1983 
1984 	switch (csp->csp_auth_alg) {
1985 	case CRYPTO_SHA1_HMAC:
1986 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
1987 		qs->qs_auth_mode = HW_AUTH_MODE1;
1988 		break;
1989 	case CRYPTO_SHA1:
1990 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
1991 		qs->qs_auth_mode = HW_AUTH_MODE0;
1992 		break;
1993 	case CRYPTO_SHA2_256_HMAC:
1994 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
1995 		qs->qs_auth_mode = HW_AUTH_MODE1;
1996 		break;
1997 	case CRYPTO_SHA2_256:
1998 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
1999 		qs->qs_auth_mode = HW_AUTH_MODE0;
2000 		break;
2001 	case CRYPTO_SHA2_384_HMAC:
2002 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
2003 		qs->qs_auth_mode = HW_AUTH_MODE1;
2004 		break;
2005 	case CRYPTO_SHA2_384:
2006 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
2007 		qs->qs_auth_mode = HW_AUTH_MODE0;
2008 		break;
2009 	case CRYPTO_SHA2_512_HMAC:
2010 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
2011 		qs->qs_auth_mode = HW_AUTH_MODE1;
2012 		break;
2013 	case CRYPTO_SHA2_512:
2014 		qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
2015 		qs->qs_auth_mode = HW_AUTH_MODE0;
2016 		break;
2017 	case CRYPTO_AES_NIST_GMAC:
2018 		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen);
2019 		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
2020 		qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
2021 		qs->qs_auth_mode = HW_AUTH_MODE1;
2022 
2023 		qs->qs_cipher_key = qs->qs_auth_key;
2024 		qs->qs_cipher_klen = qs->qs_auth_klen;
2025 		break;
2026 	case 0:
2027 		break;
2028 	default:
2029 		panic("%s: unhandled auth algorithm %d", __func__,
2030 		    csp->csp_auth_alg);
2031 	}
2032 
2033 	slices = 0;
2034 	switch (csp->csp_mode) {
2035 	case CSP_MODE_AEAD:
2036 	case CSP_MODE_ETA:
2037 		/* auth then decrypt */
2038 		ddesc->qcd_slices[0] = FW_SLICE_AUTH;
2039 		ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
2040 		ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
2041 		ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
2042 		/* encrypt then auth */
2043 		edesc->qcd_slices[0] = FW_SLICE_CIPHER;
2044 		edesc->qcd_slices[1] = FW_SLICE_AUTH;
2045 		edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
2046 		edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
2047 		slices = 2;
2048 		break;
2049 	case CSP_MODE_CIPHER:
2050 		/* decrypt */
2051 		ddesc->qcd_slices[0] = FW_SLICE_CIPHER;
2052 		ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
2053 		ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
2054 		/* encrypt */
2055 		edesc->qcd_slices[0] = FW_SLICE_CIPHER;
2056 		edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
2057 		edesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
2058 		slices = 1;
2059 		break;
2060 	case CSP_MODE_DIGEST:
2061 		if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
2062 			/* auth then decrypt */
2063 			ddesc->qcd_slices[0] = FW_SLICE_AUTH;
2064 			ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
2065 			ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
2066 			ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
2067 			/* encrypt then auth */
2068 			edesc->qcd_slices[0] = FW_SLICE_CIPHER;
2069 			edesc->qcd_slices[1] = FW_SLICE_AUTH;
2070 			edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
2071 			edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
2072 			slices = 2;
2073 		} else {
2074 			ddesc->qcd_slices[0] = FW_SLICE_AUTH;
2075 			ddesc->qcd_cmd_id = FW_LA_CMD_AUTH;
2076 			edesc->qcd_slices[0] = FW_SLICE_AUTH;
2077 			edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
2078 			slices = 1;
2079 		}
2080 		break;
2081 	default:
2082 		panic("%s: unhandled crypto algorithm %d, %d", __func__,
2083 		    csp->csp_cipher_alg, csp->csp_auth_alg);
2084 	}
2085 	ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
2086 	edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
2087 
2088 	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc);
2089 	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc);
2090 
2091 	if (csp->csp_auth_mlen != 0)
2092 		qs->qs_auth_mlen = csp->csp_auth_mlen;
2093 	else
2094 		qs->qs_auth_mlen = edesc->qcd_auth_sz;
2095 
2096 	/* Compute the GMAC by specifying a null cipher payload. */
2097 	if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC)
2098 		ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
2099 
2100 	return 0;
2101 }
2102 
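/*
 * Scrub key-derived material from a content descriptor before its backing
 * memory is released.
 */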
2103 static void
2104 qat_crypto_clear_desc(struct qat_crypto_desc *desc)
2105 {
2106 	explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc));
2107 	explicit_bzero(desc->qcd_hash_state_prefix_buf,
2108 	    sizeof(desc->qcd_hash_state_prefix_buf));
2109 	explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
2110 }
2111 
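/*
 * Destroy a session: scrub both content descriptors, free the descriptor
 * DMA memory, and destroy the session lock.
 */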
2112 static void
2113 qat_freesession(device_t dev, crypto_session_t cses)
2114 {
2115 	struct qat_session *qs;
2116 
2117 	qs = crypto_get_driver_session(cses);
2118 	KASSERT(qs->qs_inflight == 0,
2119 	    ("%s: session %p has requests in flight", __func__, qs));
2120 
2121 	qat_crypto_clear_desc(qs->qs_enc_desc);
2122 	qat_crypto_clear_desc(qs->qs_dec_desc);
2123 	qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem);
2124 	mtx_destroy(&qs->qs_session_mtx);
2125 }
2126 
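/*
 * Submit one crypto request.  Enforce the buffer and GCM AAD size limits,
 * deferring with ERESTART when the AAD length changes while requests are
 * in flight, then pick a bank, grab a cookie, load the buffers for DMA,
 * build the firmware request and post it to the bank's transmit ring.
 */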
2127 static int
2128 qat_process(device_t dev, struct cryptop *crp, int hint)
2129 {
2130 	struct qat_crypto *qcy;
2131 	struct qat_crypto_bank *qcb;
2132 	struct qat_crypto_desc const *desc;
2133 	struct qat_session *qs;
2134 	struct qat_softc *sc;
2135 	struct qat_sym_cookie *qsc;
2136 	struct qat_sym_bulk_cookie *qsbc;
2137 	int error;
2138 
2139 	sc = device_get_softc(dev);
2140 	qcy = &sc->sc_crypto;
2141 	qs = crypto_get_driver_session(crp->crp_session);
2142 	qsc = NULL;
2143 
2144 	if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) {
2145 		error = E2BIG;
2146 		goto fail1;
2147 	}
2148 
2149 	mtx_lock(&qs->qs_session_mtx);
2150 	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
2151 		if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) {
2152 			error = E2BIG;
2153 			mtx_unlock(&qs->qs_session_mtx);
2154 			goto fail1;
2155 		}
2156 
2157 		/*
2158 		 * The firmware interface for GCM annoyingly requires the AAD
2159 		 * size to be stored in the session's content descriptor, which
2160 		 * is not really meant to be updated after session
2161 		 * initialization.  For IPSec the AAD size is fixed so this is
2162 		 * not much of a problem in practice, but we have to catch AAD
2163 		 * size updates here so that the device code can safely update
2164 		 * the session's recorded AAD size.
2165 		 */
2166 		if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) {
2167 			if (qs->qs_inflight == 0) {
2168 				if (qs->qs_aad_length != -1) {
2169 					counter_u64_add(sc->sc_gcm_aad_updates,
2170 					    1);
2171 				}
2172 				qs->qs_aad_length = crp->crp_aad_length;
2173 			} else {
2174 				qs->qs_need_wakeup = true;
2175 				mtx_unlock(&qs->qs_session_mtx);
2176 				counter_u64_add(sc->sc_gcm_aad_restarts, 1);
2177 				error = ERESTART;
2178 				goto fail1;
2179 			}
2180 		}
2181 	}
2182 	qs->qs_inflight++;
2183 	mtx_unlock(&qs->qs_session_mtx);
2184 
2185 	qcb = qat_crypto_select_bank(qcy);
2186 
2187 	qsc = qat_crypto_alloc_sym_cookie(qcb);
2188 	if (qsc == NULL) {
2189 		counter_u64_add(sc->sc_sym_alloc_failures, 1);
2190 		error = ENOBUFS;
2191 		goto fail2;
2192 	}
2193 
2194 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2195 		desc = qs->qs_enc_desc;
2196 	else
2197 		desc = qs->qs_dec_desc;
2198 
2199 	error = qat_crypto_load(qs, qsc, desc, crp);
2200 	if (error != 0)
2201 		goto fail2;
2202 
2203 	qsbc = &qsc->qsc_bulk_cookie;
2204 	qsbc->qsbc_crypto = qcy;
2205 	qsbc->qsbc_session = qs;
2206 	qsbc->qsbc_cb_tag = crp;
2207 
2208 	sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp);
2209 
2210 	if (crp->crp_aad != NULL) {
2211 		bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag,
2212 		    qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap,
2213 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2214 	}
2215 	bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag,
2216 	    qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap,
2217 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2218 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
2219 		bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag,
2220 		    qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap,
2221 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2222 	}
2223 	bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
2224 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2225 
2226 	error = qat_etr_put_msg(sc, qcb->qcb_sym_tx,
2227 	    (uint32_t *)qsbc->qsbc_msg);
2228 	if (error)
2229 		goto fail2;
2230 
2231 	return 0;
2232 
2233 fail2:
2234 	if (qsc)
2235 		qat_crypto_free_sym_cookie(qcb, qsc);
2236 	mtx_lock(&qs->qs_session_mtx);
2237 	qs->qs_inflight--;
2238 	mtx_unlock(&qs->qs_session_mtx);
2239 fail1:
2240 	crp->crp_etype = error;
2241 	crypto_done(crp);
2242 	return 0;
2243 }
2244 
2245 static device_method_t qat_methods[] = {
2246 	/* Device interface */
2247 	DEVMETHOD(device_probe,		qat_probe),
2248 	DEVMETHOD(device_attach,	qat_attach),
2249 	DEVMETHOD(device_detach,	qat_detach),
2250 
2251 	/* Cryptodev interface */
2252 	DEVMETHOD(cryptodev_probesession, qat_probesession),
2253 	DEVMETHOD(cryptodev_newsession,	qat_newsession),
2254 	DEVMETHOD(cryptodev_freesession, qat_freesession),
2255 	DEVMETHOD(cryptodev_process,	qat_process),
2256 
2257 	DEVMETHOD_END
2258 };
2259 
2260 static driver_t qat_driver = {
2261 	.name		= "qat_c2xxx",
2262 	.methods	= qat_methods,
2263 	.size		= sizeof(struct qat_softc),
2264 };
2265 
2266 DRIVER_MODULE(qat_c2xxx, pci, qat_driver, 0, 0);
2267 MODULE_VERSION(qat_c2xxx, 1);
2268 MODULE_DEPEND(qat_c2xxx, crypto, 1, 1, 1);
2269 MODULE_DEPEND(qat_c2xxx, firmware, 1, 1, 1);
2270 MODULE_DEPEND(qat_c2xxx, pci, 1, 1, 1);
2271