1 /* $NetBSD: qat.c,v 1.8 2022/05/22 11:39:27 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2019 Internet Initiative Japan, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 *
36 * * Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * * Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in
40 * the documentation and/or other materials provided with the
41 * distribution.
42 * * Neither the name of Intel Corporation nor the names of its
43 * contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
47 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
48 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
49 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
50 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
51 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
52 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
56 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 #include <sys/cdefs.h>
60 __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.8 2022/05/22 11:39:27 riastradh Exp $");
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/device.h>
66 #include <sys/module.h>
67 #include <sys/kmem.h>
68 #include <sys/mutex.h>
69 #include <sys/bitops.h>
70 #include <sys/atomic.h>
71 #include <sys/mbuf.h>
72 #include <sys/cprng.h>
73 #include <sys/cpu.h>
74 #include <sys/interrupt.h>
75 #include <sys/md5.h>
76 #include <sys/sha1.h>
77 #include <sys/sha2.h>
78
79 #include <opencrypto/cryptodev.h>
80 #include <opencrypto/cryptosoft.h>
81 #include <opencrypto/xform.h>
82
83 /* XXX same as sys/arch/x86/x86/via_padlock.c */
84 #include <opencrypto/cryptosoft_xform.c>
85
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pcidevs.h>
89
90 #include "qatreg.h"
91 #include "qatvar.h"
92 #include "qat_aevar.h"
93
94 extern struct qat_hw qat_hw_c2xxx;
95 extern struct qat_hw qat_hw_c3xxx;
96 extern struct qat_hw qat_hw_c62x;
97 extern struct qat_hw qat_hw_d15xx;
98
99 static const struct qat_product {
100 pci_vendor_id_t qatp_vendor;
101 pci_product_id_t qatp_product;
102 const char *qatp_name;
103 enum qat_chip_type qatp_chip;
104 const struct qat_hw *qatp_hw;
105 } qat_products[] = {
106
107 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
108 "Intel C2000 QuickAssist Physical Function",
109 QAT_CHIP_C2XXX, &qat_hw_c2xxx },
110
111 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
112 "Intel C3000 QuickAssist Physical Function",
113 QAT_CHIP_C3XXX, &qat_hw_c3xxx },
114 #ifdef notyet
115 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT_VF,
116 "Intel C3000 QuickAssist Virtual Function",
117 QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf },
118 #endif
119 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
120 "Intel C620/Xeon D-2100 QuickAssist Physical Function",
121 QAT_CHIP_C62X, &qat_hw_c62x },
122 #ifdef notyet
123 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT_VF,
124 "Intel C620/Xeon D-2100 QuickAssist Virtual Function",
125 QAT_CHIP_C62X_IOV, &qat_hw_c62xvf },
126 #endif
127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
128 "Intel Xeon D-1500 QuickAssist Physical Function",
129 QAT_CHIP_D15XX, &qat_hw_d15xx },
130 #ifdef notyet
131 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT_VF,
132 "Intel Xeon D-1500 QuickAssist Virtual Function",
133 QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf },
134 #endif
135 { 0, 0, NULL, 0, NULL },
136 };
137
138 /* MD5 - 16 bytes - Initialiser state can be found in RFC 1321 */
139 static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = {
140 0x01, 0x23, 0x45, 0x67,
141 0x89, 0xab, 0xcd, 0xef,
142 0xfe, 0xdc, 0xba, 0x98,
143 0x76, 0x54, 0x32, 0x10,
144 };
145
146 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
147 static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
148 0x67, 0x45, 0x23, 0x01,
149 0xef, 0xcd, 0xab, 0x89,
150 0x98, 0xba, 0xdc, 0xfe,
151 0x10, 0x32, 0x54, 0x76,
152 0xc3, 0xd2, 0xe1, 0xf0
153 };
154
155 /* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
156 static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
157 0x6a, 0x09, 0xe6, 0x67,
158 0xbb, 0x67, 0xae, 0x85,
159 0x3c, 0x6e, 0xf3, 0x72,
160 0xa5, 0x4f, 0xf5, 0x3a,
161 0x51, 0x0e, 0x52, 0x7f,
162 0x9b, 0x05, 0x68, 0x8c,
163 0x1f, 0x83, 0xd9, 0xab,
164 0x5b, 0xe0, 0xcd, 0x19
165 };
166
167 /* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
168 static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
169 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
170 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
171 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
172 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
173 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
174 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
175 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
176 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
177 };
178
179 /* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
180 static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
181 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
182 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
183 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
184 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
185 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
186 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
187 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
188 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
189 };
190
191 /* Hash Algorithm specific structure */
192
193 static const struct qat_sym_hash_alg_info md5_info = {
194 QAT_HASH_MD5_DIGEST_SIZE,
195 QAT_HASH_MD5_BLOCK_SIZE,
196 md5_initial_state,
197 QAT_HASH_MD5_STATE_SIZE,
198 &swcr_auth_hash_hmac_md5_96,
199 offsetof(MD5_CTX, state),
200 4,
201 };
202
203 static const struct qat_sym_hash_alg_info sha1_info = {
204 QAT_HASH_SHA1_DIGEST_SIZE,
205 QAT_HASH_SHA1_BLOCK_SIZE,
206 sha1_initial_state,
207 QAT_HASH_SHA1_STATE_SIZE,
208 &swcr_auth_hash_hmac_sha1_96,
209 offsetof(SHA1_CTX, state),
210 4,
211 };
212
213 static const struct qat_sym_hash_alg_info sha256_info = {
214 QAT_HASH_SHA256_DIGEST_SIZE,
215 QAT_HASH_SHA256_BLOCK_SIZE,
216 sha256_initial_state,
217 QAT_HASH_SHA256_STATE_SIZE,
218 &swcr_auth_hash_hmac_sha2_256,
219 offsetof(SHA256_CTX, state),
220 4,
221 };
222
223 static const struct qat_sym_hash_alg_info sha384_info = {
224 QAT_HASH_SHA384_DIGEST_SIZE,
225 QAT_HASH_SHA384_BLOCK_SIZE,
226 sha384_initial_state,
227 QAT_HASH_SHA384_STATE_SIZE,
228 &swcr_auth_hash_hmac_sha2_384,
229 offsetof(SHA384_CTX, state),
230 8,
231 };
232
233 static const struct qat_sym_hash_alg_info sha512_info = {
234 QAT_HASH_SHA512_DIGEST_SIZE,
235 QAT_HASH_SHA512_BLOCK_SIZE,
236 sha512_initial_state,
237 QAT_HASH_SHA512_STATE_SIZE,
238 &swcr_auth_hash_hmac_sha2_512,
239 offsetof(SHA512_CTX, state),
240 8,
241 };
242
243 static const struct qat_sym_hash_alg_info aes_gcm_info = {
244 QAT_HASH_AES_GCM_DIGEST_SIZE,
245 QAT_HASH_AES_GCM_BLOCK_SIZE,
246 NULL, 0,
247 NULL, 0, 0, /* XXX */
248 };
249
250 /* Hash QAT specific structures */
251
252 static const struct qat_sym_hash_qat_info md5_config = {
253 HW_AUTH_ALGO_MD5,
254 QAT_HASH_MD5_BLOCK_SIZE,
255 HW_MD5_STATE1_SZ,
256 HW_MD5_STATE2_SZ
257 };
258
259 static const struct qat_sym_hash_qat_info sha1_config = {
260 HW_AUTH_ALGO_SHA1,
261 QAT_HASH_SHA1_BLOCK_SIZE,
262 HW_SHA1_STATE1_SZ,
263 HW_SHA1_STATE2_SZ
264 };
265
266 static const struct qat_sym_hash_qat_info sha256_config = {
267 HW_AUTH_ALGO_SHA256,
268 QAT_HASH_SHA256_BLOCK_SIZE,
269 HW_SHA256_STATE1_SZ,
270 HW_SHA256_STATE2_SZ
271 };
272
273 static const struct qat_sym_hash_qat_info sha384_config = {
274 HW_AUTH_ALGO_SHA384,
275 QAT_HASH_SHA384_BLOCK_SIZE,
276 HW_SHA384_STATE1_SZ,
277 HW_SHA384_STATE2_SZ
278 };
279
280 static const struct qat_sym_hash_qat_info sha512_config = {
281 HW_AUTH_ALGO_SHA512,
282 QAT_HASH_SHA512_BLOCK_SIZE,
283 HW_SHA512_STATE1_SZ,
284 HW_SHA512_STATE2_SZ
285 };
286
287 static const struct qat_sym_hash_qat_info aes_gcm_config = {
288 HW_AUTH_ALGO_GALOIS_128,
289 0,
290 HW_GALOIS_128_STATE1_SZ,
291 HW_GALOIS_H_SZ +
292 HW_GALOIS_LEN_A_SZ +
293 HW_GALOIS_E_CTR0_SZ
294 };
295
296 static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
297 [QAT_SYM_HASH_MD5] = { &md5_info, &md5_config },
298 [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
299 [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
300 [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
301 [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
302 [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
303 };
304
305 const struct qat_product *
306 qat_lookup(const struct pci_attach_args *);
307 int qat_match(struct device *, struct cfdata *, void *);
308 void qat_attach(struct device *, struct device *, void *);
309 void qat_init(struct device *);
310 int qat_start(struct device *);
311 int qat_detach(struct device *, int);
312
313 int qat_alloc_msix_intr(struct qat_softc *,
314 struct pci_attach_args *);
315 void * qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t,
316 int (*)(void *), void *, const char *, int);
317 int qat_setup_msix_intr(struct qat_softc *);
318
319 int qat_etr_init(struct qat_softc *);
320 int qat_etr_bank_init(struct qat_softc *, int);
321
322 int qat_etr_ap_bank_init(struct qat_softc *);
323 void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
324 void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
325 uint32_t, int);
326 void qat_etr_ap_bank_setup_ring(struct qat_softc *,
327 struct qat_ring *);
328 int qat_etr_verify_ring_size(uint32_t, uint32_t);
329
330 int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
331 struct qat_ring *);
332 int qat_etr_bank_intr(void *);
333
334 void qat_arb_update(struct qat_softc *, struct qat_bank *);
335
336 struct qat_sym_cookie *
337 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *);
338 void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
339 struct qat_sym_cookie *);
340 int qat_crypto_load_buf(struct qat_softc *, struct cryptop *,
341 struct qat_sym_cookie *, struct qat_crypto_desc const *,
342 uint8_t *, int, bus_addr_t *);
343 int qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *,
344 struct cryptodesc *, struct qat_crypto_desc const *);
345 int qat_crypto_process(void *, struct cryptop *, int);
346 int qat_crypto_setup_ring(struct qat_softc *,
347 struct qat_crypto_bank *);
348 int qat_crypto_new_session(void *, uint32_t *, struct cryptoini *);
349 void qat_crypto_free_session0(struct qat_crypto *,
350 struct qat_session *);
351 void qat_crypto_check_free_session(struct qat_crypto *,
352 struct qat_session *);
353 void qat_crypto_free_session(void *, uint64_t);
354 int qat_crypto_bank_init(struct qat_softc *,
355 struct qat_crypto_bank *);
356 int qat_crypto_init(struct qat_softc *);
357 int qat_crypto_start(struct qat_softc *);
358 int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
359
360 CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc),
361 qat_match, qat_attach, qat_detach, NULL);
362
363 struct qat_softc *gsc = NULL;
364
365 #ifdef QAT_DUMP
366 int qat_dump = QAT_DUMP;
367 #endif
368
369 const struct qat_product *
370 qat_lookup(const struct pci_attach_args *pa)
371 {
372 const struct qat_product *qatp;
373
374 for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
375 if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor &&
376 PCI_PRODUCT(pa->pa_id) == qatp->qatp_product)
377 return qatp;
378 }
379 return NULL;
380 }
381
382 int
383 qat_match(struct device *parent, struct cfdata *cf, void *aux)
384 {
385 struct pci_attach_args *pa = aux;
386
387 if (qat_lookup(pa) != NULL)
388 return 1;
389
390 return 0;
391 }
392
393 void
394 qat_attach(struct device *parent, struct device *self, void *aux)
395 {
396 struct qat_softc *sc = device_private(self);
397 struct pci_attach_args *pa = aux;
398 pci_chipset_tag_t pc = pa->pa_pc;
399 const struct qat_product *qatp;
400 char cap[256];
401 pcireg_t cmd, memtype, msixoff, fusectl;
402 bus_size_t msixtbl_offset;
403 int i, bar, msixtbl_bar;
404
405 sc->sc_dev = self;
406 sc->sc_pc = pc;
407 sc->sc_pcitag = pa->pa_tag;
408
409 gsc = sc; /* for debug */
410
411 qatp = qat_lookup(pa);
412 KASSERT(qatp != NULL);
413
414 if (pci_dma64_available(pa))
415 sc->sc_dmat = pa->pa_dmat64;
416 else
417 sc->sc_dmat = pa->pa_dmat;
418
419 aprint_naive(": Crypto processor\n");
420 sc->sc_rev = PCI_REVISION(pa->pa_class);
421 aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev);
422
423 memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
424
425 /* Determine active accelerators and engines */
426 sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
427 sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
428
429 sc->sc_accel_num = 0;
430 for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
431 if (sc->sc_accel_mask & (1 << i))
432 sc->sc_accel_num++;
433 }
434 sc->sc_ae_num = 0;
435 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
436 if (sc->sc_ae_mask & (1 << i)) {
437 sc->sc_ae_num++;
438 }
439 }
440
441 if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
442 		aprint_error_dev(sc->sc_dev, "couldn't find acceleration\n");
443 goto fail;
444 }
445
446 KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL);
447 KASSERT(sc->sc_ae_num <= MAX_NUM_AE);
448
449 /* Determine SKU and capabilities */
450 sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
451 sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
452 sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
453
454 aprint_normal_dev(sc->sc_dev,
455 "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n",
456 sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask,
457 sc->sc_ae_num, sc->sc_ae_mask);
458 snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
459 aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap);
460
461 /* Map BARs */
462
463 msixtbl_bar = 0;
464 msixtbl_offset = 0;
465 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) {
466 pcireg_t msixtbl;
467 msixtbl = pci_conf_read(pc, pa->pa_tag,
468 msixoff + PCI_MSIX_TBLOFFSET);
469 msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
470 msixtbl_bar = PCI_MAPREG_START +
471 ((msixtbl & PCI_MSIX_TBLBIR_MASK) << 2);
472 }
473
474 i = 0;
475 if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
476 KASSERT(sc->sc_hw.qhw_sram_bar_id == 0);
477 fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
478 /* Skip SRAM BAR */
479 i = (fusectl & FUSECTL_MASK) ? 1 : 0;
480 }
481 for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) {
482 bus_size_t size;
483 bus_addr_t addr;
484
485 if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0)
486 continue;
487
488 if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM)
489 continue;
490
491 /* MSI-X table will be mapped by pci_msix_alloc_map */
492 if (bar == msixtbl_bar)
493 size = msixtbl_offset;
494 else
495 size = 0;
496
497 if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0,
498 &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) {
499 aprint_error_dev(sc->sc_dev,
500 "couldn't map bar 0x%02x\n", bar);
501 goto fail;
502 }
503
504 aprint_verbose_dev(sc->sc_dev,
505 "region #%d bar 0x%02x size 0x%x at 0x%llx"
506 " mapped to %p\n", i, bar,
507 (int)sc->sc_csrs[i], (unsigned long long)addr,
508 bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i]));
509
510 i++;
511 if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT)
512 bar += 4;
513 }
514
515 /* XXX Enable advanced error reporting */
516
517 /* Enable bus mastering */
518 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
519 cmd |= PCI_COMMAND_MASTER_ENABLE;
520 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
521
522 if (qat_alloc_msix_intr(sc, pa))
523 goto fail;
524
525 config_mountroot(self, qat_init);
526
527 fail:
528 /* XXX */
529 return;
530 }
531
532 void
533 qat_init(struct device *self)
534 {
535 int error;
536 struct qat_softc *sc = device_private(self);
537
538 aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n");
539 error = qat_etr_init(sc);
540 if (error) {
541 aprint_error_dev(sc->sc_dev,
542 "Could not initialize ETR: %d\n", error);
543 return;
544 }
545
546 aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n");
547 if (sc->sc_hw.qhw_init_admin_comms != NULL &&
548 (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
549 aprint_error_dev(sc->sc_dev,
550 "Could not initialize admin comms: %d\n", error);
551 return;
552 }
553
554 aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n");
555 if (sc->sc_hw.qhw_init_arb != NULL &&
556 (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
557 aprint_error_dev(sc->sc_dev,
558 "Could not initialize hw arbiter: %d\n", error);
559 return;
560 }
561
562 aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n");
563 error = qat_ae_init(sc);
564 if (error) {
565 aprint_error_dev(sc->sc_dev,
566 "Could not initialize Acceleration Engine: %d\n", error);
567 return;
568 }
569
570 aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n");
571 error = qat_aefw_load(sc);
572 if (error) {
573 aprint_error_dev(sc->sc_dev,
574 "Could not load firmware: %d\n", error);
575 return;
576 }
577
578 aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n");
579 error = qat_setup_msix_intr(sc);
580 if (error) {
581 aprint_error_dev(sc->sc_dev,
582 "Could not setup interrupts: %d\n", error);
583 return;
584 }
585
586 sc->sc_hw.qhw_enable_intr(sc);
587
588 error = qat_crypto_init(sc);
589 if (error) {
590 aprint_error_dev(sc->sc_dev,
591 "Could not initialize service: %d\n", error);
592 return;
593 }
594
595 aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n");
596 if (sc->sc_hw.qhw_enable_error_correction != NULL)
597 sc->sc_hw.qhw_enable_error_correction(sc);
598
599 aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n");
600 if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
601 (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
602 aprint_error_dev(sc->sc_dev,
603 "Could not initialize watchdog timer: %d\n", error);
604 return;
605 }
606
607 error = qat_start(self);
608 if (error) {
609 aprint_error_dev(sc->sc_dev,
610 "Could not start: %d\n", error);
611 return;
612 }
613 }
614
615 int
616 qat_start(struct device *self)
617 {
618 struct qat_softc *sc = device_private(self);
619 int error;
620
621 error = qat_ae_start(sc);
622 if (error)
623 return error;
624
625 if (sc->sc_hw.qhw_send_admin_init != NULL &&
626 (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
627 return error;
628 }
629
630 error = qat_crypto_start(sc);
631 if (error)
632 return error;
633
634 return 0;
635 }
636
637 int
638 qat_detach(struct device *self, int flags)
639 {
640
641 return 0;
642 }
643
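/*
 * qat_alloc_mem()/qat_free_mem(): kmem wrappers that stash the allocation
 * size in a hidden size_t header immediately before the returned pointer,
 * so callers do not have to pass the size back when freeing.
 */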
644 void *
645 qat_alloc_mem(size_t size)
646 {
647 size_t *sptr;
648 sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP);
649 *sptr = size;
650 return ++sptr;
651 }
652
653 void
654 qat_free_mem(void *ptr)
655 {
656 size_t *sptr = ptr, size;
657 size = *(--sptr);
658 kmem_free(sptr, size + sizeof(size));
659 }
660
661 void
662 qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
663 {
664
665 bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map);
666 bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
667 bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size);
668 bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
669 explicit_memset(qdm, 0, sizeof(*qdm));
670 }
671
672 int
673 qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
674 bus_size_t size, bus_size_t alignment)
675 {
676 int error = 0, nseg;
677
678 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment,
679 0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT);
680 if (error) {
681 aprint_error_dev(sc->sc_dev,
682 "couldn't allocate dmamem, error = %d\n", error);
683 goto fail_0;
684 }
685 KASSERT(nseg == 1);
686 error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg,
687 nseg, size, &qdm->qdm_dma_vaddr,
688 BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
689 if (error) {
690 aprint_error_dev(sc->sc_dev,
691 "couldn't map dmamem, error = %d\n", error);
692 goto fail_1;
693 }
694 qdm->qdm_dma_size = size;
695 error = bus_dmamap_create(sc->sc_dmat, size, nseg, size,
696 0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map);
697 if (error) {
698 aprint_error_dev(sc->sc_dev,
699 "couldn't create dmamem map, error = %d\n", error);
700 goto fail_2;
701 }
702 error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map,
703 qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT);
704 if (error) {
705 aprint_error_dev(sc->sc_dev,
706 "couldn't load dmamem map, error = %d\n", error);
707 goto fail_3;
708 }
709
710 return 0;
711 fail_3:
712 bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
713 qdm->qdm_dma_map = NULL;
714 fail_2:
715 bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size);
716 qdm->qdm_dma_vaddr = NULL;
717 qdm->qdm_dma_size = 0;
718 fail_1:
719 bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
720 fail_0:
721 return error;
722 }
723
724 int
725 qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa)
726 {
727 u_int *ih_map, vec;
728 int error, count, ihi;
729
730 count = sc->sc_hw.qhw_num_banks + 1;
731 ih_map = qat_alloc_mem(sizeof(*ih_map) * count);
732 ihi = 0;
733
734 for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++)
735 ih_map[ihi++] = vec;
736
737 vec += sc->sc_hw.qhw_msix_ae_vec_gap;
738 ih_map[ihi++] = vec;
739
740 error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count);
741 qat_free_mem(ih_map);
742 if (error) {
743 aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n",
744 count, error);
745 }
746
747 return error;
748 }
749
750 void *
751 qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih,
752 int (*func)(void *), void *arg,
753 const char *name, int index)
754 {
755 kcpuset_t *affinity;
756 int error;
757 char buf[PCI_INTRSTR_LEN];
758 char intrxname[INTRDEVNAMEBUF];
759 const char *intrstr;
760 void *cookie;
761
762 snprintf(intrxname, sizeof(intrxname), "%s%s%d",
763 device_xname(sc->sc_dev), name, index);
764
765 intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf));
766
767 pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true);
768
769 cookie = pci_intr_establish_xname(sc->sc_pc, ih,
770 IPL_NET, func, arg, intrxname);
771
772 aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n",
773 name, index, intrstr);
774
775 kcpuset_create(&affinity, true);
776 kcpuset_set(affinity, index % ncpu);
777 error = interrupt_distribute(cookie, affinity, NULL);
778 if (error) {
779 aprint_error_dev(sc->sc_dev,
780 "couldn't distribute interrupt: %s%d\n", name, index);
781 }
782 kcpuset_destroy(affinity);
783
784 return cookie;
785 }
786
787 int
788 qat_setup_msix_intr(struct qat_softc *sc)
789 {
790 int i;
791 pci_intr_handle_t ih;
792
793 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
794 struct qat_bank *qb = &sc->sc_etr_banks[i];
795 ih = sc->sc_ih[i];
796
797 qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih,
798 qat_etr_bank_intr, qb, "bank", i);
799 if (qb->qb_ih_cookie == NULL)
800 return ENOMEM;
801 }
802
803 sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i],
804 qat_ae_cluster_intr, sc, "aeclust", 0);
805 if (sc->sc_ae_ih_cookie == NULL)
806 return ENOMEM;
807
808 return 0;
809 }
810
811 int
812 qat_etr_init(struct qat_softc *sc)
813 {
814 int i;
815 int error = 0;
816
817 sc->sc_etr_banks = qat_alloc_mem(
818 sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
819
820 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
821 error = qat_etr_bank_init(sc, i);
822 if (error) {
823 goto fail;
824 }
825 }
826
827 if (sc->sc_hw.qhw_num_ap_banks) {
828 sc->sc_etr_ap_banks = qat_alloc_mem(
829 sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
830 error = qat_etr_ap_bank_init(sc);
831 if (error) {
832 goto fail;
833 }
834 }
835
836 return 0;
837
838 fail:
839 if (sc->sc_etr_banks != NULL) {
840 qat_free_mem(sc->sc_etr_banks);
841 sc->sc_etr_banks = NULL;
842 }
843 if (sc->sc_etr_ap_banks != NULL) {
844 qat_free_mem(sc->sc_etr_ap_banks);
845 sc->sc_etr_ap_banks = NULL;
846 }
847 return error;
848 }
849
850 int
851 qat_etr_bank_init(struct qat_softc *sc, int bank)
852 {
853 struct qat_bank *qb = &sc->sc_etr_banks[bank];
854 int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
855
856 KASSERT(bank < sc->sc_hw.qhw_num_banks);
857
858 mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
859
860 qb->qb_sc = sc;
861 qb->qb_bank = bank;
862 qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
863 QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR,
864 qb->qb_ev_rxintr_name, "bank%d rxintr", bank);
865
866 /* Clean CSRs for all rings within the bank */
867 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
868 struct qat_ring *qr = &qb->qb_et_rings[i];
869
870 qat_etr_bank_ring_write_4(sc, bank, i,
871 ETR_RING_CONFIG, 0);
872 qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
873
874 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
875 qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
876 } else if (sc->sc_hw.qhw_tx_rings_mask &
877 (1 << (i - tx_rx_gap))) {
878 /* Share inflight counter with rx and tx */
879 qr->qr_inflight =
880 qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
881 }
882 }
883
884 if (sc->sc_hw.qhw_init_etr_intr != NULL) {
885 sc->sc_hw.qhw_init_etr_intr(sc, bank);
886 } else {
887 /* common code in qat 1.7 */
888 qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
889 ETR_INT_REG_CLEAR_MASK);
890 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
891 ETR_RINGS_PER_INT_SRCSEL; i++) {
892 qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
893 (i * ETR_INT_SRCSEL_NEXT_OFFSET),
894 ETR_INT_SRCSEL_MASK);
895 }
896 }
897
898 return 0;
899 }
900
901 int
902 qat_etr_ap_bank_init(struct qat_softc *sc)
903 {
904 int ap_bank;
905
906 for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
907 struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
908
909 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
910 ETR_AP_NF_MASK_INIT);
911 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
912 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
913 ETR_AP_NE_MASK_INIT);
914 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
915
916 memset(qab, 0, sizeof(*qab));
917 }
918
919 return 0;
920 }
921
922 void
923 qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
924 {
925 if (set_mask)
926 *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
927 else
928 *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
929 }
930
931 void
932 qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
933 uint32_t ring, int set_dest)
934 {
935 uint32_t ae_mask;
936 uint8_t mailbox, ae, nae;
937 uint8_t *dest = (uint8_t *)ap_dest;
938
939 mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
940
941 nae = 0;
942 ae_mask = sc->sc_ae_mask;
943 for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
944 if ((ae_mask & (1 << ae)) == 0)
945 continue;
946
947 if (set_dest) {
948 dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
949 __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
950 ETR_AP_DEST_ENABLE;
951 } else {
952 dest[nae] = 0;
953 }
954 nae++;
955 if (nae == ETR_MAX_AE_PER_MAILBOX)
956 break;
957
958 }
959 }
960
961 void
962 qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
963 {
964 struct qat_ap_bank *qab;
965 int ap_bank;
966
967 if (sc->sc_hw.qhw_num_ap_banks == 0)
968 return;
969
970 ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
971 KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks);
972 qab = &sc->sc_etr_ap_banks[ap_bank];
973
974 if (qr->qr_cb == NULL) {
975 qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
976 if (!qab->qab_ne_dest) {
977 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
978 qr->qr_ring, 1);
979 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
980 qab->qab_ne_dest);
981 }
982 } else {
983 qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
984 if (!qab->qab_nf_dest) {
985 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
986 qr->qr_ring, 1);
987 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
988 qab->qab_nf_dest);
989 }
990 }
991 }
992
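/*
 * Map a requested (msg_size * num_msgs) byte count onto the hardware ring
 * size encoding; if no encoding matches exactly, fall back to
 * QAT_DEFAULT_RING_SIZE.
 */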
993 int
994 qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
995 {
996 int i = QAT_MIN_RING_SIZE;
997
998 for (; i <= QAT_MAX_RING_SIZE; i++)
999 if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
1000 return i;
1001
1002 return QAT_DEFAULT_RING_SIZE;
1003 }
1004
1005 int
1006 qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
1007 uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
1008 const char *name, struct qat_ring **rqr)
1009 {
1010 struct qat_bank *qb;
1011 struct qat_ring *qr = NULL;
1012 int error;
1013 uint32_t ring_size_bytes, ring_config;
1014 uint64_t ring_base;
1015 uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
1016 uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
1017
1018 KASSERT(bank < sc->sc_hw.qhw_num_banks);
1019
1020 /* Allocate a ring from specified bank */
1021 qb = &sc->sc_etr_banks[bank];
1022
1023 if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
1024 return EINVAL;
1025 if (qb->qb_allocated_rings & (1 << ring))
1026 return ENOENT;
1027 qr = &qb->qb_et_rings[ring];
1028 qb->qb_allocated_rings |= 1 << ring;
1029
1030 /* Initialize allocated ring */
1031 qr->qr_ring = ring;
1032 qr->qr_bank = bank;
1033 qr->qr_name = name;
1034 qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
1035 qr->qr_ring_mask = (1 << ring);
1036 qr->qr_cb = cb;
1037 qr->qr_cb_arg = cb_arg;
1038 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
1039 qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring);
1040 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
1041 qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring);
1042 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC,
1043 qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring);
1044 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC,
1045 qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring);
1046
1047 /* Setup the shadow variables */
1048 qr->qr_head = 0;
1049 qr->qr_tail = 0;
1050 qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
1051 qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
1052
1053 /*
1054 * To make sure that ring is aligned to ring size allocate
1055 * at least 4k and then tell the user it is smaller.
1056 */
1057 ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
1058 ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
1059 error = qat_alloc_dmamem(sc, &qr->qr_dma,
1060 ring_size_bytes, ring_size_bytes);
1061 if (error)
1062 return error;
1063
1064 KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1);
1065
1066 qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
1067 qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr;
1068
1069 aprint_verbose_dev(sc->sc_dev,
1070 "allocate ring %d of bank %d for %s "
1071 "size %d %d at vaddr %p paddr 0x%llx\n",
1072 ring, bank, name, ring_size_bytes,
1073 (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len,
1074 qr->qr_ring_vaddr,
1075 (unsigned long long)qr->qr_ring_paddr);
1076
1077 memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
1078 qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len);
1079
1080 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0,
1081 qr->qr_dma.qdm_dma_map->dm_mapsize,
1082 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1083
1084 if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) {
1085 aprint_error_dev(sc->sc_dev, "ring address not aligned\n");
1086 return EFAULT;
1087 }
1088
1089 if (cb == NULL) {
1090 ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
1091 } else {
1092 ring_config =
1093 ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
1094 }
1095 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
1096
1097 ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
1098 qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
1099
1100 if (sc->sc_hw.qhw_init_arb != NULL)
1101 qat_arb_update(sc, qb);
1102
1103 mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1104
1105 qat_etr_ap_bank_setup_ring(sc, qr);
1106
1107 if (cb != NULL) {
1108 uint32_t intr_mask;
1109
1110 qb->qb_intr_mask |= qr->qr_ring_mask;
1111 intr_mask = qb->qb_intr_mask;
1112
1113 aprint_verbose_dev(sc->sc_dev,
1114 "update intr mask for bank %d "
1115 "(coalescing time %dns): 0x%08x\n",
1116 bank, qb->qb_coalescing_time, intr_mask);
1117 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN,
1118 intr_mask);
1119 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
1120 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
1121 }
1122
1123 *rqr = qr;
1124
1125 return 0;
1126 }
1127
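/*
 * qat_modulo(data, shift) returns data % (1 << shift); ring sizes are powers
 * of two, so head/tail offsets wrap using a shift and subtract instead of a
 * division.
 */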
1128 static inline u_int
1129 qat_modulo(u_int data, u_int shift)
1130 {
1131 u_int div = data >> shift;
1132 u_int mult = div << shift;
1133 return data - mult;
1134 }
1135
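/*
 * Enqueue one request message: bump the shared inflight counter (bounded by
 * QAT_MAX_INFLIGHTS), copy the message at the current tail, then advance the
 * tail modulo the ring size and write it back to the ring's tail CSR.
 */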
1136 int
1137 qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
1138 {
1139 uint32_t inflight;
1140 uint32_t *addr;
1141
1142 mutex_spin_enter(&qr->qr_ring_mtx);
1143
1144 inflight = atomic_inc_32_nv(qr->qr_inflight);
1145 if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
1146 atomic_dec_32(qr->qr_inflight);
1147 QAT_EVCNT_INCR(&qr->qr_ev_txfull);
1148 mutex_spin_exit(&qr->qr_ring_mtx);
1149 return EBUSY;
1150 }
1151 QAT_EVCNT_INCR(&qr->qr_ev_txmsg);
1152
1153 addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);
1154
1155 memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
1156 #ifdef QAT_DUMP
1157 qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr,
1158 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
1159 #endif
1160
1161 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail,
1162 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1163 BUS_DMASYNC_PREWRITE);
1164
1165 qr->qr_tail = qat_modulo(qr->qr_tail +
1166 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1167 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1168
1169 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1170 ETR_RING_TAIL_OFFSET, qr->qr_tail);
1171
1172 mutex_spin_exit(&qr->qr_ring_mtx);
1173
1174 return 0;
1175 }
1176
1177 int
1178 qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
1179 struct qat_ring *qr)
1180 {
1181 int handled = 0;
1182 uint32_t *msg;
1183 uint32_t nmsg = 0;
1184
1185 mutex_spin_enter(&qr->qr_ring_mtx);
1186
1187 QAT_EVCNT_INCR(&qr->qr_ev_rxintr);
1188
1189 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1190
1191 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
1192 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1193 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1194
1195 while (*msg != ETR_RING_EMPTY_ENTRY_SIG) {
1196 atomic_dec_32(qr->qr_inflight);
1197 QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);
1198
1199 if (qr->qr_cb != NULL) {
1200 mutex_spin_exit(&qr->qr_ring_mtx);
1201 handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
1202 mutex_spin_enter(&qr->qr_ring_mtx);
1203 }
1204
1205 *msg = ETR_RING_EMPTY_ENTRY_SIG;
1206
1207 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
1208 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1209 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1210
1211 qr->qr_head = qat_modulo(qr->qr_head +
1212 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1213 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1214 nmsg++;
1215
1216 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1217
1218 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
1219 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1220 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221 }
1222
1223 if (nmsg > 0) {
1224 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1225 ETR_RING_HEAD_OFFSET, qr->qr_head);
1226 }
1227
1228 mutex_spin_exit(&qr->qr_ring_mtx);
1229
1230 return handled;
1231 }
1232
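/*
 * Per-bank MSI-X handler: interrupt coalescing is disabled while the empty
 * status register is sampled, then re-enabled; every ring flagged non-empty
 * in the bank's interrupt mask is drained via qat_etr_ring_intr().
 */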
1233 int
1234 qat_etr_bank_intr(void *arg)
1235 {
1236 struct qat_bank *qb = arg;
1237 struct qat_softc *sc = qb->qb_sc;
1238 uint32_t estat;
1239 int i, handled = 0;
1240
1241 mutex_spin_enter(&qb->qb_bank_mtx);
1242
1243 QAT_EVCNT_INCR(&qb->qb_ev_rxintr);
1244
1245 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);
1246
1247 /* Now handle all the responses */
1248 estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
1249 estat &= qb->qb_intr_mask;
1250
1251 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
1252 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
1253
1254 mutex_spin_exit(&qb->qb_bank_mtx);
1255
1256 while ((i = ffs32(estat)) != 0) {
1257 struct qat_ring *qr = &qb->qb_et_rings[--i];
1258 estat &= ~(1 << i);
1259 handled |= qat_etr_ring_intr(sc, qb, qr);
1260 }
1261
1262 return handled;
1263 }
1264
1265 void
1266 qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
1267 {
1268
1269 qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
1270 qb->qb_allocated_rings & 0xff);
1271 }
1272
1273 struct qat_sym_cookie *
1274 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
1275 {
1276 struct qat_sym_cookie *qsc;
1277
1278 mutex_spin_enter(&qcb->qcb_bank_mtx);
1279
1280 if (qcb->qcb_symck_free_count == 0) {
1281 QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck);
1282 mutex_spin_exit(&qcb->qcb_bank_mtx);
1283 return NULL;
1284 }
1285
1286 qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
1287
1288 mutex_spin_exit(&qcb->qcb_bank_mtx);
1289
1290 return qsc;
1291 }
1292
1293 void
1294 qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc)
1295 {
1296
1297 mutex_spin_enter(&qcb->qcb_bank_mtx);
1298 qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
1299 mutex_spin_exit(&qcb->qcb_bank_mtx);
1300 }
1301
1302
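/*
 * Byte-swapping copy helpers: qat_crypto_hmac_precompute() uses these to
 * store the software hash state in big-endian word order whenever the
 * kernel's in-memory state layout differs from the canonical initial-state
 * bytes above.
 */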
1303 void
1304 qat_memcpy_htobe64(void *dst, const void *src, size_t len)
1305 {
1306 uint64_t *dst0 = dst;
1307 const uint64_t *src0 = src;
1308 size_t i;
1309
1310 KASSERT(len % sizeof(*dst0) == 0);
1311
1312 for (i = 0; i < len / sizeof(*dst0); i++)
1313 *(dst0 + i) = htobe64(*(src0 + i));
1314 }
1315
1316 void
1317 qat_memcpy_htobe32(void *dst, const void *src, size_t len)
1318 {
1319 uint32_t *dst0 = dst;
1320 const uint32_t *src0 = src;
1321 size_t i;
1322
1323 KASSERT(len % sizeof(*dst0) == 0);
1324
1325 for (i = 0; i < len / sizeof(*dst0); i++)
1326 *(dst0 + i) = htobe32(*(src0 + i));
1327 }
1328
1329 void
1330 qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
1331 {
1332 switch (wordbyte) {
1333 case 4:
1334 qat_memcpy_htobe32(dst, src, len);
1335 break;
1336 case 8:
1337 qat_memcpy_htobe64(dst, src, len);
1338 break;
1339 default:
1340 KASSERT(0);
1341 }
1342 }
1343
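/*
 * Precompute the HMAC inner/outer partial hashes: the key is padded and
 * XORed with the ipad/opad constants, one block of each is run through the
 * kernel's software hash, and the resulting intermediate states are copied
 * into state1/state2 for the hardware to resume from.
 */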
1344 void
1345 qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria,
1346 struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2)
1347 {
1348 int i, state_swap;
1349 struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah;
1350 uint32_t blklen = hash_def->qshd_alg->qshai_block_len;
1351 uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
1352 uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
1353 uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
1354 uint32_t keylen = cria->cri_klen / 8;
1355 uint32_t padlen = blklen - keylen;
1356 uint8_t *ipad = desc->qcd_hash_state_prefix_buf;
1357 uint8_t *opad = desc->qcd_hash_state_prefix_buf +
1358 sizeof(desc->qcd_hash_state_prefix_buf) / 2;
1359 /* XXX
1360 * For "stack protector not protecting local variables" error,
1361 * use constant variable.
1362 * Currently, the max length is sizeof(aesxcbc_ctx) used by
1363 * swcr_auth_hash_aes_xcbc_mac
1364 */
1365 uint8_t ctx[sizeof(aesxcbc_ctx)];
1366
1367 memcpy(ipad, cria->cri_key, keylen);
1368 memcpy(opad, cria->cri_key, keylen);
1369
1370 if (padlen > 0) {
1371 memset(ipad + keylen, 0, padlen);
1372 memset(opad + keylen, 0, padlen);
1373 }
1374 for (i = 0; i < blklen; i++) {
1375 ipad[i] ^= 0x36;
1376 opad[i] ^= 0x5c;
1377 }
1378
1379 /* ipad */
1380 sah->Init(ctx);
1381 /* Check the endian of kernel built-in hash state */
1382 state_swap = memcmp(hash_def->qshd_alg->qshai_init_state,
1383 ((uint8_t *)ctx) + state_offset, state_word);
1384 sah->Update(ctx, ipad, blklen);
1385 if (state_swap == 0) {
1386 memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size);
1387 } else {
1388 qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset,
1389 state_size, state_word);
1390 }
1391
1392 /* opad */
1393 sah->Init(ctx);
1394 sah->Update(ctx, opad, blklen);
1395 if (state_swap == 0) {
1396 memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size);
1397 } else {
1398 qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset,
1399 state_size, state_word);
1400 }
1401 }
1402
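/*
 * Translate an opencrypto cipher descriptor into the hardware cipher-config
 * word (algorithm, CBC mode, direction, and on-the-fly key conversion for
 * AES decryption).
 */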
1403 uint16_t
1404 qat_crypto_load_cipher_cryptoini(
1405 struct qat_crypto_desc *desc, struct cryptoini *crie)
1406 {
1407 enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL;
1408 enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE;
1409 enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT;
1410
1411 switch (crie->cri_alg) {
1412 case CRYPTO_DES_CBC:
1413 algo = HW_CIPHER_ALGO_DES;
1414 desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ;
1415 break;
1416 case CRYPTO_3DES_CBC:
1417 algo = HW_CIPHER_ALGO_3DES;
1418 desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ;
1419 break;
1420 case CRYPTO_AES_CBC:
1421 switch (crie->cri_klen / 8) {
1422 case HW_AES_128_KEY_SZ:
1423 algo = HW_CIPHER_ALGO_AES128;
1424 break;
1425 case HW_AES_192_KEY_SZ:
1426 algo = HW_CIPHER_ALGO_AES192;
1427 break;
1428 case HW_AES_256_KEY_SZ:
1429 algo = HW_CIPHER_ALGO_AES256;
1430 break;
1431 default:
1432 KASSERT(0);
1433 break;
1434 }
1435 desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
1436 /*
1437 * AES decrypt key needs to be reversed.
1438 * Instead of reversing the key at session registration,
1439 * it is instead reversed on-the-fly by setting the KEY_CONVERT
1440 * bit here
1441 */
1442 if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
1443 key_convert = HW_CIPHER_KEY_CONVERT;
1444
1445 break;
1446 default:
1447 KASSERT(0);
1448 break;
1449 }
1450
1451 return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert,
1452 desc->qcd_cipher_dir);
1453 }
1454
1455 uint16_t
1456 qat_crypto_load_auth_cryptoini(
1457 struct qat_crypto_desc *desc, struct cryptoini *cria,
1458 struct qat_sym_hash_def const **hash_def)
1459 {
1460 const struct swcr_auth_hash *sah;
1461 enum qat_sym_hash_algorithm algo = 0;
1462
1463 switch (cria->cri_alg) {
1464 case CRYPTO_MD5_HMAC_96:
1465 algo = QAT_SYM_HASH_MD5;
1466 break;
1467 case CRYPTO_SHA1_HMAC_96:
1468 algo = QAT_SYM_HASH_SHA1;
1469 break;
1470 case CRYPTO_SHA2_256_HMAC:
1471 algo = QAT_SYM_HASH_SHA256;
1472 break;
1473 case CRYPTO_SHA2_384_HMAC:
1474 algo = QAT_SYM_HASH_SHA384;
1475 break;
1476 case CRYPTO_SHA2_512_HMAC:
1477 algo = QAT_SYM_HASH_SHA512;
1478 break;
1479 default:
1480 KASSERT(0);
1481 break;
1482 }
1483 *hash_def = &qat_sym_hash_defs[algo];
1484 sah = (*hash_def)->qshd_alg->qshai_sah;
1485 KASSERT(sah != NULL);
1486 desc->qcd_auth_sz = sah->auth_hash->authsize;
1487
1488 return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
1489 (*hash_def)->qshd_qat->qshqi_algo_enc,
1490 (*hash_def)->qshd_alg->qshai_digest_len);
1491 }
1492
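/*
 * Load the request's data buffer (mbuf chain, uio, or flat buffer) into the
 * cookie's DMA map and build the flat-buffer list; if an ICV offset was
 * requested, record the physical address of the ICV within the mapped
 * segments.
 */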
1493 int
1494 qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp,
1495 struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc,
1496 uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr)
1497 {
1498 int error, i, nsegs;
1499
1500 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1501 struct mbuf *m = (struct mbuf *)crp->crp_buf;
1502
1503 if (icv_offset >= 0) {
1504 if (m_length(m) == icv_offset) {
1505 m_copyback(m, icv_offset, desc->qcd_auth_sz,
1506 icv_buf);
1507 if (m_length(m) == icv_offset)
1508 return ENOBUFS;
1509 } else {
1510 struct mbuf *m0;
1511 m0 = m_pulldown(m, icv_offset,
1512 desc->qcd_auth_sz, NULL);
1513 if (m0 == NULL)
1514 return ENOBUFS;
1515 }
1516 }
1517
1518 error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap,
1519 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1520 if (error == EFBIG) {
1521 struct mbuf *m_new;
1522 m_new = m_defrag(m, M_DONTWAIT);
1523 if (m_new != NULL) {
1524 crp->crp_buf = m_new;
1525 qsc->qsc_buf = m_new;
1526 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1527 qsc->qsc_buf_dmamap, m_new,
1528 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1529 if (error) {
1530 m_freem(m_new);
1531 crp->crp_buf = NULL;
1532 }
1533 }
1534 }
1535
1536 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1537 error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap,
1538 (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1539 } else {
1540 error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap,
1541 crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT);
1542 }
1543 if (error) {
1544 aprint_debug_dev(sc->sc_dev,
1545 "can't load crp_buf, error %d\n", error);
1546 crp->crp_etype = error;
1547 return error;
1548 }
1549
1550 nsegs = qsc->qsc_buf_dmamap->dm_nsegs;
1551 qsc->qsc_buf_list.num_buffers = nsegs;
1552 for (i = 0; i < nsegs; i++) {
1553 struct flat_buffer_desc *flatbuf =
1554 &qsc->qsc_buf_list.phy_buffers[i];
1555 bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr;
1556 bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len;
1557
1558 flatbuf->data_len_in_bytes = len;
1559 flatbuf->phy_buffer = (uint64_t)paddr;
1560
1561 if (icv_offset >= 0) {
1562 if (icv_offset < len)
1563 *icv_paddr = paddr + icv_offset;
1564 else
1565 icv_offset -= len;
1566 }
1567 }
1568
1569 bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
1570 qsc->qsc_buf_dmamap->dm_mapsize,
1571 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1572
1573 return 0;
1574 }
1575
1576 int
1577 qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp,
1578 struct cryptodesc *crde, struct qat_crypto_desc const *desc)
1579 {
1580 uint32_t ivlen = desc->qcd_cipher_blk_sz;
1581
1582 if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
1583 memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen);
1584 } else {
1585 if (crde->crd_flags & CRD_F_ENCRYPT) {
1586 cprng_fast(qsc->qsc_iv_buf, ivlen);
1587 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1588 /* get iv from buf */
1589 m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
1590 qsc->qsc_iv_buf);
1591 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1592 cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
1593 qsc->qsc_iv_buf);
1594 }
1595 }
1596
1597 if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 &&
1598 (crde->crd_flags & CRD_F_IV_PRESENT) == 0) {
1599 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1600 m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
1601 qsc->qsc_iv_buf);
1602 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1603 cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
1604 qsc->qsc_iv_buf);
1605 }
1606 }
1607
1608 return 0;
1609 }
1610
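/*
 * Spread requests across crypto banks (and hence MSI-X vectors) by taking
 * the submitting CPU's index modulo the number of banks.
 */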
1611 static inline struct qat_crypto_bank *
1612 qat_crypto_select_bank(struct qat_crypto *qcy)
1613 {
1614 u_int cpuid = cpu_index(curcpu());
1615
1616 return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
1617 }
1618
1619 int
1620 qat_crypto_process(void *arg, struct cryptop *crp, int hint)
1621 {
1622 struct qat_crypto *qcy = arg;
1623 struct qat_crypto_bank *qcb;
1624 struct qat_session *qs = NULL;
1625 struct qat_crypto_desc const *desc;
1626 struct qat_sym_cookie *qsc = NULL;
1627 struct qat_sym_bulk_cookie *qsbc;
1628 struct cryptodesc *crd, *crda = NULL, *crde = NULL;
1629 bus_addr_t icv_paddr = 0;
1630 int error, icv_offset = -1;
1631 uint8_t icv_buf[CRYPTO_MAX_MAC_LEN];
1632
1633 qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)];
1634 mutex_spin_enter(&qs->qs_session_mtx);
1635 KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
1636 qs->qs_inflight++;
1637 mutex_spin_exit(&qs->qs_session_mtx);
1638
1639 qcb = qat_crypto_select_bank(qcy);
1640
1641 qsc = qat_crypto_alloc_sym_cookie(qcb);
1642 if (qsc == NULL) {
1643 error = ENOBUFS;
1644 goto fail;
1645 }
1646
1647 error = 0;
1648 desc = &qs->qs_dec_desc;
1649 crd = crp->crp_desc;
1650 while (crd != NULL) {
1651 switch (crd->crd_alg) {
1652 case CRYPTO_DES_CBC:
1653 case CRYPTO_3DES_CBC:
1654 case CRYPTO_AES_CBC:
1655 if (crde != NULL)
1656 error = EINVAL;
1657 if (crd->crd_flags & CRD_F_ENCRYPT) {
1658 /* use encrypt desc */
1659 desc = &qs->qs_enc_desc;
1660 if (crda != NULL)
1661 error = ENOTSUP;
1662 }
1663 crde = crd;
1664 break;
1665 case CRYPTO_MD5_HMAC_96:
1666 case CRYPTO_SHA1_HMAC_96:
1667 case CRYPTO_SHA2_256_HMAC:
1668 case CRYPTO_SHA2_384_HMAC:
1669 case CRYPTO_SHA2_512_HMAC:
1670 if (crda != NULL)
1671 error = EINVAL;
1672 if (crde != NULL &&
1673 (crde->crd_flags & CRD_F_ENCRYPT) == 0)
1674 error = EINVAL;
1675 crda = crd;
1676 icv_offset = crd->crd_inject;
1677 break;
1678 }
1679 if (error)
1680 goto fail;
1681
1682 crd = crd->crd_next;
1683 }
1684
1685 qsc->qsc_buf = crp->crp_buf;
1686
1687 if (crde != NULL) {
1688 error = qat_crypto_load_iv(qsc, crp, crde, desc);
1689 if (error)
1690 goto fail;
1691 }
1692
1693 error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf,
1694 icv_offset, &icv_paddr);
1695 if (error)
1696 goto fail;
1697
1698 qsbc = &qsc->u.qsc_bulk_cookie;
1699
1700 qsbc->qsbc_crypto = qcy;
1701 qsbc->qsbc_session = qs;
1702 qsbc->qsbc_cb_tag = crp;
1703
1704 qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc,
1705 crde, crda, icv_paddr);
1706
1707 bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0,
1708 offsetof(struct qat_sym_cookie, qsc_self_dmamap),
1709 BUS_DMASYNC_PREWRITE);
1710
1711 error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx,
1712 (uint32_t *)qsbc->qsbc_msg);
1713 if (error)
1714 goto fail;
1715
1716 return 0;
1717 fail:
1718 if (qsc)
1719 qat_crypto_free_sym_cookie(qcb, qsc);
1720 mutex_spin_enter(&qs->qs_session_mtx);
1721 qs->qs_inflight--;
1722 qat_crypto_check_free_session(qcy, qs);
1723 crp->crp_etype = error;
1724 crypto_done(crp);
1725 return 0;
1726 }
1727
1728 int
1729 qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1730 {
1731 int error, i, bank;
1732 int curname = 0;
1733 char *name;
1734
1735 bank = qcb->qcb_bank;
1736
1737 name = qcb->qcb_ring_names[curname++];
1738 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
1739 error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1740 sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
1741 NULL, NULL, name, &qcb->qcb_sym_tx);
1742 if (error)
1743 return error;
1744
1745 name = qcb->qcb_ring_names[curname++];
1746 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
1747 error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1748 sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
1749 qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
1750 if (error)
1751 return error;
1752
1753 for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1754 struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
1755 struct qat_sym_cookie *qsc;
1756
1757 error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie),
1758 QAT_OPTIMAL_ALIGN);
1759 if (error)
1760 return error;
1761
1762 qsc = qdm->qdm_dma_vaddr;
1763 qsc->qsc_self_dmamap = &qdm->qdm_dma_map;
1764 qsc->qsc_bulk_req_params_buf_paddr =
1765 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1766 u.qsc_bulk_cookie.qsbc_req_params_buf);
1767 qsc->qsc_buffer_list_desc_paddr =
1768 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1769 qsc_buf_list);
1770 qsc->qsc_iv_buf_paddr =
1771 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1772 qsc_iv_buf);
1773 qcb->qcb_symck_free[i] = qsc;
1774 qcb->qcb_symck_free_count++;
1775
1776 error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN,
1777 QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap);
1778 if (error)
1779 return error;
1780 }
1781
1782 return 0;
1783 }
1784
1785 int
1786 qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1787 {
1788 int error;
1789
1790 mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
1791
1792 QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC,
1793 qcb->qcb_ev_no_symck_name, "crypto no_symck");
1794
1795 error = qat_crypto_setup_ring(sc, qcb);
1796 if (error)
1797 return error;
1798
1799 return 0;
1800 }
1801
1802 int
1803 qat_crypto_init(struct qat_softc *sc)
1804 {
1805 struct qat_crypto *qcy = &sc->sc_crypto;
1806 int error, bank, i;
1807 int num_banks;
1808
1809 qcy->qcy_sc = sc;
1810
1811 if (sc->sc_hw.qhw_init_arb != NULL)
1812 num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks);
1813 else
1814 num_banks = sc->sc_ae_num;
1815
1816 qcy->qcy_num_banks = num_banks;
1817
1818 qcy->qcy_banks =
1819 qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
1820
1821 for (bank = 0; bank < num_banks; bank++) {
1822 struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
1823 qcb->qcb_bank = bank;
1824 qcb->qcb_crypto = qcy;
1825 error = qat_crypto_bank_init(sc, qcb);
1826 if (error)
1827 return error;
1828 }
1829
1830 mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET);
1831
1832 for (i = 0; i < QAT_NSESSION; i++) {
1833 struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i];
1834 struct qat_session *qs;
1835
1836 error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session),
1837 QAT_OPTIMAL_ALIGN);
1838 if (error)
1839 return error;
1840
1841 qs = qdm->qdm_dma_vaddr;
1842 qs->qs_lid = i;
1843 qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
1844 qs->qs_dec_desc.qcd_hash_state_paddr =
1845 qs->qs_dec_desc.qcd_desc_paddr +
1846 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1847 qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
1848 offsetof(struct qat_session, qs_enc_desc);
1849 qs->qs_enc_desc.qcd_hash_state_paddr =
1850 qs->qs_enc_desc.qcd_desc_paddr +
1851 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1852
1853 mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET);
1854
1855 qcy->qcy_sessions[i] = qs;
1856 qcy->qcy_session_free[i] = qs;
1857 qcy->qcy_session_free_count++;
1858 }
1859
1860 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC,
1861 qcy->qcy_ev_new_sess_name, "crypto new_sess");
1862 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC,
1863 qcy->qcy_ev_free_sess_name, "crypto free_sess");
1864 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC,
1865 qcy->qcy_ev_no_sess_name, "crypto no_sess");
1866
1867 return 0;
1868 }
1869
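/*
 * qat_crypto_new_session: opencrypto(9) newsession hook.  Take a session
 * from the free list, classify the cryptoini chain into at most one cipher
 * and one HMAC algorithm, lay out the firmware slice sequence for the
 * encrypt and decrypt descriptors, and let the hardware-specific code fill
 * in the descriptor contents.
 */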
1870 int
1871 qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri)
1872 {
1873 struct qat_crypto *qcy = arg;
1874 struct qat_session *qs = NULL;
1875 struct cryptoini *crie = NULL;
1876 struct cryptoini *cria = NULL;
1877 int slice, error;
1878
1879 mutex_spin_enter(&qcy->qcy_crypto_mtx);
1880
1881 if (qcy->qcy_session_free_count == 0) {
1882 QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess);
1883 mutex_spin_exit(&qcy->qcy_crypto_mtx);
1884 return ENOBUFS;
1885 }
1886 qs = qcy->qcy_session_free[--qcy->qcy_session_free_count];
1887 QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess);
1888
1889 mutex_spin_exit(&qcy->qcy_crypto_mtx);
1890
1891 qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
1892 qs->qs_inflight = 0;
1893 *lid = qs->qs_lid;
1894
1895 error = 0;
1896 while (cri) {
1897 switch (cri->cri_alg) {
1898 case CRYPTO_DES_CBC:
1899 case CRYPTO_3DES_CBC:
1900 case CRYPTO_AES_CBC:
1901 if (crie != NULL)
1902 error = EINVAL;
1903 crie = cri;
1904 break;
1905 case CRYPTO_MD5_HMAC_96:
1906 case CRYPTO_SHA1_HMAC_96:
1907 case CRYPTO_SHA2_256_HMAC:
1908 case CRYPTO_SHA2_384_HMAC:
1909 case CRYPTO_SHA2_512_HMAC:
1910 if (cria != NULL)
1911 error = EINVAL;
1912 cria = cri;
1913 break;
1914 default:
1915 error = EINVAL;
1916 }
1917 if (error)
1918 goto fail;
1919 cri = cri->cri_next;
1920 }
1921
1922 slice = 1;
1923 if (crie != NULL && cria != NULL) {
1924 slice = 2;
1925 /* auth then decrypt */
1926 qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
1927 qs->qs_dec_desc.qcd_slices[1] = FW_SLICE_CIPHER;
1928 qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
1929 qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
1930 /* encrypt then auth */
1931 qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1932 qs->qs_enc_desc.qcd_slices[1] = FW_SLICE_AUTH;
1933 qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
1934 qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
1935 } else if (crie != NULL) {
1936 /* decrypt */
1937 qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1938 qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
1939 qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
1940 /* encrypt */
1941 qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1942 qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
1943 qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
1944 } else if (cria != NULL) {
1945 /* auth */
1946 qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
1947 qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
1948 /* auth */
1949 qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_AUTH;
1950 qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
1951 } else {
1952 error = EINVAL;
1953 goto fail;
1954 }
1955 qs->qs_dec_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
1956 qs->qs_enc_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
1957
1958 qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_dec_desc, crie, cria);
1959 qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_enc_desc, crie, cria);
1960
1961 return 0;
1962 fail:
1963 if (qs != NULL) {
1964 mutex_spin_enter(&qs->qs_session_mtx);
1965 qat_crypto_free_session0(qcy, qs);
1966 }
1967 return error;
1968 }
1969
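/*
 * qat_crypto_clean_desc: scrub a crypto descriptor's content descriptor,
 * hash state prefix buffer and cached request with explicit_memset(3) so
 * that per-session key material does not linger after the session is freed.
 */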
1970 static inline void
1971 qat_crypto_clean_desc(struct qat_crypto_desc *desc)
1972 {
1973 explicit_memset(desc->qcd_content_desc, 0,
1974 sizeof(desc->qcd_content_desc));
1975 explicit_memset(desc->qcd_hash_state_prefix_buf, 0,
1976 sizeof(desc->qcd_hash_state_prefix_buf));
1977 explicit_memset(desc->qcd_req_cache, 0,
1978 sizeof(desc->qcd_req_cache));
1979 }
1980
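/*
 * qat_crypto_free_session0: return a session to the free list.  Called with
 * the session mutex held; scrubs both descriptors, clears the ACTIVE flag,
 * drops the session mutex and then takes the driver-wide crypto mutex to
 * put the session back on the free list.
 */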
1981 void
1982 qat_crypto_free_session0(struct qat_crypto *qcy, struct qat_session *qs)
1983 {
1984
1985 qat_crypto_clean_desc(&qs->qs_dec_desc);
1986 qat_crypto_clean_desc(&qs->qs_enc_desc);
1987 qs->qs_status &= ~QAT_SESSION_STATUS_ACTIVE;
1988
1989 mutex_spin_exit(&qs->qs_session_mtx);
1990
1991 mutex_spin_enter(&qcy->qcy_crypto_mtx);
1992
1993 qcy->qcy_session_free[qcy->qcy_session_free_count++] = qs;
1994 QAT_EVCNT_INCR(&qcy->qcy_ev_free_sess);
1995
1996 mutex_spin_exit(&qcy->qcy_crypto_mtx);
1997 }
1998
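/*
 * qat_crypto_check_free_session: called with the session mutex held after
 * the in-flight count changes; finish a deferred free once the last request
 * has completed, otherwise just drop the mutex.
 */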
1999 void
2000 qat_crypto_check_free_session(struct qat_crypto *qcy, struct qat_session *qs)
2001 {
2002
2003 if ((qs->qs_status & QAT_SESSION_STATUS_FREEING) &&
2004 qs->qs_inflight == 0) {
2005 qat_crypto_free_session0(qcy, qs);
2006 } else {
2007 mutex_spin_exit(&qs->qs_session_mtx);
2008 }
2009 }
2010
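/*
 * qat_crypto_free_session: opencrypto(9) freesession hook.  If requests are
 * still in flight, mark the session FREEING and defer the actual free to the
 * completion path; otherwise free it immediately.
 */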
2011 void
2012 qat_crypto_free_session(void *arg, uint64_t sid)
2013 {
2014 struct qat_crypto *qcy = arg;
2015 struct qat_session *qs;
2016
2017 qs = qcy->qcy_sessions[CRYPTO_SESID2LID(sid)];
2018
2019 mutex_spin_enter(&qs->qs_session_mtx);
2020
2021 if (qs->qs_inflight > 0) {
2022 qs->qs_status |= QAT_SESSION_STATUS_FREEING;
2023 mutex_spin_exit(&qs->qs_session_mtx);
2024 return;
2025 }
2026
2027 qat_crypto_free_session0(qcy, qs);
2028 }
2029
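/*
 * qat_crypto_start: obtain an opencrypto(9) driver id and register the
 * supported cipher and HMAC algorithms with it.
 */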
2030 int
2031 qat_crypto_start(struct qat_softc *sc)
2032 {
2033 struct qat_crypto *qcy = &sc->sc_crypto;
2034 int error, i;
2035 static const int algs[] = {
2036 CRYPTO_DES_CBC, CRYPTO_3DES_CBC, CRYPTO_AES_CBC,
2037 CRYPTO_MD5_HMAC_96, CRYPTO_SHA1_HMAC_96, CRYPTO_SHA2_256_HMAC,
2038 CRYPTO_SHA2_384_HMAC, CRYPTO_SHA2_512_HMAC,
2039 };
2040
2041 /* opencrypto */
2042 qcy->qcy_cid = crypto_get_driverid(0);
2043 if (qcy->qcy_cid < 0) {
2044 aprint_error_dev(sc->sc_dev,
2045 "could not get opencrypto driver id\n");
2046 return ENOENT;
2047 }
2048
2049 for (i = 0; i < __arraycount(algs); i++) {
2050 error = crypto_register(qcy->qcy_cid, algs[i], 0, 0,
2051 qat_crypto_new_session, qat_crypto_free_session,
2052 qat_crypto_process, qcy);
2053 if (error) {
2054 aprint_error_dev(sc->sc_dev,
2055 "could not register crypto: %d\n", error);
2056 return error;
2057 }
2058 }
2059
2060 return 0;
2061 }
2062
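/*
 * qat_crypto_sym_rxintr: completion handler for the sym_rx ring.  Recover
 * the cookie pointer stashed in the response message's opaque field, sync
 * and unload the data buffer DMA map, hand the request back to opencrypto,
 * and drop the session's in-flight count (possibly completing a deferred
 * session free).
 */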
2063 int
2064 qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
2065 {
2066 struct qat_crypto_bank *qcb = arg;
2067 struct qat_crypto *qcy;
2068 struct qat_session *qs;
2069 struct qat_sym_cookie *qsc;
2070 struct qat_sym_bulk_cookie *qsbc;
2071 struct cryptop *crp;
2072
2073 qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
2074
2075 qsbc = &qsc->u.qsc_bulk_cookie;
2076 qcy = qsbc->qsbc_crypto;
2077 qs = qsbc->qsbc_session;
2078 crp = qsbc->qsbc_cb_tag;
2079
2080 bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
2081 qsc->qsc_buf_dmamap->dm_mapsize,
2082 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2083 bus_dmamap_unload(sc->sc_dmat, qsc->qsc_buf_dmamap);
2084 qat_crypto_free_sym_cookie(qcb, qsc);
2085
2086 crp->crp_etype = 0;
2087 crypto_done(crp);
2088
2089 mutex_spin_enter(&qs->qs_session_mtx);
2090 KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
2091 qs->qs_inflight--;
2092 qat_crypto_check_free_session(qcy, qs);
2093
2094 return 1;
2095 }
2096
2097 #ifdef QAT_DUMP
2098
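/*
 * qat_dump_raw: debug helper; hex-dump an arbitrary buffer together with the
 * caller's return address when the matching bit is set in qat_dump.
 */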
2099 void
2100 qat_dump_raw(int flag, const char *label, void *d, size_t len)
2101 {
2102 uintptr_t pc;
2103 size_t pos;
2104 uint8_t *dp = (uint8_t *)d;
2105
2106 if ((qat_dump & flag) == 0)
2107 return;
2108
2109 printf("dumping %s at %p len %zu\n", label, d, len);
2110
2111 pc = (uintptr_t)__builtin_return_address(0);
2112 printf("\tcallpc ");
2113 qat_print_sym(pc);
2114 printf("\n");
2115
2116 for (pos = 0; pos < len; pos++) {
2117 if (pos % 32 == 0)
2118 printf("%8zx: ", pos);
2119 else if (pos % 4 == 0)
2120 printf(" ");
2121
2122 printf("%02x", dp[pos]);
2123
2124 if (pos % 32 == 31 || pos + 1 == len)
2125 printf("\n");
2126 }
2127 }
2128
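/*
 * qat_dump_ring: debug helper; print a transport ring's software and
 * hardware head/tail pointers and hex-dump the ring contents, marking the
 * head with '*' and the tail with 'v'.
 */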
2129 void
2130 qat_dump_ring(int bank, int ring)
2131 {
2132 struct qat_softc *sc = gsc;
2133 struct qat_bank *qb = &sc->sc_etr_banks[bank];
2134 struct qat_ring *qr = &qb->qb_et_rings[ring];
2135 u_int offset;
2136 int i;
2137 uint32_t msg;
2138
2139 printf("dumping bank %d ring %d\n", bank, ring);
2140 printf("\tid %d name %s msg size %d ring size %d\n",
2141 qr->qr_ring_id, qr->qr_name,
2142 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
2143 qr->qr_ring_size);
2144 printf("\thost head 0x%08x tail 0x%08x\n", qr->qr_head, qr->qr_tail);
2145 printf("\ttarget head 0x%08x tail 0x%08x\n",
2146 qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
2147 ETR_RING_HEAD_OFFSET),
2148 qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
2149 ETR_RING_TAIL_OFFSET));
2150
2151 printf("\n");
2152 i = 0;
2153 offset = 0;
2154 do {
2155 if (i % 8 == 0)
2156 printf("%8x:", offset);
2157
2158 if (offset == qr->qr_head) {
2159 printf("*");
2160 } else if (offset == qr->qr_tail) {
2161 printf("v");
2162 } else {
2163 printf(" ");
2164 }
2165
2166 msg = *(uint32_t *)((uintptr_t)qr->qr_ring_vaddr + offset);
2167 printf("%08x", htobe32(msg));
2168
2169 if (i % 8 == 7)
2170 printf("\n");
2171
2172 i++;
2173 offset = qat_modulo(offset +
2174 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
2175 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
2176 } while (offset != 0);
2177 }
2178
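/*
 * qat_dump_mbuf: debug helper; hex-dump each mbuf in a chain, including up
 * to 'pre' bytes before and 'post' bytes after the data area (clamped to the
 * underlying buffer), marking the start and end of the valid data.
 */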
2179 void
2180 qat_dump_mbuf(struct mbuf *m0, int pre, int post)
2181 {
2182 struct mbuf *m;
2183
2184 for (m = m0; m != NULL; m = m->m_next) {
2185 size_t pos, len;
2186 uint8_t *buf_start, *data_start, *data_end, *buf_end;
2187 uint8_t *start, *end, *dp;
2188 bool skip_ind;
2189 const char *ind;
2190
2191 printf("dumping mbuf %p len %d flags 0x%08x\n",
2192 m, m->m_len, m->m_flags);
2193 if (m->m_len == 0)
2194 continue;
2195
2196 data_start = (uint8_t *)m->m_data;
2197 data_end = data_start + m->m_len;
2198 switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
2199 case 0:
2200 buf_start = (uint8_t *)M_BUFADDR(m);
2201 buf_end = buf_start +
2202 ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
2203 break;
2204 case M_EXT|M_EXT_CLUSTER:
2205 buf_start = (uint8_t *)m->m_ext.ext_buf;
2206 buf_end = buf_start + m->m_ext.ext_size;
2207 break;
2208 default:
2209 /* XXX */
2210 buf_start = data_start;
2211 buf_end = data_end;
2212 break;
2213 }
2214
2215 start = data_start - pre;
2216 if (start < buf_start)
2217 start = buf_start;
2218 end = data_end + post;
2219 if (end > buf_end)
2220 end = buf_end;
2221
2222 dp = start;
2223 len = (size_t)(end - start);
2224 skip_ind = false;
2225 for (pos = 0; pos < len; pos++) {
2226
2227 if (skip_ind)
2228 ind = "";
2229 else if (&dp[pos] == data_start)
2230 ind = "`";
2231 else
2232 ind = " ";
2233
2234 if (pos % 32 == 0)
2235 printf("%8zx:%s", pos, ind);
2236 else if (pos % 2 == 0)
2237 printf("%s", ind);
2238
2239 printf("%02x", dp[pos]);
2240
2241 skip_ind = false;
2242 if (&dp[pos + 1] == data_end) {
2243 skip_ind = true;
2244 printf("'");
2245 }
2246
2247 if (pos % 32 == 31 || pos + 1 == len) {
2248 printf("\n");
2249 skip_ind = false;
2250 }
2251 }
2252 }
2253 }
2254
2255 #endif /* QAT_DUMP */
2256
2257 MODULE(MODULE_CLASS_DRIVER, qat, "pci,opencrypto");
2258
2259 #ifdef _MODULE
2260 #include "ioconf.c"
2261 #endif
2262
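/*
 * qat_modcmd: module(9) entry point; attach or detach the driver's
 * autoconfiguration glue when built as a loadable module.
 */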
2263 int
2264 qat_modcmd(modcmd_t cmd, void *data)
2265 {
2266 int error = 0;
2267
2268 switch (cmd) {
2269 case MODULE_CMD_INIT:
2270 #ifdef _MODULE
2271 error = config_init_component(cfdriver_ioconf_qat,
2272 cfattach_ioconf_qat, cfdata_ioconf_qat);
2273 #endif
2274 return error;
2275 case MODULE_CMD_FINI:
2276 #ifdef _MODULE
2277 error = config_fini_component(cfdriver_ioconf_qat,
2278 cfattach_ioconf_qat, cfdata_ioconf_qat);
2279 #endif
2280 return error;
2281 default:
2282 return ENOTTY;
2283 }
2284 }
2285