1*fdf161e4Sriastradh /* $NetBSD: qat.c,v 1.8 2022/05/22 11:39:27 riastradh Exp $ */
254e21c12Shikaru
354e21c12Shikaru /*
454e21c12Shikaru * Copyright (c) 2019 Internet Initiative Japan, Inc.
554e21c12Shikaru * All rights reserved.
654e21c12Shikaru *
754e21c12Shikaru * Redistribution and use in source and binary forms, with or without
854e21c12Shikaru * modification, are permitted provided that the following conditions
954e21c12Shikaru * are met:
1054e21c12Shikaru * 1. Redistributions of source code must retain the above copyright
1154e21c12Shikaru * notice, this list of conditions and the following disclaimer.
1254e21c12Shikaru * 2. Redistributions in binary form must reproduce the above copyright
1354e21c12Shikaru * notice, this list of conditions and the following disclaimer in the
1454e21c12Shikaru * documentation and/or other materials provided with the distribution.
1554e21c12Shikaru *
1654e21c12Shikaru * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
1754e21c12Shikaru * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
1854e21c12Shikaru * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1954e21c12Shikaru * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
2054e21c12Shikaru * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2154e21c12Shikaru * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2254e21c12Shikaru * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2354e21c12Shikaru * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2454e21c12Shikaru * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2554e21c12Shikaru * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2654e21c12Shikaru * POSSIBILITY OF SUCH DAMAGE.
2754e21c12Shikaru */
2854e21c12Shikaru
2954e21c12Shikaru /*
3054e21c12Shikaru * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
3154e21c12Shikaru *
3254e21c12Shikaru * Redistribution and use in source and binary forms, with or without
3354e21c12Shikaru * modification, are permitted provided that the following conditions
3454e21c12Shikaru * are met:
3554e21c12Shikaru *
3654e21c12Shikaru * * Redistributions of source code must retain the above copyright
3754e21c12Shikaru * notice, this list of conditions and the following disclaimer.
3854e21c12Shikaru * * Redistributions in binary form must reproduce the above copyright
3954e21c12Shikaru * notice, this list of conditions and the following disclaimer in
4054e21c12Shikaru * the documentation and/or other materials provided with the
4154e21c12Shikaru * distribution.
4254e21c12Shikaru * * Neither the name of Intel Corporation nor the names of its
4354e21c12Shikaru * contributors may be used to endorse or promote products derived
4454e21c12Shikaru * from this software without specific prior written permission.
4554e21c12Shikaru *
4654e21c12Shikaru * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4754e21c12Shikaru * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
4854e21c12Shikaru * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
4954e21c12Shikaru * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
5054e21c12Shikaru * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
5154e21c12Shikaru * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
5254e21c12Shikaru * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5354e21c12Shikaru * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5454e21c12Shikaru * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5554e21c12Shikaru * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
5654e21c12Shikaru * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5754e21c12Shikaru */
5854e21c12Shikaru
5954e21c12Shikaru #include <sys/cdefs.h>
60*fdf161e4Sriastradh __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.8 2022/05/22 11:39:27 riastradh Exp $");
6154e21c12Shikaru
6254e21c12Shikaru #include <sys/param.h>
6354e21c12Shikaru #include <sys/systm.h>
6454e21c12Shikaru #include <sys/kernel.h>
6554e21c12Shikaru #include <sys/device.h>
6654e21c12Shikaru #include <sys/module.h>
6754e21c12Shikaru #include <sys/kmem.h>
6854e21c12Shikaru #include <sys/mutex.h>
6954e21c12Shikaru #include <sys/bitops.h>
7054e21c12Shikaru #include <sys/atomic.h>
7154e21c12Shikaru #include <sys/mbuf.h>
7254e21c12Shikaru #include <sys/cprng.h>
7354e21c12Shikaru #include <sys/cpu.h>
7454e21c12Shikaru #include <sys/interrupt.h>
7554e21c12Shikaru #include <sys/md5.h>
7654e21c12Shikaru #include <sys/sha1.h>
7754e21c12Shikaru #include <sys/sha2.h>
7854e21c12Shikaru
7954e21c12Shikaru #include <opencrypto/cryptodev.h>
8054e21c12Shikaru #include <opencrypto/cryptosoft.h>
8154e21c12Shikaru #include <opencrypto/xform.h>
8254e21c12Shikaru
8354e21c12Shikaru /* XXX same as sys/arch/x86/x86/via_padlock.c */
8454e21c12Shikaru #include <opencrypto/cryptosoft_xform.c>
8554e21c12Shikaru
8654e21c12Shikaru #include <dev/pci/pcireg.h>
8754e21c12Shikaru #include <dev/pci/pcivar.h>
8854e21c12Shikaru #include <dev/pci/pcidevs.h>
8954e21c12Shikaru
9054e21c12Shikaru #include "qatreg.h"
9154e21c12Shikaru #include "qatvar.h"
9254e21c12Shikaru #include "qat_aevar.h"
9354e21c12Shikaru
9454e21c12Shikaru extern struct qat_hw qat_hw_c2xxx;
9554e21c12Shikaru extern struct qat_hw qat_hw_c3xxx;
9654e21c12Shikaru extern struct qat_hw qat_hw_c62x;
9754e21c12Shikaru extern struct qat_hw qat_hw_d15xx;
9854e21c12Shikaru
/*
 * Table of supported QuickAssist devices.  qat_lookup() scans this by
 * PCI vendor/product id; the all-zero sentinel entry (NULL name)
 * terminates the scan.  SR-IOV virtual-function variants are listed
 * but compiled out (#ifdef notyet) until their qat_hw backends exist.
 */
static const struct qat_product {
	pci_vendor_id_t qatp_vendor;	/* PCI vendor id */
	pci_product_id_t qatp_product;	/* PCI product id */
	const char *qatp_name;		/* human-readable device name */
	enum qat_chip_type qatp_chip;	/* chip generation selector */
	const struct qat_hw *qatp_hw;	/* per-chip hw parameters/ops */
} qat_products[] = {

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
	  "Intel C2000 QuickAssist Physical Function",
	  QAT_CHIP_C2XXX, &qat_hw_c2xxx },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
	  "Intel C3000 QuickAssist Physical Function",
	  QAT_CHIP_C3XXX, &qat_hw_c3xxx },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT_VF,
	  "Intel C3000 QuickAssist Virtual Function",
	  QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
	  "Intel C620/Xeon D-2100 QuickAssist Physical Function",
	  QAT_CHIP_C62X, &qat_hw_c62x },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT_VF,
	  "Intel C620/Xeon D-2100 QuickAssist Virtual Function",
	  QAT_CHIP_C62X_IOV, &qat_hw_c62xvf },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
	  "Intel Xeon D-1500 QuickAssist Physical Function",
	  QAT_CHIP_D15XX, &qat_hw_d15xx },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT_VF,
	  "Intel Xeon D-1500 QuickAssist Virtual Function",
	  QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf },
#endif
	{ 0, 0, NULL, 0, NULL },
};
13754e21c12Shikaru
/*
 * Precomputed initial chaining values for each supported hash,
 * expressed as raw bytes in the layout the QAT hardware expects.
 * NOTE(review): SHA values appear big-endian per word while MD5 is
 * little-endian, matching each algorithm's native word order --
 * confirm against the QAT state-setup code before relying on this.
 */

/* md5 16 bytes - Initialiser state can be found in RFC 1321*/
static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = {
	0x01, 0x23, 0x45, 0x67,
	0x89, 0xab, 0xcd, 0xef,
	0xfe, 0xdc, 0xba, 0x98,
	0x76, 0x54, 0x32, 0x10,
};

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
	0x67, 0x45, 0x23, 0x01,
	0xef, 0xcd, 0xab, 0x89,
	0x98, 0xba, 0xdc, 0xfe,
	0x10, 0x32, 0x54, 0x76,
	0xc3, 0xd2, 0xe1, 0xf0
};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67,
	0xbb, 0x67, 0xae, 0x85,
	0x3c, 0x6e, 0xf3, 0x72,
	0xa5, 0x4f, 0xf5, 0x3a,
	0x51, 0x0e, 0x52, 0x7f,
	0x9b, 0x05, 0x68, 0x8c,
	0x1f, 0x83, 0xd9, 0xab,
	0x5b, 0xe0, 0xcd, 0x19
};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
};
19054e21c12Shikaru
19154e21c12Shikaru /* Hash Algorithm specific structure */
19254e21c12Shikaru
/*
 * Per-algorithm parameters (struct qat_sym_hash_alg_info).  Positional
 * initializers are, in order: digest size, block size, initial state
 * bytes, state size, the matching opencrypto software transform (used
 * for HMAC precomputation via cryptosoft_xform.c), the byte offset of
 * the state words inside that transform's context structure, and a
 * final small integer (4 or 8, matching the algorithm's word size --
 * presumably the per-word width used when serializing the state;
 * confirm against qatvar.h).
 */

static const struct qat_sym_hash_alg_info md5_info = {
	QAT_HASH_MD5_DIGEST_SIZE,
	QAT_HASH_MD5_BLOCK_SIZE,
	md5_initial_state,
	QAT_HASH_MD5_STATE_SIZE,
	&swcr_auth_hash_hmac_md5_96,
	offsetof(MD5_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha1_info = {
	QAT_HASH_SHA1_DIGEST_SIZE,
	QAT_HASH_SHA1_BLOCK_SIZE,
	sha1_initial_state,
	QAT_HASH_SHA1_STATE_SIZE,
	&swcr_auth_hash_hmac_sha1_96,
	offsetof(SHA1_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha256_info = {
	QAT_HASH_SHA256_DIGEST_SIZE,
	QAT_HASH_SHA256_BLOCK_SIZE,
	sha256_initial_state,
	QAT_HASH_SHA256_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_256,
	offsetof(SHA256_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha384_info = {
	QAT_HASH_SHA384_DIGEST_SIZE,
	QAT_HASH_SHA384_BLOCK_SIZE,
	sha384_initial_state,
	QAT_HASH_SHA384_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_384,
	offsetof(SHA384_CTX, state),
	8,
};

static const struct qat_sym_hash_alg_info sha512_info = {
	QAT_HASH_SHA512_DIGEST_SIZE,
	QAT_HASH_SHA512_BLOCK_SIZE,
	sha512_initial_state,
	QAT_HASH_SHA512_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_512,
	offsetof(SHA512_CTX, state),
	8,
};

/*
 * AES-GCM has no software-precomputed HMAC state, so the initial state,
 * software transform and context offsets are absent.
 */
static const struct qat_sym_hash_alg_info aes_gcm_info = {
	QAT_HASH_AES_GCM_DIGEST_SIZE,
	QAT_HASH_AES_GCM_BLOCK_SIZE,
	NULL, 0,
	NULL, 0, 0, /* XXX */
};
24954e21c12Shikaru
25054e21c12Shikaru /* Hash QAT specific structures */
25154e21c12Shikaru
/*
 * Hardware-facing parameters (struct qat_sym_hash_qat_info).
 * Positional initializers: QAT auth algorithm id, block size, and the
 * hardware state1/state2 region sizes used when laying out the
 * content descriptor.
 */

static const struct qat_sym_hash_qat_info md5_config = {
	HW_AUTH_ALGO_MD5,
	QAT_HASH_MD5_BLOCK_SIZE,
	HW_MD5_STATE1_SZ,
	HW_MD5_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha1_config = {
	HW_AUTH_ALGO_SHA1,
	QAT_HASH_SHA1_BLOCK_SIZE,
	HW_SHA1_STATE1_SZ,
	HW_SHA1_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha256_config = {
	HW_AUTH_ALGO_SHA256,
	QAT_HASH_SHA256_BLOCK_SIZE,
	HW_SHA256_STATE1_SZ,
	HW_SHA256_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha384_config = {
	HW_AUTH_ALGO_SHA384,
	QAT_HASH_SHA384_BLOCK_SIZE,
	HW_SHA384_STATE1_SZ,
	HW_SHA384_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha512_config = {
	HW_AUTH_ALGO_SHA512,
	QAT_HASH_SHA512_BLOCK_SIZE,
	HW_SHA512_STATE1_SZ,
	HW_SHA512_STATE2_SZ
};

/* GCM's state2 region holds H, the length block and the E(K, CTR0). */
static const struct qat_sym_hash_qat_info aes_gcm_config = {
	HW_AUTH_ALGO_GALOIS_128,
	0,	/* block size not used for GCM here */
	HW_GALOIS_128_STATE1_SZ,
	HW_GALOIS_H_SZ +
	HW_GALOIS_LEN_A_SZ +
	HW_GALOIS_E_CTR0_SZ
};
29554e21c12Shikaru
/*
 * Master table indexed by enum qat_sym_hash: pairs each algorithm's
 * software parameters (*_info) with its hardware parameters (*_config).
 */
static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
	[QAT_SYM_HASH_MD5] = { &md5_info, &md5_config },
	[QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
	[QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
	[QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
	[QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
	[QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
};
30454e21c12Shikaru
30554e21c12Shikaru const struct qat_product *
30654e21c12Shikaru qat_lookup(const struct pci_attach_args *);
30754e21c12Shikaru int qat_match(struct device *, struct cfdata *, void *);
30854e21c12Shikaru void qat_attach(struct device *, struct device *, void *);
30954e21c12Shikaru void qat_init(struct device *);
31054e21c12Shikaru int qat_start(struct device *);
31154e21c12Shikaru int qat_detach(struct device *, int);
31254e21c12Shikaru
31354e21c12Shikaru int qat_alloc_msix_intr(struct qat_softc *,
31454e21c12Shikaru struct pci_attach_args *);
31554e21c12Shikaru void * qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t,
31654e21c12Shikaru int (*)(void *), void *, const char *, int);
31754e21c12Shikaru int qat_setup_msix_intr(struct qat_softc *);
31854e21c12Shikaru
31954e21c12Shikaru int qat_etr_init(struct qat_softc *);
32054e21c12Shikaru int qat_etr_bank_init(struct qat_softc *, int);
32154e21c12Shikaru
32254e21c12Shikaru int qat_etr_ap_bank_init(struct qat_softc *);
32354e21c12Shikaru void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
32454e21c12Shikaru void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
32554e21c12Shikaru uint32_t, int);
32654e21c12Shikaru void qat_etr_ap_bank_setup_ring(struct qat_softc *,
32754e21c12Shikaru struct qat_ring *);
32854e21c12Shikaru int qat_etr_verify_ring_size(uint32_t, uint32_t);
32954e21c12Shikaru
33054e21c12Shikaru int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
33154e21c12Shikaru struct qat_ring *);
33254e21c12Shikaru int qat_etr_bank_intr(void *);
33354e21c12Shikaru
33454e21c12Shikaru void qat_arb_update(struct qat_softc *, struct qat_bank *);
33554e21c12Shikaru
33654e21c12Shikaru struct qat_sym_cookie *
33754e21c12Shikaru qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *);
33854e21c12Shikaru void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
33954e21c12Shikaru struct qat_sym_cookie *);
34054e21c12Shikaru int qat_crypto_load_buf(struct qat_softc *, struct cryptop *,
34154e21c12Shikaru struct qat_sym_cookie *, struct qat_crypto_desc const *,
34254e21c12Shikaru uint8_t *, int, bus_addr_t *);
34354e21c12Shikaru int qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *,
34454e21c12Shikaru struct cryptodesc *, struct qat_crypto_desc const *);
34554e21c12Shikaru int qat_crypto_process(void *, struct cryptop *, int);
34654e21c12Shikaru int qat_crypto_setup_ring(struct qat_softc *,
34754e21c12Shikaru struct qat_crypto_bank *);
34854e21c12Shikaru int qat_crypto_new_session(void *, uint32_t *, struct cryptoini *);
349*fdf161e4Sriastradh void qat_crypto_free_session0(struct qat_crypto *,
35054e21c12Shikaru struct qat_session *);
35154e21c12Shikaru void qat_crypto_check_free_session(struct qat_crypto *,
35254e21c12Shikaru struct qat_session *);
353*fdf161e4Sriastradh void qat_crypto_free_session(void *, uint64_t);
35454e21c12Shikaru int qat_crypto_bank_init(struct qat_softc *,
35554e21c12Shikaru struct qat_crypto_bank *);
35654e21c12Shikaru int qat_crypto_init(struct qat_softc *);
35754e21c12Shikaru int qat_crypto_start(struct qat_softc *);
35854e21c12Shikaru int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
35954e21c12Shikaru
36054e21c12Shikaru CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc),
36154e21c12Shikaru qat_match, qat_attach, qat_detach, NULL);
36254e21c12Shikaru
36354e21c12Shikaru struct qat_softc *gsc = NULL;
36454e21c12Shikaru
36554e21c12Shikaru #ifdef QAT_DUMP
36654e21c12Shikaru int qat_dump = QAT_DUMP;
36754e21c12Shikaru #endif
36854e21c12Shikaru
36954e21c12Shikaru const struct qat_product *
qat_lookup(const struct pci_attach_args * pa)37054e21c12Shikaru qat_lookup(const struct pci_attach_args *pa)
37154e21c12Shikaru {
37254e21c12Shikaru const struct qat_product *qatp;
37354e21c12Shikaru
37454e21c12Shikaru for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
37554e21c12Shikaru if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor &&
37654e21c12Shikaru PCI_PRODUCT(pa->pa_id) == qatp->qatp_product)
37754e21c12Shikaru return qatp;
37854e21c12Shikaru }
37954e21c12Shikaru return NULL;
38054e21c12Shikaru }
38154e21c12Shikaru
38254e21c12Shikaru int
qat_match(struct device * parent,struct cfdata * cf,void * aux)38354e21c12Shikaru qat_match(struct device *parent, struct cfdata *cf, void *aux)
38454e21c12Shikaru {
38554e21c12Shikaru struct pci_attach_args *pa = aux;
38654e21c12Shikaru
38754e21c12Shikaru if (qat_lookup(pa) != NULL)
38854e21c12Shikaru return 1;
38954e21c12Shikaru
39054e21c12Shikaru return 0;
39154e21c12Shikaru }
39254e21c12Shikaru
39354e21c12Shikaru void
qat_attach(struct device * parent,struct device * self,void * aux)39454e21c12Shikaru qat_attach(struct device *parent, struct device *self, void *aux)
39554e21c12Shikaru {
39654e21c12Shikaru struct qat_softc *sc = device_private(self);
39754e21c12Shikaru struct pci_attach_args *pa = aux;
39854e21c12Shikaru pci_chipset_tag_t pc = pa->pa_pc;
39954e21c12Shikaru const struct qat_product *qatp;
40054e21c12Shikaru char cap[256];
40154e21c12Shikaru pcireg_t cmd, memtype, msixoff, fusectl;
40254e21c12Shikaru bus_size_t msixtbl_offset;
40354e21c12Shikaru int i, bar, msixtbl_bar;
40454e21c12Shikaru
40554e21c12Shikaru sc->sc_dev = self;
40654e21c12Shikaru sc->sc_pc = pc;
40754e21c12Shikaru sc->sc_pcitag = pa->pa_tag;
40854e21c12Shikaru
40954e21c12Shikaru gsc = sc; /* for debug */
41054e21c12Shikaru
41154e21c12Shikaru qatp = qat_lookup(pa);
41254e21c12Shikaru KASSERT(qatp != NULL);
41354e21c12Shikaru
41454e21c12Shikaru if (pci_dma64_available(pa))
41554e21c12Shikaru sc->sc_dmat = pa->pa_dmat64;
41654e21c12Shikaru else
41754e21c12Shikaru sc->sc_dmat = pa->pa_dmat;
41854e21c12Shikaru
41954e21c12Shikaru aprint_naive(": Crypto processor\n");
42054e21c12Shikaru sc->sc_rev = PCI_REVISION(pa->pa_class);
42154e21c12Shikaru aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev);
42254e21c12Shikaru
42354e21c12Shikaru memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
42454e21c12Shikaru
42554e21c12Shikaru /* Determine active accelerators and engines */
42654e21c12Shikaru sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
42754e21c12Shikaru sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
42854e21c12Shikaru
42954e21c12Shikaru sc->sc_accel_num = 0;
43054e21c12Shikaru for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
43154e21c12Shikaru if (sc->sc_accel_mask & (1 << i))
43254e21c12Shikaru sc->sc_accel_num++;
43354e21c12Shikaru }
43454e21c12Shikaru sc->sc_ae_num = 0;
43554e21c12Shikaru for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
43654e21c12Shikaru if (sc->sc_ae_mask & (1 << i)) {
43754e21c12Shikaru sc->sc_ae_num++;
43854e21c12Shikaru }
43954e21c12Shikaru }
44054e21c12Shikaru
44154e21c12Shikaru if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
44254e21c12Shikaru aprint_error_dev(sc->sc_dev, "couldn't find acceleration");
44354e21c12Shikaru goto fail;
44454e21c12Shikaru }
44554e21c12Shikaru
44654e21c12Shikaru KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL);
44754e21c12Shikaru KASSERT(sc->sc_ae_num <= MAX_NUM_AE);
44854e21c12Shikaru
44954e21c12Shikaru /* Determine SKU and capabilities */
45054e21c12Shikaru sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
45154e21c12Shikaru sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
45254e21c12Shikaru sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
45354e21c12Shikaru
45454e21c12Shikaru aprint_normal_dev(sc->sc_dev,
45554e21c12Shikaru "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n",
45654e21c12Shikaru sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask,
45754e21c12Shikaru sc->sc_ae_num, sc->sc_ae_mask);
45854e21c12Shikaru snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
45954e21c12Shikaru aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap);
46054e21c12Shikaru
46154e21c12Shikaru /* Map BARs */
46254e21c12Shikaru
46354e21c12Shikaru msixtbl_bar = 0;
46454e21c12Shikaru msixtbl_offset = 0;
46554e21c12Shikaru if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) {
46654e21c12Shikaru pcireg_t msixtbl;
46754e21c12Shikaru msixtbl = pci_conf_read(pc, pa->pa_tag,
46854e21c12Shikaru msixoff + PCI_MSIX_TBLOFFSET);
46954e21c12Shikaru msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
47054e21c12Shikaru msixtbl_bar = PCI_MAPREG_START +
471ec9e7766Smsaitoh ((msixtbl & PCI_MSIX_TBLBIR_MASK) << 2);
47254e21c12Shikaru }
47354e21c12Shikaru
47454e21c12Shikaru i = 0;
47554e21c12Shikaru if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
47654e21c12Shikaru KASSERT(sc->sc_hw.qhw_sram_bar_id == 0);
47754e21c12Shikaru fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
47854e21c12Shikaru /* Skip SRAM BAR */
47954e21c12Shikaru i = (fusectl & FUSECTL_MASK) ? 1 : 0;
48054e21c12Shikaru }
48154e21c12Shikaru for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) {
48254e21c12Shikaru bus_size_t size;
48354e21c12Shikaru bus_addr_t addr;
48454e21c12Shikaru
48554e21c12Shikaru if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0)
48654e21c12Shikaru continue;
48754e21c12Shikaru
48854e21c12Shikaru if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM)
48954e21c12Shikaru continue;
49054e21c12Shikaru
49154e21c12Shikaru /* MSI-X table will be mapped by pci_msix_alloc_map */
49254e21c12Shikaru if (bar == msixtbl_bar)
49354e21c12Shikaru size = msixtbl_offset;
49454e21c12Shikaru else
49554e21c12Shikaru size = 0;
49654e21c12Shikaru
49754e21c12Shikaru if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0,
49854e21c12Shikaru &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) {
49954e21c12Shikaru aprint_error_dev(sc->sc_dev,
50054e21c12Shikaru "couldn't map bar 0x%02x\n", bar);
50154e21c12Shikaru goto fail;
50254e21c12Shikaru }
50354e21c12Shikaru
50454e21c12Shikaru aprint_verbose_dev(sc->sc_dev,
50554e21c12Shikaru "region #%d bar 0x%02x size 0x%x at 0x%llx"
50654e21c12Shikaru " mapped to %p\n", i, bar,
50754e21c12Shikaru (int)sc->sc_csrs[i], (unsigned long long)addr,
50854e21c12Shikaru bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i]));
50954e21c12Shikaru
51054e21c12Shikaru i++;
51154e21c12Shikaru if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT)
51254e21c12Shikaru bar += 4;
51354e21c12Shikaru }
51454e21c12Shikaru
51554e21c12Shikaru /* XXX Enable advanced error reporting */
51654e21c12Shikaru
51754e21c12Shikaru /* Enable bus mastering */
51854e21c12Shikaru cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
51954e21c12Shikaru cmd |= PCI_COMMAND_MASTER_ENABLE;
52054e21c12Shikaru pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
52154e21c12Shikaru
52254e21c12Shikaru if (qat_alloc_msix_intr(sc, pa))
52354e21c12Shikaru goto fail;
52454e21c12Shikaru
52554e21c12Shikaru config_mountroot(self, qat_init);
52654e21c12Shikaru
52754e21c12Shikaru fail:
52854e21c12Shikaru /* XXX */
52954e21c12Shikaru return;
53054e21c12Shikaru }
53154e21c12Shikaru
/*
 * qat_init:
 *
 *	Deferred initialization, run from config_mountroot() once the
 *	root filesystem (and thus the firmware image) is available.
 *	Brings the device up in strict order: transport rings, admin
 *	comms, arbiter, acceleration engines, firmware, interrupts,
 *	crypto service, error correction and the watchdog timer, then
 *	calls qat_start().  Optional steps (NULL qhw_* hooks) are
 *	skipped.  On any failure an error is printed and the device is
 *	left inoperative (no unwind -- mirrors qat_attach's XXX).
 */
void
qat_init(struct device *self)
{
	int error;
	struct qat_softc *sc = device_private(self);

	aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n");
	error = qat_etr_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize ETR: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n");
	if (sc->sc_hw.qhw_init_admin_comms != NULL &&
	    (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize admin comms: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n");
	if (sc->sc_hw.qhw_init_arb != NULL &&
	    (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize hw arbiter: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n");
	error = qat_ae_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize Acceleration Engine: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n");
	error = qat_aefw_load(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not load firmware: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n");
	error = qat_setup_msix_intr(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not setup interrupts: %d\n", error);
		return;
	}

	sc->sc_hw.qhw_enable_intr(sc);

	error = qat_crypto_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize service: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n");
	if (sc->sc_hw.qhw_enable_error_correction != NULL)
		sc->sc_hw.qhw_enable_error_correction(sc);

	aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n");
	if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
	    (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize watchdog timer: %d\n", error);
		return;
	}

	error = qat_start(self);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not start: %d\n", error);
		return;
	}
}
61454e21c12Shikaru
61554e21c12Shikaru int
qat_start(struct device * self)61654e21c12Shikaru qat_start(struct device *self)
61754e21c12Shikaru {
61854e21c12Shikaru struct qat_softc *sc = device_private(self);
61954e21c12Shikaru int error;
62054e21c12Shikaru
62154e21c12Shikaru error = qat_ae_start(sc);
62254e21c12Shikaru if (error)
62354e21c12Shikaru return error;
62454e21c12Shikaru
62554e21c12Shikaru if (sc->sc_hw.qhw_send_admin_init != NULL &&
62654e21c12Shikaru (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
62754e21c12Shikaru return error;
62854e21c12Shikaru }
62954e21c12Shikaru
63054e21c12Shikaru error = qat_crypto_start(sc);
63154e21c12Shikaru if (error)
63254e21c12Shikaru return error;
63354e21c12Shikaru
63454e21c12Shikaru return 0;
63554e21c12Shikaru }
63654e21c12Shikaru
/*
 * qat_detach:
 *
 *	Autoconf detach routine.  Currently a stub that always succeeds;
 *	none of the resources acquired in qat_attach()/qat_init() are
 *	released (XXX).
 */
int
qat_detach(struct device *self, int flags)
{

	return 0;
}
64354e21c12Shikaru
64454e21c12Shikaru void *
qat_alloc_mem(size_t size)64554e21c12Shikaru qat_alloc_mem(size_t size)
64654e21c12Shikaru {
64754e21c12Shikaru size_t *sptr;
64854e21c12Shikaru sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP);
64954e21c12Shikaru *sptr = size;
65054e21c12Shikaru return ++sptr;
65154e21c12Shikaru }
65254e21c12Shikaru
65354e21c12Shikaru void
qat_free_mem(void * ptr)65454e21c12Shikaru qat_free_mem(void *ptr)
65554e21c12Shikaru {
65654e21c12Shikaru size_t *sptr = ptr, size;
65754e21c12Shikaru size = *(--sptr);
65854e21c12Shikaru kmem_free(sptr, size + sizeof(size));
65954e21c12Shikaru }
66054e21c12Shikaru
/*
 * qat_free_dmamem:
 *
 *	Tear down a DMA region set up by qat_alloc_dmamem(): unload and
 *	destroy the DMA map, unmap and free the (single) segment, then
 *	scrub the descriptor so stale handles cannot be reused.
 */
void
qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
{

	bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map);
	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size);
	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
	/* explicit_memset: wipe survives optimization, unlike memset */
	explicit_memset(qdm, 0, sizeof(*qdm));
}
67154e21c12Shikaru
/*
 * qat_alloc_dmamem:
 *
 *	Allocate, map, and load a DMA-safe memory region of 'size' bytes
 *	aligned to 'alignment', filling in *qdm.  Exactly one physical
 *	segment is requested, since callers rely on a single contiguous
 *	range.  Returns 0 on success or a bus_dma error code; on failure
 *	every partially acquired resource is released via the goto
 *	cleanup ladder below.
 */
int
qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
    bus_size_t size, bus_size_t alignment)
{
	int error = 0, nseg;

	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment,
	    0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't allocate dmamem, error = %d\n", error);
		goto fail_0;
	}
	KASSERT(nseg == 1);
	error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg,
	    nseg, size, &qdm->qdm_dma_vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't map dmamem, error = %d\n", error);
		goto fail_1;
	}
	qdm->qdm_dma_size = size;
	error = bus_dmamap_create(sc->sc_dmat, size, nseg, size,
	    0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't create dmamem map, error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map,
	    qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't load dmamem map, error = %d\n", error);
		goto fail_3;
	}

	return 0;
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
	qdm->qdm_dma_map = NULL;
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size);
	qdm->qdm_dma_vaddr = NULL;
	qdm->qdm_dma_size = 0;
fail_1:
	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
fail_0:
	return error;
}
72354e21c12Shikaru
/*
 * qat_alloc_msix_intr:
 *
 *	Allocate the device's MSI-X vectors: one per ETR bank plus one
 *	for the AE cluster, which sits qhw_msix_ae_vec_gap vectors above
 *	the last bank vector.  On success sc->sc_ih holds the interrupt
 *	handles.  Returns 0 or the pci_msix_alloc_map() error.
 */
int
qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa)
{
	u_int *ih_map, vec;
	int error, count, ihi;

	/* One vector per bank, plus the AE cluster vector. */
	count = sc->sc_hw.qhw_num_banks + 1;
	ih_map = qat_alloc_mem(sizeof(*ih_map) * count);
	ihi = 0;

	for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++)
		ih_map[ihi++] = vec;

	/* The AE cluster vector follows the hardware-defined gap. */
	vec += sc->sc_hw.qhw_msix_ae_vec_gap;
	ih_map[ihi++] = vec;

	error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count);
	qat_free_mem(ih_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n",
		    count, error);
	}

	return error;
}
74954e21c12Shikaru
75054e21c12Shikaru void *
qat_establish_msix_intr(struct qat_softc * sc,pci_intr_handle_t ih,int (* func)(void *),void * arg,const char * name,int index)75154e21c12Shikaru qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih,
75254e21c12Shikaru int (*func)(void *), void *arg,
75354e21c12Shikaru const char *name, int index)
75454e21c12Shikaru {
75554e21c12Shikaru kcpuset_t *affinity;
75654e21c12Shikaru int error;
75754e21c12Shikaru char buf[PCI_INTRSTR_LEN];
75854e21c12Shikaru char intrxname[INTRDEVNAMEBUF];
75954e21c12Shikaru const char *intrstr;
76054e21c12Shikaru void *cookie;
76154e21c12Shikaru
76254e21c12Shikaru snprintf(intrxname, sizeof(intrxname), "%s%s%d",
76354e21c12Shikaru device_xname(sc->sc_dev), name, index);
76454e21c12Shikaru
76554e21c12Shikaru intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf));
76654e21c12Shikaru
76754e21c12Shikaru pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true);
76854e21c12Shikaru
76954e21c12Shikaru cookie = pci_intr_establish_xname(sc->sc_pc, ih,
77054e21c12Shikaru IPL_NET, func, arg, intrxname);
77154e21c12Shikaru
77254e21c12Shikaru aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n",
77354e21c12Shikaru name, index, intrstr);
77454e21c12Shikaru
77554e21c12Shikaru kcpuset_create(&affinity, true);
77654e21c12Shikaru kcpuset_set(affinity, index % ncpu);
77754e21c12Shikaru error = interrupt_distribute(cookie, affinity, NULL);
77854e21c12Shikaru if (error) {
77954e21c12Shikaru aprint_error_dev(sc->sc_dev,
78054e21c12Shikaru "couldn't distribute interrupt: %s%d\n", name, index);
78154e21c12Shikaru }
78254e21c12Shikaru kcpuset_destroy(affinity);
78354e21c12Shikaru
78454e21c12Shikaru return cookie;
78554e21c12Shikaru }
78654e21c12Shikaru
/*
 * qat_setup_msix_intr:
 *
 *	Hook up the previously allocated MSI-X vectors: one handler per
 *	ETR bank, then one for the AE cluster.  Returns 0 on success or
 *	ENOMEM if any handler could not be established.
 */
int
qat_setup_msix_intr(struct qat_softc *sc)
{
	int i;
	pci_intr_handle_t ih;

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
		struct qat_bank *qb = &sc->sc_etr_banks[i];
		ih = sc->sc_ih[i];

		qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih,
		    qat_etr_bank_intr, qb, "bank", i);
		if (qb->qb_ih_cookie == NULL)
			return ENOMEM;
	}

	/*
	 * After the loop i == qhw_num_banks, which indexes the AE
	 * cluster vector (the last handle allocated in
	 * qat_alloc_msix_intr()).
	 */
	sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i],
	    qat_ae_cluster_intr, sc, "aeclust", 0);
	if (sc->sc_ae_ih_cookie == NULL)
		return ENOMEM;

	return 0;
}
81054e21c12Shikaru
/*
 * qat_etr_init:
 *
 *	Allocate and initialize all ET ring banks and, when the hardware
 *	has them, the autopush banks.  Returns 0 on success; on failure
 *	both bank arrays are freed and the first error is returned.
 */
int
qat_etr_init(struct qat_softc *sc)
{
	int i;
	int error = 0;

	sc->sc_etr_banks = qat_alloc_mem(
	    sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
		error = qat_etr_bank_init(sc, i);
		if (error) {
			goto fail;
		}
	}

	if (sc->sc_hw.qhw_num_ap_banks) {
		sc->sc_etr_ap_banks = qat_alloc_mem(
		    sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
		error = qat_etr_ap_bank_init(sc);
		if (error) {
			goto fail;
		}
	}

	return 0;

fail:
	/*
	 * NOTE(review): the mutexes/evcnts set up by successful
	 * qat_etr_bank_init() calls are not destroyed here before the
	 * bank array is freed — confirm this is acceptable given that
	 * attach fails entirely on this path.
	 */
	if (sc->sc_etr_banks != NULL) {
		qat_free_mem(sc->sc_etr_banks);
		sc->sc_etr_banks = NULL;
	}
	if (sc->sc_etr_ap_banks != NULL) {
		qat_free_mem(sc->sc_etr_ap_banks);
		sc->sc_etr_ap_banks = NULL;
	}
	return error;
}
84954e21c12Shikaru
/*
 * qat_etr_bank_init:
 *
 *	Initialize one ET ring bank: software state (mutex, event
 *	counters), per-ring inflight accounting, and the bank's
 *	interrupt-source CSRs.  TX rings get their own inflight counter;
 *	the RX ring tx_rx_gap slots above a TX ring shares that TX
 *	ring's counter.  Always returns 0.
 */
int
qat_etr_bank_init(struct qat_softc *sc, int bank)
{
	struct qat_bank *qb = &sc->sc_etr_banks[bank];
	int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;

	KASSERT(bank < sc->sc_hw.qhw_num_banks);

	mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET);

	qb->qb_sc = sc;
	qb->qb_bank = bank;
	qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
	QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR,
	    qb->qb_ev_rxintr_name, "bank%d rxintr", bank);

	/* Clean CSRs for all rings within the bank */
	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
		struct qat_ring *qr = &qb->qb_et_rings[i];

		qat_etr_bank_ring_write_4(sc, bank, i,
		    ETR_RING_CONFIG, 0);
		qat_etr_bank_ring_base_write_8(sc, bank, i, 0);

		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
			/* TX ring: owns a fresh inflight counter. */
			qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
		} else if (sc->sc_hw.qhw_tx_rings_mask &
		    (1 << (i - tx_rx_gap))) {
			/* Share inflight counter with rx and tx */
			qr->qr_inflight =
			    qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
		}
	}

	if (sc->sc_hw.qhw_init_etr_intr != NULL) {
		/* Chip-specific interrupt init takes precedence. */
		sc->sc_hw.qhw_init_etr_intr(sc, bank);
	} else {
		/* common code in qat 1.7 */
		qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
		    ETR_INT_REG_CLEAR_MASK);
		for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
		    ETR_RINGS_PER_INT_SRCSEL; i++) {
			qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
			    (i * ETR_INT_SRCSEL_NEXT_OFFSET),
			    ETR_INT_SRCSEL_MASK);
		}
	}

	return 0;
}
90054e21c12Shikaru
90154e21c12Shikaru int
qat_etr_ap_bank_init(struct qat_softc * sc)90254e21c12Shikaru qat_etr_ap_bank_init(struct qat_softc *sc)
90354e21c12Shikaru {
90454e21c12Shikaru int ap_bank;
90554e21c12Shikaru
90654e21c12Shikaru for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
90754e21c12Shikaru struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
90854e21c12Shikaru
90954e21c12Shikaru qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
91054e21c12Shikaru ETR_AP_NF_MASK_INIT);
91154e21c12Shikaru qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
91254e21c12Shikaru qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
91354e21c12Shikaru ETR_AP_NE_MASK_INIT);
91454e21c12Shikaru qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
91554e21c12Shikaru
91654e21c12Shikaru memset(qab, 0, sizeof(*qab));
91754e21c12Shikaru }
91854e21c12Shikaru
91954e21c12Shikaru return 0;
92054e21c12Shikaru }
92154e21c12Shikaru
92254e21c12Shikaru void
qat_etr_ap_bank_set_ring_mask(uint32_t * ap_mask,uint32_t ring,int set_mask)92354e21c12Shikaru qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
92454e21c12Shikaru {
92554e21c12Shikaru if (set_mask)
92654e21c12Shikaru *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
92754e21c12Shikaru else
92854e21c12Shikaru *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
92954e21c12Shikaru }
93054e21c12Shikaru
/*
 * qat_etr_ap_bank_set_ring_dest:
 *
 *	Fill in (or clear, when set_dest == 0) the per-AE destination
 *	bytes of an autopush destination word.  One byte per enabled
 *	accelerator engine, written through a uint8_t view of *ap_dest,
 *	up to ETR_MAX_AE_PER_MAILBOX entries.
 */
void
qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
    uint32_t ring, int set_dest)
{
	uint32_t ae_mask;
	uint8_t mailbox, ae, nae;
	/* Byte-wise view: each enabled AE occupies one byte of the word. */
	uint8_t *dest = (uint8_t *)ap_dest;

	mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);

	nae = 0;
	ae_mask = sc->sc_ae_mask;
	for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
		/* Skip engines absent from the enabled-AE mask. */
		if ((ae_mask & (1 << ae)) == 0)
			continue;

		if (set_dest) {
			dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
			    __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
			    ETR_AP_DEST_ENABLE;
		} else {
			dest[nae] = 0;
		}
		nae++;
		if (nae == ETR_MAX_AE_PER_MAILBOX)
			break;

	}
}
96054e21c12Shikaru
/*
 * qat_etr_ap_bank_setup_ring:
 *
 *	Register a ring with its autopush bank.  Rings without a
 *	callback (request/TX rings) go into the nearly-empty (NE) mask;
 *	rings with a callback (response/RX rings) go into the
 *	nearly-full (NF) mask.  The destination CSR is written only the
 *	first time the bank's destination word becomes non-zero.
 */
void
qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
{
	struct qat_ap_bank *qab;
	int ap_bank;

	/* Nothing to do on hardware without autopush banks. */
	if (sc->sc_hw.qhw_num_ap_banks == 0)
		return;

	ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
	KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks);
	qab = &sc->sc_etr_ap_banks[ap_bank];

	if (qr->qr_cb == NULL) {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
		if (!qab->qab_ne_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
			    qab->qab_ne_dest);
		}
	} else {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
		if (!qab->qab_nf_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
			    qab->qab_nf_dest);
		}
	}
}
99254e21c12Shikaru
99354e21c12Shikaru int
qat_etr_verify_ring_size(uint32_t msg_size,uint32_t num_msgs)99454e21c12Shikaru qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
99554e21c12Shikaru {
99654e21c12Shikaru int i = QAT_MIN_RING_SIZE;
99754e21c12Shikaru
99854e21c12Shikaru for (; i <= QAT_MAX_RING_SIZE; i++)
99954e21c12Shikaru if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
100054e21c12Shikaru return i;
100154e21c12Shikaru
100254e21c12Shikaru return QAT_DEFAULT_RING_SIZE;
100354e21c12Shikaru }
100454e21c12Shikaru
100554e21c12Shikaru int
qat_etr_setup_ring(struct qat_softc * sc,int bank,uint32_t ring,uint32_t num_msgs,uint32_t msg_size,qat_cb_t cb,void * cb_arg,const char * name,struct qat_ring ** rqr)100654e21c12Shikaru qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
100754e21c12Shikaru uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
100854e21c12Shikaru const char *name, struct qat_ring **rqr)
100954e21c12Shikaru {
101054e21c12Shikaru struct qat_bank *qb;
101154e21c12Shikaru struct qat_ring *qr = NULL;
101254e21c12Shikaru int error;
101354e21c12Shikaru uint32_t ring_size_bytes, ring_config;
101454e21c12Shikaru uint64_t ring_base;
101554e21c12Shikaru uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
101654e21c12Shikaru uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
101754e21c12Shikaru
101854e21c12Shikaru KASSERT(bank < sc->sc_hw.qhw_num_banks);
101954e21c12Shikaru
102054e21c12Shikaru /* Allocate a ring from specified bank */
102154e21c12Shikaru qb = &sc->sc_etr_banks[bank];
102254e21c12Shikaru
102354e21c12Shikaru if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
102454e21c12Shikaru return EINVAL;
102554e21c12Shikaru if (qb->qb_allocated_rings & (1 << ring))
102654e21c12Shikaru return ENOENT;
102754e21c12Shikaru qr = &qb->qb_et_rings[ring];
102854e21c12Shikaru qb->qb_allocated_rings |= 1 << ring;
102954e21c12Shikaru
10303c0f0e63Smsaitoh /* Initialize allocated ring */
103154e21c12Shikaru qr->qr_ring = ring;
103254e21c12Shikaru qr->qr_bank = bank;
103354e21c12Shikaru qr->qr_name = name;
103454e21c12Shikaru qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
103554e21c12Shikaru qr->qr_ring_mask = (1 << ring);
103654e21c12Shikaru qr->qr_cb = cb;
103754e21c12Shikaru qr->qr_cb_arg = cb_arg;
103854e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
103954e21c12Shikaru qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring);
104054e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
104154e21c12Shikaru qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring);
104254e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC,
104354e21c12Shikaru qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring);
104454e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC,
104554e21c12Shikaru qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring);
104654e21c12Shikaru
104754e21c12Shikaru /* Setup the shadow variables */
104854e21c12Shikaru qr->qr_head = 0;
104954e21c12Shikaru qr->qr_tail = 0;
105054e21c12Shikaru qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
105154e21c12Shikaru qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
105254e21c12Shikaru
105354e21c12Shikaru /*
1054d56923f3Smsaitoh * To make sure that ring is aligned to ring size allocate
105554e21c12Shikaru * at least 4k and then tell the user it is smaller.
105654e21c12Shikaru */
105754e21c12Shikaru ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
105854e21c12Shikaru ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
105954e21c12Shikaru error = qat_alloc_dmamem(sc, &qr->qr_dma,
106054e21c12Shikaru ring_size_bytes, ring_size_bytes);
106154e21c12Shikaru if (error)
106254e21c12Shikaru return error;
106354e21c12Shikaru
106454e21c12Shikaru KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1);
106554e21c12Shikaru
106654e21c12Shikaru qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
106754e21c12Shikaru qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr;
106854e21c12Shikaru
106954e21c12Shikaru aprint_verbose_dev(sc->sc_dev,
107054e21c12Shikaru "allocate ring %d of bank %d for %s "
107154e21c12Shikaru "size %d %d at vaddr %p paddr 0x%llx\n",
107254e21c12Shikaru ring, bank, name, ring_size_bytes,
107354e21c12Shikaru (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len,
107454e21c12Shikaru qr->qr_ring_vaddr,
107554e21c12Shikaru (unsigned long long)qr->qr_ring_paddr);
107654e21c12Shikaru
107754e21c12Shikaru memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
107854e21c12Shikaru qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len);
107954e21c12Shikaru
108054e21c12Shikaru bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0,
108154e21c12Shikaru qr->qr_dma.qdm_dma_map->dm_mapsize,
108254e21c12Shikaru BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
108354e21c12Shikaru
108454e21c12Shikaru if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) {
108554e21c12Shikaru aprint_error_dev(sc->sc_dev, "ring address not aligned\n");
108654e21c12Shikaru return EFAULT;
108754e21c12Shikaru }
108854e21c12Shikaru
108954e21c12Shikaru if (cb == NULL) {
109054e21c12Shikaru ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
109154e21c12Shikaru } else {
109254e21c12Shikaru ring_config =
109354e21c12Shikaru ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
109454e21c12Shikaru }
109554e21c12Shikaru qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
109654e21c12Shikaru
109754e21c12Shikaru ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
109854e21c12Shikaru qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
109954e21c12Shikaru
110054e21c12Shikaru if (sc->sc_hw.qhw_init_arb != NULL)
110154e21c12Shikaru qat_arb_update(sc, qb);
110254e21c12Shikaru
110354e21c12Shikaru mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET);
110454e21c12Shikaru
110554e21c12Shikaru qat_etr_ap_bank_setup_ring(sc, qr);
110654e21c12Shikaru
110754e21c12Shikaru if (cb != NULL) {
110854e21c12Shikaru uint32_t intr_mask;
110954e21c12Shikaru
111054e21c12Shikaru qb->qb_intr_mask |= qr->qr_ring_mask;
111154e21c12Shikaru intr_mask = qb->qb_intr_mask;
111254e21c12Shikaru
111354e21c12Shikaru aprint_verbose_dev(sc->sc_dev,
111454e21c12Shikaru "update intr mask for bank %d "
111554e21c12Shikaru "(coalescing time %dns): 0x%08x\n",
111654e21c12Shikaru bank, qb->qb_coalescing_time, intr_mask);
111754e21c12Shikaru qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN,
111854e21c12Shikaru intr_mask);
111954e21c12Shikaru qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
112054e21c12Shikaru ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
112154e21c12Shikaru }
112254e21c12Shikaru
112354e21c12Shikaru *rqr = qr;
112454e21c12Shikaru
112554e21c12Shikaru return 0;
112654e21c12Shikaru }
112754e21c12Shikaru
112854e21c12Shikaru static inline u_int
qat_modulo(u_int data,u_int shift)112954e21c12Shikaru qat_modulo(u_int data, u_int shift)
113054e21c12Shikaru {
113154e21c12Shikaru u_int div = data >> shift;
113254e21c12Shikaru u_int mult = div << shift;
113354e21c12Shikaru return data - mult;
113454e21c12Shikaru }
113554e21c12Shikaru
/*
 * qat_etr_put_msg:
 *
 *	Enqueue one request message on a TX ring.  Reserves an inflight
 *	slot (returning EBUSY when the ring's inflight budget is
 *	exhausted), copies the message at the tail, syncs it for the
 *	device, advances the tail modulo the ring size, and writes the
 *	new tail to the ring's tail CSR.  Serialized by qr_ring_mtx.
 */
int
qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
{
	uint32_t inflight;
	uint32_t *addr;

	mutex_spin_enter(&qr->qr_ring_mtx);

	/* Reserve a slot; back out if the ring is full. */
	inflight = atomic_inc_32_nv(qr->qr_inflight);
	if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
		atomic_dec_32(qr->qr_inflight);
		QAT_EVCNT_INCR(&qr->qr_ev_txfull);
		mutex_spin_exit(&qr->qr_ring_mtx);
		return EBUSY;
	}
	QAT_EVCNT_INCR(&qr->qr_ev_txmsg);

	addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);

	memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
#endif

	/* Flush the message to memory before telling the device. */
	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    BUS_DMASYNC_PREWRITE);

	qr->qr_tail = qat_modulo(qr->qr_tail +
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    QAT_RING_SIZE_MODULO(qr->qr_ring_size));

	/* Kick the hardware by publishing the new tail. */
	qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
	    ETR_RING_TAIL_OFFSET, qr->qr_tail);

	mutex_spin_exit(&qr->qr_ring_mtx);

	return 0;
}
117654e21c12Shikaru
/*
 * qat_etr_ring_intr:
 *
 *	Drain response messages from one ring.  Starting at the shadow
 *	head, consume entries until the empty-entry signature is seen:
 *	run the ring callback (with the ring lock dropped, so the
 *	callback may itself enqueue), restore the empty signature, and
 *	advance the head.  The head CSR is written once at the end.
 *	Returns nonzero if any callback reported work handled.
 */
int
qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
    struct qat_ring *qr)
{
	int handled = 0;
	uint32_t *msg;
	uint32_t nmsg = 0;

	mutex_spin_enter(&qr->qr_ring_mtx);

	QAT_EVCNT_INCR(&qr->qr_ev_rxintr);

	msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

	/* Pull the device's write into view before inspecting it. */
	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (*msg != ETR_RING_EMPTY_ENTRY_SIG) {
		atomic_dec_32(qr->qr_inflight);
		QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);

		if (qr->qr_cb != NULL) {
			/* Drop the lock: the callback may re-enter the ring. */
			mutex_spin_exit(&qr->qr_ring_mtx);
			handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
			mutex_spin_enter(&qr->qr_ring_mtx);
		}

		/* Mark the slot free for the device again. */
		*msg = ETR_RING_EMPTY_ENTRY_SIG;

		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		qr->qr_head = qat_modulo(qr->qr_head +
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
		nmsg++;

		msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (nmsg > 0) {
		/* Publish the new head so the device can reuse the slots. */
		qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
		    ETR_RING_HEAD_OFFSET, qr->qr_head);
	}

	mutex_spin_exit(&qr->qr_ring_mtx);

	return handled;
}
123254e21c12Shikaru
/*
 * qat_etr_bank_intr:
 *
 *	Per-bank MSI-X interrupt handler.  Under the bank lock: disable
 *	interrupt coalescing, snapshot the not-empty status (ETR_E_STAT
 *	reads as "empty" bits, hence the inversion), mask it to the
 *	rings with interrupts enabled, and re-arm coalescing.  The
 *	per-ring servicing then runs outside the bank lock.  Returns
 *	nonzero if any ring handled work.
 */
int
qat_etr_bank_intr(void *arg)
{
	struct qat_bank *qb = arg;
	struct qat_softc *sc = qb->qb_sc;
	uint32_t estat;
	int i, handled = 0;

	mutex_spin_enter(&qb->qb_bank_mtx);

	QAT_EVCNT_INCR(&qb->qb_ev_rxintr);

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);

	/* Now handle all the responses */
	estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
	estat &= qb->qb_intr_mask;

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
	    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);

	mutex_spin_exit(&qb->qb_bank_mtx);

	/* Service each non-empty ring, lowest bit first. */
	while ((i = ffs32(estat)) != 0) {
		struct qat_ring *qr = &qb->qb_et_rings[--i];
		estat &= ~(1 << i);
		handled |= qat_etr_ring_intr(sc, qb, qr);
	}

	return handled;
}
126454e21c12Shikaru
/*
 * qat_arb_update:
 *
 *	Refresh the ring-arbiter service-enable register for this bank
 *	from the set of allocated rings (low 8 ring bits only).
 */
void
qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
{

	qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
	    qb->qb_allocated_rings & 0xff);
}
127254e21c12Shikaru
127354e21c12Shikaru struct qat_sym_cookie *
qat_crypto_alloc_sym_cookie(struct qat_crypto_bank * qcb)127454e21c12Shikaru qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
127554e21c12Shikaru {
127654e21c12Shikaru struct qat_sym_cookie *qsc;
127754e21c12Shikaru
127854e21c12Shikaru mutex_spin_enter(&qcb->qcb_bank_mtx);
127954e21c12Shikaru
128054e21c12Shikaru if (qcb->qcb_symck_free_count == 0) {
128154e21c12Shikaru QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck);
128254e21c12Shikaru mutex_spin_exit(&qcb->qcb_bank_mtx);
128354e21c12Shikaru return NULL;
128454e21c12Shikaru }
128554e21c12Shikaru
128654e21c12Shikaru qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
128754e21c12Shikaru
128854e21c12Shikaru mutex_spin_exit(&qcb->qcb_bank_mtx);
128954e21c12Shikaru
129054e21c12Shikaru return qsc;
129154e21c12Shikaru }
129254e21c12Shikaru
129354e21c12Shikaru void
qat_crypto_free_sym_cookie(struct qat_crypto_bank * qcb,struct qat_sym_cookie * qsc)129454e21c12Shikaru qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc)
129554e21c12Shikaru {
129654e21c12Shikaru
129754e21c12Shikaru mutex_spin_enter(&qcb->qcb_bank_mtx);
129854e21c12Shikaru qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
129954e21c12Shikaru mutex_spin_exit(&qcb->qcb_bank_mtx);
130054e21c12Shikaru }
130154e21c12Shikaru
130254e21c12Shikaru
/*
 * qat_memcpy_htobe64:
 *
 *	Copy len bytes (which must be a multiple of 8) from src to dst,
 *	converting each 64-bit word from host to big-endian order.
 */
void
qat_memcpy_htobe64(void *dst, const void *src, size_t len)
{
	const uint64_t *in = src;
	uint64_t *out = dst;
	size_t nwords = len / sizeof(uint64_t);
	size_t idx;

	KASSERT(len % sizeof(uint64_t) == 0);

	for (idx = 0; idx < nwords; idx++)
		out[idx] = htobe64(in[idx]);
}
131554e21c12Shikaru
/*
 * qat_memcpy_htobe32:
 *
 *	Copy len bytes (which must be a multiple of 4) from src to dst,
 *	converting each 32-bit word from host to big-endian order.
 */
void
qat_memcpy_htobe32(void *dst, const void *src, size_t len)
{
	const uint32_t *in = src;
	uint32_t *out = dst;
	size_t nwords = len / sizeof(uint32_t);
	size_t idx;

	KASSERT(len % sizeof(uint32_t) == 0);

	for (idx = 0; idx < nwords; idx++)
		out[idx] = htobe32(in[idx]);
}
132854e21c12Shikaru
/*
 * qat_memcpy_htobe:
 *
 *	Host-to-big-endian copy dispatched on word width: 4 or 8 bytes.
 *	Any other width is a programming error (asserts under
 *	DIAGNOSTIC).
 */
void
qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
{
	if (wordbyte == 4)
		qat_memcpy_htobe32(dst, src, len);
	else if (wordbyte == 8)
		qat_memcpy_htobe64(dst, src, len);
	else
		KASSERT(0);
}
134354e21c12Shikaru
/*
 * qat_crypto_hmac_precompute:
 *
 *	Precompute the HMAC inner and outer partial hash states using
 *	the kernel's software hash implementation: hash (key ^ ipad)
 *	into state1 and (key ^ opad) into state2.  The endianness of
 *	the kernel's internal state is probed against the QAT-expected
 *	initial state, and the result is byte-swapped when they differ.
 *
 *	NOTE(review): padlen = blklen - keylen assumes the key is no
 *	longer than the hash block size; keys longer than the block are
 *	not pre-hashed here — confirm callers guarantee this.
 */
void
qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria,
    struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2)
{
	int i, state_swap;
	struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah;
	uint32_t blklen = hash_def->qshd_alg->qshai_block_len;
	uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
	uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
	uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
	uint32_t keylen = cria->cri_klen / 8;	/* cri_klen is in bits */
	uint32_t padlen = blklen - keylen;
	/* ipad and opad share the prefix buffer, one half each. */
	uint8_t *ipad = desc->qcd_hash_state_prefix_buf;
	uint8_t *opad = desc->qcd_hash_state_prefix_buf +
	    sizeof(desc->qcd_hash_state_prefix_buf) / 2;
	/* XXX
	 * For "stack protector not protecting local variables" error,
	 * use constant variable.
	 * Currently, the max length is sizeof(aesxcbc_ctx) used by
	 * swcr_auth_hash_aes_xcbc_mac
	 */
	uint8_t ctx[sizeof(aesxcbc_ctx)];

	memcpy(ipad, cria->cri_key, keylen);
	memcpy(opad, cria->cri_key, keylen);

	if (padlen > 0) {
		memset(ipad + keylen, 0, padlen);
		memset(opad + keylen, 0, padlen);
	}
	/* Standard HMAC key whitening (RFC 2104 constants). */
	for (i = 0; i < blklen; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	/* ipad */
	sah->Init(ctx);
	/* Check the endian of kernel built-in hash state */
	state_swap = memcmp(hash_def->qshd_alg->qshai_init_state,
	    ((uint8_t *)ctx) + state_offset, state_word);
	sah->Update(ctx, ipad, blklen);
	if (state_swap == 0) {
		memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size);
	} else {
		qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset,
		    state_size, state_word);
	}

	/* opad */
	sah->Init(ctx);
	sah->Update(ctx, opad, blklen);
	if (state_swap == 0) {
		memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size);
	} else {
		qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset,
		    state_size, state_word);
	}
}
140254e21c12Shikaru
140354e21c12Shikaru uint16_t
qat_crypto_load_cipher_cryptoini(struct qat_crypto_desc * desc,struct cryptoini * crie)140454e21c12Shikaru qat_crypto_load_cipher_cryptoini(
140554e21c12Shikaru struct qat_crypto_desc *desc, struct cryptoini *crie)
140654e21c12Shikaru {
140754e21c12Shikaru enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL;
140854e21c12Shikaru enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE;
140954e21c12Shikaru enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT;
141054e21c12Shikaru
141154e21c12Shikaru switch (crie->cri_alg) {
141254e21c12Shikaru case CRYPTO_DES_CBC:
141354e21c12Shikaru algo = HW_CIPHER_ALGO_DES;
141454e21c12Shikaru desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ;
141554e21c12Shikaru break;
141654e21c12Shikaru case CRYPTO_3DES_CBC:
141754e21c12Shikaru algo = HW_CIPHER_ALGO_3DES;
141854e21c12Shikaru desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ;
141954e21c12Shikaru break;
142054e21c12Shikaru case CRYPTO_AES_CBC:
142154e21c12Shikaru switch (crie->cri_klen / 8) {
142254e21c12Shikaru case HW_AES_128_KEY_SZ:
142354e21c12Shikaru algo = HW_CIPHER_ALGO_AES128;
142454e21c12Shikaru break;
142554e21c12Shikaru case HW_AES_192_KEY_SZ:
142654e21c12Shikaru algo = HW_CIPHER_ALGO_AES192;
142754e21c12Shikaru break;
142854e21c12Shikaru case HW_AES_256_KEY_SZ:
142954e21c12Shikaru algo = HW_CIPHER_ALGO_AES256;
143054e21c12Shikaru break;
143154e21c12Shikaru default:
143254e21c12Shikaru KASSERT(0);
143354e21c12Shikaru break;
143454e21c12Shikaru }
143554e21c12Shikaru desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
143654e21c12Shikaru /*
143754e21c12Shikaru * AES decrypt key needs to be reversed.
143854e21c12Shikaru * Instead of reversing the key at session registration,
143954e21c12Shikaru * it is instead reversed on-the-fly by setting the KEY_CONVERT
144054e21c12Shikaru * bit here
144154e21c12Shikaru */
144254e21c12Shikaru if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
144354e21c12Shikaru key_convert = HW_CIPHER_KEY_CONVERT;
144454e21c12Shikaru
144554e21c12Shikaru break;
144654e21c12Shikaru default:
144754e21c12Shikaru KASSERT(0);
144854e21c12Shikaru break;
144954e21c12Shikaru }
145054e21c12Shikaru
145154e21c12Shikaru return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert,
145254e21c12Shikaru desc->qcd_cipher_dir);
145354e21c12Shikaru }
145454e21c12Shikaru
145554e21c12Shikaru uint16_t
qat_crypto_load_auth_cryptoini(struct qat_crypto_desc * desc,struct cryptoini * cria,struct qat_sym_hash_def const ** hash_def)145654e21c12Shikaru qat_crypto_load_auth_cryptoini(
145754e21c12Shikaru struct qat_crypto_desc *desc, struct cryptoini *cria,
145854e21c12Shikaru struct qat_sym_hash_def const **hash_def)
145954e21c12Shikaru {
146054e21c12Shikaru const struct swcr_auth_hash *sah;
146154e21c12Shikaru enum qat_sym_hash_algorithm algo = 0;
146254e21c12Shikaru
146354e21c12Shikaru switch (cria->cri_alg) {
146454e21c12Shikaru case CRYPTO_MD5_HMAC_96:
146554e21c12Shikaru algo = QAT_SYM_HASH_MD5;
146654e21c12Shikaru break;
146754e21c12Shikaru case CRYPTO_SHA1_HMAC_96:
146854e21c12Shikaru algo = QAT_SYM_HASH_SHA1;
146954e21c12Shikaru break;
147054e21c12Shikaru case CRYPTO_SHA2_256_HMAC:
147154e21c12Shikaru algo = QAT_SYM_HASH_SHA256;
147254e21c12Shikaru break;
147354e21c12Shikaru case CRYPTO_SHA2_384_HMAC:
147454e21c12Shikaru algo = QAT_SYM_HASH_SHA384;
147554e21c12Shikaru break;
147654e21c12Shikaru case CRYPTO_SHA2_512_HMAC:
147754e21c12Shikaru algo = QAT_SYM_HASH_SHA512;
147854e21c12Shikaru break;
147954e21c12Shikaru default:
148054e21c12Shikaru KASSERT(0);
148154e21c12Shikaru break;
148254e21c12Shikaru }
148354e21c12Shikaru *hash_def = &qat_sym_hash_defs[algo];
148454e21c12Shikaru sah = (*hash_def)->qshd_alg->qshai_sah;
148554e21c12Shikaru KASSERT(sah != NULL);
148654e21c12Shikaru desc->qcd_auth_sz = sah->auth_hash->authsize;
148754e21c12Shikaru
148854e21c12Shikaru return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
148954e21c12Shikaru (*hash_def)->qshd_qat->qshqi_algo_enc,
149054e21c12Shikaru (*hash_def)->qshd_alg->qshai_digest_len);
149154e21c12Shikaru }
149254e21c12Shikaru
149354e21c12Shikaru int
qat_crypto_load_buf(struct qat_softc * sc,struct cryptop * crp,struct qat_sym_cookie * qsc,struct qat_crypto_desc const * desc,uint8_t * icv_buf,int icv_offset,bus_addr_t * icv_paddr)149454e21c12Shikaru qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp,
149554e21c12Shikaru struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc,
149654e21c12Shikaru uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr)
149754e21c12Shikaru {
149854e21c12Shikaru int error, i, nsegs;
149954e21c12Shikaru
150054e21c12Shikaru if (crp->crp_flags & CRYPTO_F_IMBUF) {
150154e21c12Shikaru struct mbuf *m = (struct mbuf *)crp->crp_buf;
150254e21c12Shikaru
150354e21c12Shikaru if (icv_offset >= 0) {
150454e21c12Shikaru if (m_length(m) == icv_offset) {
150554e21c12Shikaru m_copyback(m, icv_offset, desc->qcd_auth_sz,
150654e21c12Shikaru icv_buf);
150754e21c12Shikaru if (m_length(m) == icv_offset)
150854e21c12Shikaru return ENOBUFS;
150954e21c12Shikaru } else {
151054e21c12Shikaru struct mbuf *m0;
151154e21c12Shikaru m0 = m_pulldown(m, icv_offset,
151254e21c12Shikaru desc->qcd_auth_sz, NULL);
151354e21c12Shikaru if (m0 == NULL)
151454e21c12Shikaru return ENOBUFS;
151554e21c12Shikaru }
151654e21c12Shikaru }
151754e21c12Shikaru
151854e21c12Shikaru error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap,
151954e21c12Shikaru m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
152054e21c12Shikaru if (error == EFBIG) {
152154e21c12Shikaru struct mbuf *m_new;
152254e21c12Shikaru m_new = m_defrag(m, M_DONTWAIT);
152354e21c12Shikaru if (m_new != NULL) {
152454e21c12Shikaru crp->crp_buf = m_new;
152554e21c12Shikaru qsc->qsc_buf = m_new;
152654e21c12Shikaru error = bus_dmamap_load_mbuf(sc->sc_dmat,
152754e21c12Shikaru qsc->qsc_buf_dmamap, m_new,
152854e21c12Shikaru BUS_DMA_WRITE | BUS_DMA_NOWAIT);
152954e21c12Shikaru if (error) {
153054e21c12Shikaru m_freem(m_new);
153154e21c12Shikaru crp->crp_buf = NULL;
153254e21c12Shikaru }
153354e21c12Shikaru }
153454e21c12Shikaru }
153554e21c12Shikaru
153654e21c12Shikaru } else if (crp->crp_flags & CRYPTO_F_IOV) {
153754e21c12Shikaru error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap,
153854e21c12Shikaru (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
153954e21c12Shikaru } else {
154054e21c12Shikaru error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap,
154154e21c12Shikaru crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT);
154254e21c12Shikaru }
154354e21c12Shikaru if (error) {
154454e21c12Shikaru aprint_debug_dev(sc->sc_dev,
154554e21c12Shikaru "can't load crp_buf, error %d\n", error);
154654e21c12Shikaru crp->crp_etype = error;
154754e21c12Shikaru return error;
154854e21c12Shikaru }
154954e21c12Shikaru
155054e21c12Shikaru nsegs = qsc->qsc_buf_dmamap->dm_nsegs;
155154e21c12Shikaru qsc->qsc_buf_list.num_buffers = nsegs;
155254e21c12Shikaru for (i = 0; i < nsegs; i++) {
155354e21c12Shikaru struct flat_buffer_desc *flatbuf =
155454e21c12Shikaru &qsc->qsc_buf_list.phy_buffers[i];
155554e21c12Shikaru bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr;
155654e21c12Shikaru bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len;
155754e21c12Shikaru
155854e21c12Shikaru flatbuf->data_len_in_bytes = len;
155954e21c12Shikaru flatbuf->phy_buffer = (uint64_t)paddr;
156054e21c12Shikaru
156154e21c12Shikaru if (icv_offset >= 0) {
156254e21c12Shikaru if (icv_offset < len)
156354e21c12Shikaru *icv_paddr = paddr + icv_offset;
156454e21c12Shikaru else
156554e21c12Shikaru icv_offset -= len;
156654e21c12Shikaru }
156754e21c12Shikaru }
156854e21c12Shikaru
156954e21c12Shikaru bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
157054e21c12Shikaru qsc->qsc_buf_dmamap->dm_mapsize,
157154e21c12Shikaru BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
157254e21c12Shikaru
157354e21c12Shikaru return 0;
157454e21c12Shikaru }
157554e21c12Shikaru
157654e21c12Shikaru int
qat_crypto_load_iv(struct qat_sym_cookie * qsc,struct cryptop * crp,struct cryptodesc * crde,struct qat_crypto_desc const * desc)157754e21c12Shikaru qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp,
157854e21c12Shikaru struct cryptodesc *crde, struct qat_crypto_desc const *desc)
157954e21c12Shikaru {
158054e21c12Shikaru uint32_t ivlen = desc->qcd_cipher_blk_sz;
158154e21c12Shikaru
158254e21c12Shikaru if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
158354e21c12Shikaru memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen);
158454e21c12Shikaru } else {
158554e21c12Shikaru if (crde->crd_flags & CRD_F_ENCRYPT) {
1586138b1600Sriastradh cprng_fast(qsc->qsc_iv_buf, ivlen);
158754e21c12Shikaru } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
158854e21c12Shikaru /* get iv from buf */
158954e21c12Shikaru m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
159054e21c12Shikaru qsc->qsc_iv_buf);
159154e21c12Shikaru } else if (crp->crp_flags & CRYPTO_F_IOV) {
159254e21c12Shikaru cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
159354e21c12Shikaru qsc->qsc_iv_buf);
159454e21c12Shikaru }
159554e21c12Shikaru }
159654e21c12Shikaru
159754e21c12Shikaru if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 &&
159854e21c12Shikaru (crde->crd_flags & CRD_F_IV_PRESENT) == 0) {
159954e21c12Shikaru if (crp->crp_flags & CRYPTO_F_IMBUF) {
160054e21c12Shikaru m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
160154e21c12Shikaru qsc->qsc_iv_buf);
160254e21c12Shikaru } else if (crp->crp_flags & CRYPTO_F_IOV) {
160354e21c12Shikaru cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
160454e21c12Shikaru qsc->qsc_iv_buf);
160554e21c12Shikaru }
160654e21c12Shikaru }
160754e21c12Shikaru
160854e21c12Shikaru return 0;
160954e21c12Shikaru }
161054e21c12Shikaru
161154e21c12Shikaru static inline struct qat_crypto_bank *
qat_crypto_select_bank(struct qat_crypto * qcy)161254e21c12Shikaru qat_crypto_select_bank(struct qat_crypto *qcy)
161354e21c12Shikaru {
161454e21c12Shikaru u_int cpuid = cpu_index(curcpu());
161554e21c12Shikaru
161654e21c12Shikaru return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
161754e21c12Shikaru }
161854e21c12Shikaru
161954e21c12Shikaru int
qat_crypto_process(void * arg,struct cryptop * crp,int hint)162054e21c12Shikaru qat_crypto_process(void *arg, struct cryptop *crp, int hint)
162154e21c12Shikaru {
162254e21c12Shikaru struct qat_crypto *qcy = arg;
162354e21c12Shikaru struct qat_crypto_bank *qcb;
162454e21c12Shikaru struct qat_session *qs = NULL;
162554e21c12Shikaru struct qat_crypto_desc const *desc;
162654e21c12Shikaru struct qat_sym_cookie *qsc = NULL;
162754e21c12Shikaru struct qat_sym_bulk_cookie *qsbc;
162854e21c12Shikaru struct cryptodesc *crd, *crda = NULL, *crde = NULL;
162954e21c12Shikaru bus_addr_t icv_paddr = 0;
163054e21c12Shikaru int error, icv_offset = -1;
163154e21c12Shikaru uint8_t icv_buf[CRYPTO_MAX_MAC_LEN];
163254e21c12Shikaru
163354e21c12Shikaru qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)];
163454e21c12Shikaru mutex_spin_enter(&qs->qs_session_mtx);
163554e21c12Shikaru KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
163654e21c12Shikaru qs->qs_inflight++;
163754e21c12Shikaru mutex_spin_exit(&qs->qs_session_mtx);
163854e21c12Shikaru
163954e21c12Shikaru qcb = qat_crypto_select_bank(qcy);
164054e21c12Shikaru
164154e21c12Shikaru qsc = qat_crypto_alloc_sym_cookie(qcb);
164254e21c12Shikaru if (qsc == NULL) {
164354e21c12Shikaru error = ENOBUFS;
164454e21c12Shikaru goto fail;
164554e21c12Shikaru }
164654e21c12Shikaru
164754e21c12Shikaru error = 0;
164854e21c12Shikaru desc = &qs->qs_dec_desc;
164954e21c12Shikaru crd = crp->crp_desc;
165054e21c12Shikaru while (crd != NULL) {
165154e21c12Shikaru switch (crd->crd_alg) {
165254e21c12Shikaru case CRYPTO_DES_CBC:
165354e21c12Shikaru case CRYPTO_3DES_CBC:
165454e21c12Shikaru case CRYPTO_AES_CBC:
165554e21c12Shikaru if (crde != NULL)
165654e21c12Shikaru error = EINVAL;
165754e21c12Shikaru if (crd->crd_flags & CRD_F_ENCRYPT) {
165854e21c12Shikaru /* use encrypt desc */
165954e21c12Shikaru desc = &qs->qs_enc_desc;
166054e21c12Shikaru if (crda != NULL)
166154e21c12Shikaru error = ENOTSUP;
166254e21c12Shikaru }
166354e21c12Shikaru crde = crd;
166454e21c12Shikaru break;
166554e21c12Shikaru case CRYPTO_MD5_HMAC_96:
166654e21c12Shikaru case CRYPTO_SHA1_HMAC_96:
166754e21c12Shikaru case CRYPTO_SHA2_256_HMAC:
166854e21c12Shikaru case CRYPTO_SHA2_384_HMAC:
166954e21c12Shikaru case CRYPTO_SHA2_512_HMAC:
167054e21c12Shikaru if (crda != NULL)
167154e21c12Shikaru error = EINVAL;
167254e21c12Shikaru if (crde != NULL &&
167354e21c12Shikaru (crde->crd_flags & CRD_F_ENCRYPT) == 0)
167454e21c12Shikaru error = EINVAL;
167554e21c12Shikaru crda = crd;
167654e21c12Shikaru icv_offset = crd->crd_inject;
167754e21c12Shikaru break;
167854e21c12Shikaru }
167954e21c12Shikaru if (error)
168054e21c12Shikaru goto fail;
168154e21c12Shikaru
168254e21c12Shikaru crd = crd->crd_next;
168354e21c12Shikaru }
168454e21c12Shikaru
168554e21c12Shikaru qsc->qsc_buf = crp->crp_buf;
168654e21c12Shikaru
168754e21c12Shikaru if (crde != NULL) {
168854e21c12Shikaru error = qat_crypto_load_iv(qsc, crp, crde, desc);
168954e21c12Shikaru if (error)
169054e21c12Shikaru goto fail;
169154e21c12Shikaru }
169254e21c12Shikaru
169354e21c12Shikaru error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf,
169454e21c12Shikaru icv_offset, &icv_paddr);
169554e21c12Shikaru if (error)
169654e21c12Shikaru goto fail;
169754e21c12Shikaru
169854e21c12Shikaru qsbc = &qsc->u.qsc_bulk_cookie;
169954e21c12Shikaru
170054e21c12Shikaru qsbc->qsbc_crypto = qcy;
170154e21c12Shikaru qsbc->qsbc_session = qs;
170254e21c12Shikaru qsbc->qsbc_cb_tag = crp;
170354e21c12Shikaru
170454e21c12Shikaru qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc,
170554e21c12Shikaru crde, crda, icv_paddr);
170654e21c12Shikaru
170754e21c12Shikaru bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0,
170854e21c12Shikaru offsetof(struct qat_sym_cookie, qsc_self_dmamap),
170954e21c12Shikaru BUS_DMASYNC_PREWRITE);
171054e21c12Shikaru
171154e21c12Shikaru error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx,
171254e21c12Shikaru (uint32_t *)qsbc->qsbc_msg);
171354e21c12Shikaru if (error)
171454e21c12Shikaru goto fail;
171554e21c12Shikaru
171654e21c12Shikaru return 0;
171754e21c12Shikaru fail:
171854e21c12Shikaru if (qsc)
171954e21c12Shikaru qat_crypto_free_sym_cookie(qcb, qsc);
172054e21c12Shikaru mutex_spin_enter(&qs->qs_session_mtx);
172154e21c12Shikaru qs->qs_inflight--;
172254e21c12Shikaru qat_crypto_check_free_session(qcy, qs);
172354e21c12Shikaru crp->crp_etype = error;
172454e21c12Shikaru crypto_done(crp);
172554e21c12Shikaru return 0;
172654e21c12Shikaru }
172754e21c12Shikaru
172854e21c12Shikaru int
qat_crypto_setup_ring(struct qat_softc * sc,struct qat_crypto_bank * qcb)172954e21c12Shikaru qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
173054e21c12Shikaru {
173154e21c12Shikaru int error, i, bank;
173254e21c12Shikaru int curname = 0;
173354e21c12Shikaru char *name;
173454e21c12Shikaru
173554e21c12Shikaru bank = qcb->qcb_bank;
173654e21c12Shikaru
173754e21c12Shikaru name = qcb->qcb_ring_names[curname++];
173854e21c12Shikaru snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
173954e21c12Shikaru error = qat_etr_setup_ring(sc, qcb->qcb_bank,
174054e21c12Shikaru sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
174154e21c12Shikaru NULL, NULL, name, &qcb->qcb_sym_tx);
174254e21c12Shikaru if (error)
174354e21c12Shikaru return error;
174454e21c12Shikaru
174554e21c12Shikaru name = qcb->qcb_ring_names[curname++];
174654e21c12Shikaru snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
174754e21c12Shikaru error = qat_etr_setup_ring(sc, qcb->qcb_bank,
174854e21c12Shikaru sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
174954e21c12Shikaru qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
175054e21c12Shikaru if (error)
175154e21c12Shikaru return error;
175254e21c12Shikaru
175354e21c12Shikaru for (i = 0; i < QAT_NSYMCOOKIE; i++) {
175454e21c12Shikaru struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
175554e21c12Shikaru struct qat_sym_cookie *qsc;
175654e21c12Shikaru
175754e21c12Shikaru error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie),
175854e21c12Shikaru QAT_OPTIMAL_ALIGN);
175954e21c12Shikaru if (error)
176054e21c12Shikaru return error;
176154e21c12Shikaru
176254e21c12Shikaru qsc = qdm->qdm_dma_vaddr;
176354e21c12Shikaru qsc->qsc_self_dmamap = &qdm->qdm_dma_map;
176454e21c12Shikaru qsc->qsc_bulk_req_params_buf_paddr =
176554e21c12Shikaru qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
176654e21c12Shikaru u.qsc_bulk_cookie.qsbc_req_params_buf);
176754e21c12Shikaru qsc->qsc_buffer_list_desc_paddr =
176854e21c12Shikaru qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
176954e21c12Shikaru qsc_buf_list);
177054e21c12Shikaru qsc->qsc_iv_buf_paddr =
177154e21c12Shikaru qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
177254e21c12Shikaru qsc_iv_buf);
177354e21c12Shikaru qcb->qcb_symck_free[i] = qsc;
177454e21c12Shikaru qcb->qcb_symck_free_count++;
177554e21c12Shikaru
177654e21c12Shikaru error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN,
177754e21c12Shikaru QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap);
177854e21c12Shikaru if (error)
177954e21c12Shikaru return error;
178054e21c12Shikaru }
178154e21c12Shikaru
178254e21c12Shikaru return 0;
178354e21c12Shikaru }
178454e21c12Shikaru
178554e21c12Shikaru int
qat_crypto_bank_init(struct qat_softc * sc,struct qat_crypto_bank * qcb)178654e21c12Shikaru qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
178754e21c12Shikaru {
178854e21c12Shikaru int error;
178954e21c12Shikaru
179054e21c12Shikaru mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
179154e21c12Shikaru
179254e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC,
179354e21c12Shikaru qcb->qcb_ev_no_symck_name, "crypto no_symck");
179454e21c12Shikaru
179554e21c12Shikaru error = qat_crypto_setup_ring(sc, qcb);
179654e21c12Shikaru if (error)
179754e21c12Shikaru return error;
179854e21c12Shikaru
179954e21c12Shikaru return 0;
180054e21c12Shikaru }
180154e21c12Shikaru
180254e21c12Shikaru int
qat_crypto_init(struct qat_softc * sc)180354e21c12Shikaru qat_crypto_init(struct qat_softc *sc)
180454e21c12Shikaru {
180554e21c12Shikaru struct qat_crypto *qcy = &sc->sc_crypto;
180654e21c12Shikaru int error, bank, i;
180754e21c12Shikaru int num_banks;
180854e21c12Shikaru
180954e21c12Shikaru qcy->qcy_sc = sc;
181054e21c12Shikaru
181154e21c12Shikaru if (sc->sc_hw.qhw_init_arb != NULL)
181254e21c12Shikaru num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks);
181354e21c12Shikaru else
181454e21c12Shikaru num_banks = sc->sc_ae_num;
181554e21c12Shikaru
181654e21c12Shikaru qcy->qcy_num_banks = num_banks;
181754e21c12Shikaru
181854e21c12Shikaru qcy->qcy_banks =
181954e21c12Shikaru qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
182054e21c12Shikaru
182154e21c12Shikaru for (bank = 0; bank < num_banks; bank++) {
182254e21c12Shikaru struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
182354e21c12Shikaru qcb->qcb_bank = bank;
182454e21c12Shikaru qcb->qcb_crypto = qcy;
182554e21c12Shikaru error = qat_crypto_bank_init(sc, qcb);
182654e21c12Shikaru if (error)
182754e21c12Shikaru return error;
182854e21c12Shikaru }
182954e21c12Shikaru
183054e21c12Shikaru mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET);
183154e21c12Shikaru
183254e21c12Shikaru for (i = 0; i < QAT_NSESSION; i++) {
183354e21c12Shikaru struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i];
183454e21c12Shikaru struct qat_session *qs;
183554e21c12Shikaru
183654e21c12Shikaru error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session),
183754e21c12Shikaru QAT_OPTIMAL_ALIGN);
183854e21c12Shikaru if (error)
183954e21c12Shikaru return error;
184054e21c12Shikaru
184154e21c12Shikaru qs = qdm->qdm_dma_vaddr;
184254e21c12Shikaru qs->qs_lid = i;
184354e21c12Shikaru qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
184454e21c12Shikaru qs->qs_dec_desc.qcd_hash_state_paddr =
184554e21c12Shikaru qs->qs_dec_desc.qcd_desc_paddr +
184654e21c12Shikaru offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
184754e21c12Shikaru qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
184854e21c12Shikaru offsetof(struct qat_session, qs_enc_desc);
184954e21c12Shikaru qs->qs_enc_desc.qcd_hash_state_paddr =
185054e21c12Shikaru qs->qs_enc_desc.qcd_desc_paddr +
185154e21c12Shikaru offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
185254e21c12Shikaru
185354e21c12Shikaru mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET);
185454e21c12Shikaru
185554e21c12Shikaru qcy->qcy_sessions[i] = qs;
185654e21c12Shikaru qcy->qcy_session_free[i] = qs;
185754e21c12Shikaru qcy->qcy_session_free_count++;
185854e21c12Shikaru }
185954e21c12Shikaru
186054e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC,
186154e21c12Shikaru qcy->qcy_ev_new_sess_name, "crypto new_sess");
186254e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC,
186354e21c12Shikaru qcy->qcy_ev_free_sess_name, "crypto free_sess");
186454e21c12Shikaru QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC,
186554e21c12Shikaru qcy->qcy_ev_no_sess_name, "crypto no_sess");
186654e21c12Shikaru
186754e21c12Shikaru return 0;
186854e21c12Shikaru }
186954e21c12Shikaru
187054e21c12Shikaru int
qat_crypto_new_session(void * arg,uint32_t * lid,struct cryptoini * cri)187154e21c12Shikaru qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri)
187254e21c12Shikaru {
187354e21c12Shikaru struct qat_crypto *qcy = arg;
187454e21c12Shikaru struct qat_session *qs = NULL;
187554e21c12Shikaru struct cryptoini *crie = NULL;
187654e21c12Shikaru struct cryptoini *cria = NULL;
187754e21c12Shikaru int slice, error;
187854e21c12Shikaru
187954e21c12Shikaru mutex_spin_enter(&qcy->qcy_crypto_mtx);
188054e21c12Shikaru
188154e21c12Shikaru if (qcy->qcy_session_free_count == 0) {
188254e21c12Shikaru QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess);
188354e21c12Shikaru mutex_spin_exit(&qcy->qcy_crypto_mtx);
188454e21c12Shikaru return ENOBUFS;
188554e21c12Shikaru }
188654e21c12Shikaru qs = qcy->qcy_session_free[--qcy->qcy_session_free_count];
188754e21c12Shikaru QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess);
188854e21c12Shikaru
188954e21c12Shikaru mutex_spin_exit(&qcy->qcy_crypto_mtx);
189054e21c12Shikaru
189154e21c12Shikaru qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
189254e21c12Shikaru qs->qs_inflight = 0;
189354e21c12Shikaru *lid = qs->qs_lid;
189454e21c12Shikaru
189554e21c12Shikaru error = 0;
189654e21c12Shikaru while (cri) {
189754e21c12Shikaru switch (cri->cri_alg) {
189854e21c12Shikaru case CRYPTO_DES_CBC:
189954e21c12Shikaru case CRYPTO_3DES_CBC:
190054e21c12Shikaru case CRYPTO_AES_CBC:
190154e21c12Shikaru if (crie != NULL)
190254e21c12Shikaru error = EINVAL;
190354e21c12Shikaru crie = cri;
190454e21c12Shikaru break;
190554e21c12Shikaru case CRYPTO_MD5_HMAC_96:
190654e21c12Shikaru case CRYPTO_SHA1_HMAC_96:
190754e21c12Shikaru case CRYPTO_SHA2_256_HMAC:
190854e21c12Shikaru case CRYPTO_SHA2_384_HMAC:
190954e21c12Shikaru case CRYPTO_SHA2_512_HMAC:
191054e21c12Shikaru if (cria != NULL)
191154e21c12Shikaru error = EINVAL;
191254e21c12Shikaru cria = cri;
191354e21c12Shikaru break;
191454e21c12Shikaru default:
191554e21c12Shikaru error = EINVAL;
191654e21c12Shikaru }
191754e21c12Shikaru if (error)
191854e21c12Shikaru goto fail;
191954e21c12Shikaru cri = cri->cri_next;
192054e21c12Shikaru }
192154e21c12Shikaru
192254e21c12Shikaru slice = 1;
192354e21c12Shikaru if (crie != NULL && cria != NULL) {
192454e21c12Shikaru slice = 2;
192554e21c12Shikaru /* auth then decrypt */
192654e21c12Shikaru qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
192754e21c12Shikaru qs->qs_dec_desc.qcd_slices[1] = FW_SLICE_CIPHER;
192854e21c12Shikaru qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
192954e21c12Shikaru qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
193054e21c12Shikaru /* encrypt then auth */
193154e21c12Shikaru qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
193254e21c12Shikaru qs->qs_enc_desc.qcd_slices[1] = FW_SLICE_AUTH;
193354e21c12Shikaru qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
193454e21c12Shikaru qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
193554e21c12Shikaru } else if (crie != NULL) {
193654e21c12Shikaru /* decrypt */
193754e21c12Shikaru qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_CIPHER;
193854e21c12Shikaru qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
193954e21c12Shikaru qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
194054e21c12Shikaru /* encrypt */
194154e21c12Shikaru qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
194254e21c12Shikaru qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
194354e21c12Shikaru qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
194454e21c12Shikaru } else if (cria != NULL) {
194554e21c12Shikaru /* auth */
194654e21c12Shikaru qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
194754e21c12Shikaru qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
194854e21c12Shikaru /* auth */
194954e21c12Shikaru qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_AUTH;
195054e21c12Shikaru qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
195154e21c12Shikaru } else {
195254e21c12Shikaru error = EINVAL;
195354e21c12Shikaru goto fail;
195454e21c12Shikaru }
195554e21c12Shikaru qs->qs_dec_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
195654e21c12Shikaru qs->qs_enc_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
195754e21c12Shikaru
195854e21c12Shikaru qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_dec_desc, crie, cria);
195954e21c12Shikaru qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_enc_desc, crie, cria);
196054e21c12Shikaru
196154e21c12Shikaru return 0;
196254e21c12Shikaru fail:
196354e21c12Shikaru if (qs != NULL) {
196454e21c12Shikaru mutex_spin_enter(&qs->qs_session_mtx);
196554e21c12Shikaru qat_crypto_free_session0(qcy, qs);
196654e21c12Shikaru }
196754e21c12Shikaru return error;
196854e21c12Shikaru }
196954e21c12Shikaru
197054e21c12Shikaru static inline void
qat_crypto_clean_desc(struct qat_crypto_desc * desc)197154e21c12Shikaru qat_crypto_clean_desc(struct qat_crypto_desc *desc)
197254e21c12Shikaru {
197354e21c12Shikaru explicit_memset(desc->qcd_content_desc, 0,
197454e21c12Shikaru sizeof(desc->qcd_content_desc));
197554e21c12Shikaru explicit_memset(desc->qcd_hash_state_prefix_buf, 0,
197654e21c12Shikaru sizeof(desc->qcd_hash_state_prefix_buf));
197754e21c12Shikaru explicit_memset(desc->qcd_req_cache, 0,
197854e21c12Shikaru sizeof(desc->qcd_req_cache));
197954e21c12Shikaru }
198054e21c12Shikaru
1981*fdf161e4Sriastradh void
qat_crypto_free_session0(struct qat_crypto * qcy,struct qat_session * qs)198254e21c12Shikaru qat_crypto_free_session0(struct qat_crypto *qcy, struct qat_session *qs)
198354e21c12Shikaru {
198454e21c12Shikaru
198554e21c12Shikaru qat_crypto_clean_desc(&qs->qs_dec_desc);
198654e21c12Shikaru qat_crypto_clean_desc(&qs->qs_enc_desc);
198754e21c12Shikaru qs->qs_status &= ~QAT_SESSION_STATUS_ACTIVE;
198854e21c12Shikaru
198954e21c12Shikaru mutex_spin_exit(&qs->qs_session_mtx);
199054e21c12Shikaru
199154e21c12Shikaru mutex_spin_enter(&qcy->qcy_crypto_mtx);
199254e21c12Shikaru
199354e21c12Shikaru qcy->qcy_session_free[qcy->qcy_session_free_count++] = qs;
199454e21c12Shikaru QAT_EVCNT_INCR(&qcy->qcy_ev_free_sess);
199554e21c12Shikaru
199654e21c12Shikaru mutex_spin_exit(&qcy->qcy_crypto_mtx);
199754e21c12Shikaru }
199854e21c12Shikaru
199954e21c12Shikaru void
qat_crypto_check_free_session(struct qat_crypto * qcy,struct qat_session * qs)200054e21c12Shikaru qat_crypto_check_free_session(struct qat_crypto *qcy, struct qat_session *qs)
200154e21c12Shikaru {
200254e21c12Shikaru
200354e21c12Shikaru if ((qs->qs_status & QAT_SESSION_STATUS_FREEING) &&
200454e21c12Shikaru qs->qs_inflight == 0) {
200554e21c12Shikaru qat_crypto_free_session0(qcy, qs);
200654e21c12Shikaru } else {
200754e21c12Shikaru mutex_spin_exit(&qs->qs_session_mtx);
200854e21c12Shikaru }
200954e21c12Shikaru }
201054e21c12Shikaru
2011*fdf161e4Sriastradh void
qat_crypto_free_session(void * arg,uint64_t sid)201254e21c12Shikaru qat_crypto_free_session(void *arg, uint64_t sid)
201354e21c12Shikaru {
201454e21c12Shikaru struct qat_crypto *qcy = arg;
201554e21c12Shikaru struct qat_session *qs;
201654e21c12Shikaru
201754e21c12Shikaru qs = qcy->qcy_sessions[CRYPTO_SESID2LID(sid)];
201854e21c12Shikaru
201954e21c12Shikaru mutex_spin_enter(&qs->qs_session_mtx);
202054e21c12Shikaru
202154e21c12Shikaru if (qs->qs_inflight > 0) {
202254e21c12Shikaru qs->qs_status |= QAT_SESSION_STATUS_FREEING;
202354e21c12Shikaru mutex_spin_exit(&qs->qs_session_mtx);
2024*fdf161e4Sriastradh return;
202554e21c12Shikaru }
202654e21c12Shikaru
2027*fdf161e4Sriastradh qat_crypto_free_session0(qcy, qs);
202854e21c12Shikaru }
202954e21c12Shikaru
203054e21c12Shikaru int
qat_crypto_start(struct qat_softc * sc)203154e21c12Shikaru qat_crypto_start(struct qat_softc *sc)
203254e21c12Shikaru {
203354e21c12Shikaru struct qat_crypto *qcy = &sc->sc_crypto;
203454e21c12Shikaru int error, i;
203554e21c12Shikaru static const int algs[] = {
203654e21c12Shikaru CRYPTO_DES_CBC, CRYPTO_3DES_CBC, CRYPTO_AES_CBC,
203754e21c12Shikaru CRYPTO_MD5_HMAC_96, CRYPTO_SHA1_HMAC_96, CRYPTO_SHA2_256_HMAC,
203854e21c12Shikaru CRYPTO_SHA2_384_HMAC, CRYPTO_SHA2_512_HMAC,
203954e21c12Shikaru };
204054e21c12Shikaru
204154e21c12Shikaru /* opencrypto */
204254e21c12Shikaru qcy->qcy_cid = crypto_get_driverid(0);
204354e21c12Shikaru if (qcy->qcy_cid < 0) {
204454e21c12Shikaru aprint_error_dev(sc->sc_dev,
204554e21c12Shikaru "could not get opencrypto driver id\n");
204654e21c12Shikaru return ENOENT;
204754e21c12Shikaru }
204854e21c12Shikaru
204954e21c12Shikaru for (i = 0; i < __arraycount(algs); i++) {
205054e21c12Shikaru error = crypto_register(qcy->qcy_cid, algs[i], 0, 0,
205154e21c12Shikaru qat_crypto_new_session, qat_crypto_free_session,
205254e21c12Shikaru qat_crypto_process, qcy);
205354e21c12Shikaru if (error) {
205454e21c12Shikaru aprint_error_dev(sc->sc_dev,
205554e21c12Shikaru "could not register crypto: %d\n", error);
205654e21c12Shikaru return error;
205754e21c12Shikaru }
205854e21c12Shikaru }
205954e21c12Shikaru
206054e21c12Shikaru return 0;
206154e21c12Shikaru }
206254e21c12Shikaru
/*
 * qat_crypto_sym_rxintr() -- ring receive handler for completed
 * symmetric crypto requests.  Recovers the per-request cookie from the
 * response message, tears down the data DMA mapping, completes the
 * opencrypto request, and retires the request from its session.
 * Returns 1 (message consumed).
 */
int
qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
{
	struct qat_crypto_bank *qcb = arg;	/* bank the response arrived on */
	struct qat_crypto *qcy;
	struct qat_session *qs;
	struct qat_sym_cookie *qsc;
	struct qat_sym_bulk_cookie *qsbc;
	struct cryptop *crp;

	/*
	 * The cookie pointer was stashed in the response message's opaque
	 * field at submit time; the field offset is hardware-specific.
	 */
	qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);

	qsbc = &qsc->u.qsc_bulk_cookie;
	qcy = qsbc->qsbc_crypto;
	qs = qsbc->qsbc_session;
	crp = qsbc->qsbc_cb_tag;	/* the originating opencrypto request */

	/* Device is done with the data buffer: sync, unmap, free cookie. */
	bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
	    qsc->qsc_buf_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, qsc->qsc_buf_dmamap);
	qat_crypto_free_sym_cookie(qcb, qsc);

	/* Report successful completion to opencrypto(9). */
	crp->crp_etype = 0;
	crypto_done(crp);

	/*
	 * Drop the session's in-flight count under the session mutex.
	 * NOTE(review): qat_crypto_check_free_session() is assumed to
	 * release qs_session_mtx (and free qs if a free was deferred
	 * while requests were in flight) -- confirm against its
	 * definition; there is deliberately no mutex_spin_exit() here.
	 */
	mutex_spin_enter(&qs->qs_session_mtx);
	KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
	qs->qs_inflight--;
	qat_crypto_check_free_session(qcy, qs);

	return 1;
}
209654e21c12Shikaru
209754e21c12Shikaru #ifdef QAT_DUMP
209854e21c12Shikaru
209954e21c12Shikaru void
qat_dump_raw(int flag,const char * label,void * d,size_t len)210054e21c12Shikaru qat_dump_raw(int flag, const char *label, void *d, size_t len)
210154e21c12Shikaru {
210254e21c12Shikaru uintptr_t pc;
210354e21c12Shikaru size_t pos;
210454e21c12Shikaru uint8_t *dp = (uint8_t *)d;
210554e21c12Shikaru
210654e21c12Shikaru if ((qat_dump & flag) == 0)
210754e21c12Shikaru return;
210854e21c12Shikaru
210954e21c12Shikaru printf("dumping %s at %p len %zu\n", label, d, len);
211054e21c12Shikaru
211132d2ad6bShikaru pc = (uintptr_t)__builtin_return_address(0);
211254e21c12Shikaru printf("\tcallpc ");
211354e21c12Shikaru qat_print_sym(pc);
211454e21c12Shikaru printf("\n");
211554e21c12Shikaru
211654e21c12Shikaru for (pos = 0; pos < len; pos++) {
211754e21c12Shikaru if (pos % 32 == 0)
211854e21c12Shikaru printf("%8zx: ", pos);
211954e21c12Shikaru else if (pos % 4 == 0)
212054e21c12Shikaru printf(" ");
212154e21c12Shikaru
212254e21c12Shikaru printf("%02x", dp[pos]);
212354e21c12Shikaru
212454e21c12Shikaru if (pos % 32 == 31 || pos + 1 == len)
212554e21c12Shikaru printf("\n");
212654e21c12Shikaru }
212754e21c12Shikaru }
212854e21c12Shikaru
212954e21c12Shikaru void
qat_dump_ring(int bank,int ring)213054e21c12Shikaru qat_dump_ring(int bank, int ring)
213154e21c12Shikaru {
213254e21c12Shikaru struct qat_softc *sc = gsc;
213354e21c12Shikaru struct qat_bank *qb = &sc->sc_etr_banks[bank];
213454e21c12Shikaru struct qat_ring *qr = &qb->qb_et_rings[ring];
213554e21c12Shikaru u_int offset;
213654e21c12Shikaru int i;
213754e21c12Shikaru uint32_t msg;
213854e21c12Shikaru
213954e21c12Shikaru printf("dumping bank %d ring %d\n", bank, ring);
214054e21c12Shikaru printf("\tid %d name %s msg size %d ring size %d\n",
214154e21c12Shikaru qr->qr_ring_id, qr->qr_name,
214254e21c12Shikaru QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
214354e21c12Shikaru qr->qr_ring_size);
214454e21c12Shikaru printf("\thost head 0x%08x tail 0x%08x\n", qr->qr_head, qr->qr_tail);
214554e21c12Shikaru printf("\ttarget head 0x%08x tail 0x%08x\n",
214654e21c12Shikaru qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
214754e21c12Shikaru ETR_RING_HEAD_OFFSET),
214854e21c12Shikaru qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
214954e21c12Shikaru ETR_RING_TAIL_OFFSET));
215054e21c12Shikaru
215154e21c12Shikaru printf("\n");
215254e21c12Shikaru i = 0;
215354e21c12Shikaru offset = 0;
215454e21c12Shikaru do {
215554e21c12Shikaru if (i % 8 == 0)
215654e21c12Shikaru printf("%8x:", offset);
215754e21c12Shikaru
215854e21c12Shikaru if (offset == qr->qr_head) {
215954e21c12Shikaru printf("*");
216054e21c12Shikaru } else if (offset == qr->qr_tail) {
216154e21c12Shikaru printf("v");
216254e21c12Shikaru } else {
216354e21c12Shikaru printf(" ");
216454e21c12Shikaru }
216554e21c12Shikaru
216654e21c12Shikaru msg = *(uint32_t *)((uintptr_t)qr->qr_ring_vaddr + offset);
216754e21c12Shikaru printf("%08x", htobe32(msg));
216854e21c12Shikaru
216954e21c12Shikaru if (i % 8 == 7)
217054e21c12Shikaru printf("\n");
217154e21c12Shikaru
217254e21c12Shikaru i++;
217354e21c12Shikaru offset = qat_modulo(offset +
217454e21c12Shikaru QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
217554e21c12Shikaru QAT_RING_SIZE_MODULO(qr->qr_ring_size));
217654e21c12Shikaru } while (offset != 0);
217754e21c12Shikaru }
217854e21c12Shikaru
/*
 * qat_dump_mbuf() -- debug helper: hex-dump every mbuf in a chain,
 * including up to `pre' bytes before and `post' bytes after the valid
 * data region (clamped to the underlying buffer).  The valid data is
 * bracketed in the output with "`" at its start and "'" at its end.
 */
void
qat_dump_mbuf(struct mbuf *m0, int pre, int post)
{
	struct mbuf *m;

	for (m = m0; m != NULL; m = m->m_next) {
		size_t pos, len;
		uint8_t *buf_start, *data_start, *data_end, *buf_end;
		uint8_t *start, *end, *dp;
		bool skip_ind;
		const char *ind;

		printf("dumping mbuf %p len %d flags 0x%08x\n",
		    m, m->m_len, m->m_flags);
		if (m->m_len == 0)
			continue;

		/*
		 * Determine the bounds of the storage backing this mbuf
		 * so the pre/post context can be clamped to real memory.
		 */
		data_start = (uint8_t *)m->m_data;
		data_end = data_start + m->m_len;
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
		case 0:
			/* Data stored inline in the mbuf itself. */
			buf_start = (uint8_t *)M_BUFADDR(m);
			buf_end = buf_start +
			    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
			break;
		case M_EXT|M_EXT_CLUSTER:
			/* Data stored in an external cluster. */
			buf_start = (uint8_t *)m->m_ext.ext_buf;
			buf_end = buf_start +m->m_ext.ext_size;
			break;
		default:
			/* XXX other ext types: dump only the valid data */
			buf_start = data_start;
			buf_end = data_end;
			break;
		}

		/*
		 * Clamp the requested context to the buffer bounds.
		 * NOTE(review): data_start - pre may momentarily point
		 * before the buffer prior to clamping; strictly UB in C
		 * though harmless here -- debug-only code.
		 */
		start = data_start - pre;
		if (start < buf_start)
			start = buf_start;
		end = data_end + post;
		if (end > buf_end)
			end = buf_end;

		dp = start;
		len = (size_t)(end - start);
		skip_ind = false;
		for (pos = 0; pos < len; pos++) {

			/*
			 * Separator printed before this byte: "`" marks
			 * the start of valid data, " " is the normal
			 * column gap, and "" when the previous byte was
			 * already suffixed with the "'" end marker.
			 */
			if (skip_ind)
				ind = "";
			else if (&dp[pos] == data_start)
				ind = "`";
			else
				ind = " ";

			if (pos % 32 == 0)
				printf("%8zx:%s", pos, ind);
			else if (pos % 2 == 0)
				printf("%s", ind);

			printf("%02x", dp[pos]);

			/* Close the valid-data region with "'". */
			skip_ind = false;
			if (&dp[pos + 1] == data_end) {
				skip_ind = true;
				printf("'");
			}

			if (pos % 32 == 31 || pos + 1 == len) {
				printf("\n");
				skip_ind = false;
			}
		}
	}
}
225454e21c12Shikaru
225554e21c12Shikaru #endif /* QAT_DUMP */
225654e21c12Shikaru
225754e21c12Shikaru MODULE(MODULE_CLASS_DRIVER, qat, "pci,opencrypto");
225854e21c12Shikaru
225954e21c12Shikaru #ifdef _MODULE
226054e21c12Shikaru #include "ioconf.c"
226154e21c12Shikaru #endif
226254e21c12Shikaru
/*
 * qat_modcmd() -- module(9) control entry point.  When built as a
 * loadable module, attaches/detaches the driver's autoconfiguration
 * data on init/fini; when built into the kernel (_MODULE undefined)
 * both commands are no-ops that return success.
 */
int
qat_modcmd(modcmd_t cmd, void *data)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_qat,
		    cfattach_ioconf_qat, cfdata_ioconf_qat);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_qat,
		    cfattach_ioconf_qat, cfdata_ioconf_qat);
#endif
		return error;
	default:
		/* Any other module command is unsupported. */
		return ENOTTY;
	}
}
2285