/* $NetBSD: qat.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/module.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/bitops.h>
#include <sys/atomic.h>
#include <sys/mbuf.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/md5.h>
#include <sys/sha1.h>
#include <sys/sha2.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

/* XXX same as sys/arch/x86/x86/via_padlock.c */
#include <opencrypto/cryptosoft_xform.c>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include "qatreg.h"
#include "qatvar.h"
#include "qat_aevar.h"

extern struct qat_hw qat_hw_c2xxx;
extern struct qat_hw qat_hw_c3xxx;
extern struct qat_hw qat_hw_c62x;
extern struct qat_hw qat_hw_d15xx;

static const struct qat_product {
	pci_vendor_id_t qatp_vendor;
	pci_product_id_t qatp_product;
	const char *qatp_name;
	enum qat_chip_type qatp_chip;
	const struct qat_hw *qatp_hw;
} qat_products[] = {

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
	  "Intel C2000 QuickAssist Physical Function",
	  QAT_CHIP_C2XXX, &qat_hw_c2xxx },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
	  "Intel C3000 QuickAssist Physical Function",
	  QAT_CHIP_C3XXX, &qat_hw_c3xxx },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT_VF,
	  "Intel C3000 QuickAssist Virtual Function",
	  QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
	  "Intel C620/Xeon D-2100 QuickAssist Physical Function",
	  QAT_CHIP_C62X, &qat_hw_c62x },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT_VF,
	  "Intel C620/Xeon D-2100 QuickAssist Virtual Function",
	  QAT_CHIP_C62X_IOV, &qat_hw_c62xvf },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
	  "Intel Xeon D-1500 QuickAssist Physical Function",
	  QAT_CHIP_D15XX, &qat_hw_d15xx },
#ifdef notyet
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT_VF,
	  "Intel Xeon D-1500 QuickAssist Virtual Function",
	  QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf },
#endif
	{ 0, 0, NULL, 0, NULL },
};

/* MD5 - 16 bytes - Initialiser state can be found in RFC 1321 */
static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = {
	0x01, 0x23, 0x45, 0x67,
	0x89, 0xab, 0xcd, 0xef,
	0xfe, 0xdc, 0xba, 0x98,
	0x76, 0x54, 0x32, 0x10,
};

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
	0x67, 0x45, 0x23, 0x01,
	0xef, 0xcd, 0xab, 0x89,
	0x98, 0xba, 0xdc, 0xfe,
	0x10, 0x32, 0x54, 0x76,
	0xc3, 0xd2, 0xe1, 0xf0
};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67,
	0xbb, 0x67, 0xae, 0x85,
	0x3c, 0x6e, 0xf3, 0x72,
	0xa5, 0x4f, 0xf5, 0x3a,
	0x51, 0x0e, 0x52, 0x7f,
	0x9b, 0x05, 0x68, 0x8c,
	0x1f, 0x83, 0xd9, 0xab,
	0x5b, 0xe0, 0xcd, 0x19
};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
};

/* Hash Algorithm specific structure */

static const struct qat_sym_hash_alg_info md5_info = {
	QAT_HASH_MD5_DIGEST_SIZE,
	QAT_HASH_MD5_BLOCK_SIZE,
	md5_initial_state,
	QAT_HASH_MD5_STATE_SIZE,
	&swcr_auth_hash_hmac_md5_96,
	offsetof(MD5_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha1_info = {
	QAT_HASH_SHA1_DIGEST_SIZE,
	QAT_HASH_SHA1_BLOCK_SIZE,
	sha1_initial_state,
	QAT_HASH_SHA1_STATE_SIZE,
	&swcr_auth_hash_hmac_sha1_96,
	offsetof(SHA1_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha256_info = {
	QAT_HASH_SHA256_DIGEST_SIZE,
	QAT_HASH_SHA256_BLOCK_SIZE,
	sha256_initial_state,
	QAT_HASH_SHA256_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_256,
	offsetof(SHA256_CTX, state),
	4,
};

static const struct qat_sym_hash_alg_info sha384_info = {
	QAT_HASH_SHA384_DIGEST_SIZE,
	QAT_HASH_SHA384_BLOCK_SIZE,
	sha384_initial_state,
	QAT_HASH_SHA384_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_384,
	offsetof(SHA384_CTX, state),
	8,
};

static const struct qat_sym_hash_alg_info sha512_info = {
	QAT_HASH_SHA512_DIGEST_SIZE,
	QAT_HASH_SHA512_BLOCK_SIZE,
	sha512_initial_state,
	QAT_HASH_SHA512_STATE_SIZE,
	&swcr_auth_hash_hmac_sha2_512,
	offsetof(SHA512_CTX, state),
	8,
};

static const struct qat_sym_hash_alg_info aes_gcm_info = {
	QAT_HASH_AES_GCM_DIGEST_SIZE,
	QAT_HASH_AES_GCM_BLOCK_SIZE,
	NULL, 0,
	NULL, 0, 0, /* XXX */
};

/* Hash QAT specific structures */

static const struct qat_sym_hash_qat_info md5_config = {
	HW_AUTH_ALGO_MD5,
	QAT_HASH_MD5_BLOCK_SIZE,
	HW_MD5_STATE1_SZ,
	HW_MD5_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha1_config = {
	HW_AUTH_ALGO_SHA1,
	QAT_HASH_SHA1_BLOCK_SIZE,
	HW_SHA1_STATE1_SZ,
	HW_SHA1_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha256_config = {
	HW_AUTH_ALGO_SHA256,
	QAT_HASH_SHA256_BLOCK_SIZE,
	HW_SHA256_STATE1_SZ,
	HW_SHA256_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha384_config = {
	HW_AUTH_ALGO_SHA384,
	QAT_HASH_SHA384_BLOCK_SIZE,
	HW_SHA384_STATE1_SZ,
	HW_SHA384_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha512_config = {
	HW_AUTH_ALGO_SHA512,
	QAT_HASH_SHA512_BLOCK_SIZE,
	HW_SHA512_STATE1_SZ,
	HW_SHA512_STATE2_SZ
};

static const struct qat_sym_hash_qat_info aes_gcm_config = {
	HW_AUTH_ALGO_GALOIS_128,
	0,
	HW_GALOIS_128_STATE1_SZ,
	HW_GALOIS_H_SZ +
	HW_GALOIS_LEN_A_SZ +
	HW_GALOIS_E_CTR0_SZ
};

static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
	[QAT_SYM_HASH_MD5] = { &md5_info, &md5_config },
	[QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
	[QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
	[QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
	[QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
	[QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
};

const struct qat_product *
		qat_lookup(const struct pci_attach_args *);
int		qat_match(struct device *, struct cfdata *, void *);
void		qat_attach(struct device *, struct device *, void *);
void		qat_init(struct device *);
int		qat_start(struct device *);
int		qat_detach(struct device *, int);

int		qat_alloc_msix_intr(struct qat_softc *,
		    struct pci_attach_args *);
void *		qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t,
		    int (*)(void *), void *, const char *, int);
int		qat_setup_msix_intr(struct qat_softc *);

int		qat_etr_init(struct qat_softc *);
int		qat_etr_bank_init(struct qat_softc *, int);

int		qat_etr_ap_bank_init(struct qat_softc *);
void		qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
void		qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
		    uint32_t, int);
void		qat_etr_ap_bank_setup_ring(struct qat_softc *,
		    struct qat_ring *);
int		qat_etr_verify_ring_size(uint32_t, uint32_t);

int		qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
		    struct qat_ring *);
int		qat_etr_bank_intr(void *);

void		qat_arb_update(struct qat_softc *, struct qat_bank *);

struct qat_sym_cookie *
		qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *);
void		qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
		    struct qat_sym_cookie *);
int		qat_crypto_load_buf(struct qat_softc *, struct cryptop *,
		    struct qat_sym_cookie *, struct qat_crypto_desc const *,
		    uint8_t *, int, bus_addr_t *);
int		qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *,
		    struct cryptodesc *, struct qat_crypto_desc const *);
int		qat_crypto_process(void *, struct cryptop *, int);
int		qat_crypto_setup_ring(struct qat_softc *,
		    struct qat_crypto_bank *);
int		qat_crypto_new_session(void *, uint32_t *, struct cryptoini *);
int		qat_crypto_free_session0(struct qat_crypto *,
		    struct qat_session *);
void		qat_crypto_check_free_session(struct qat_crypto *,
		    struct qat_session *);
int		qat_crypto_free_session(void *, uint64_t);
int		qat_crypto_bank_init(struct qat_softc *,
		    struct qat_crypto_bank *);
int		qat_crypto_init(struct qat_softc *);
int		qat_crypto_start(struct qat_softc *);
int		qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);

CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc),
    qat_match, qat_attach, qat_detach, NULL);

struct qat_softc *gsc = NULL;

#ifdef QAT_DUMP
int qat_dump = QAT_DUMP;
#endif

const struct qat_product *
qat_lookup(const struct pci_attach_args *pa)
{
	const struct qat_product *qatp;

	for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
		if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == qatp->qatp_product)
			return qatp;
	}
	return NULL;
}

int
qat_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (qat_lookup(pa) != NULL)
		return 1;

	return 0;
}

void
qat_attach(struct device *parent, struct device *self, void *aux)
{
	struct qat_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const struct qat_product *qatp;
	char cap[256];
	pcireg_t cmd, memtype, msixoff, fusectl;
	bus_size_t msixtbl_offset;
	int i, bar, msixtbl_bar;

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_pcitag = pa->pa_tag;

	gsc = sc; /* for debug */

	qatp = qat_lookup(pa);
	KASSERT(qatp != NULL);
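
	/* Use the 64-bit DMA tag when the bus supports 64-bit addressing. */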
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	aprint_naive(": Crypto processor\n");
	sc->sc_rev = PCI_REVISION(pa->pa_class);
	aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev);

	memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));

	/* Determine active accelerators and engines */
	sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
	sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);

	sc->sc_accel_num = 0;
	for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
		if (sc->sc_accel_mask & (1 << i))
			sc->sc_accel_num++;
	}
	sc->sc_ae_num = 0;
	for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
		if (sc->sc_ae_mask & (1 << i)) {
			sc->sc_ae_num++;
		}
	}

	if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
		aprint_error_dev(sc->sc_dev, "couldn't find acceleration");
		goto fail;
	}

	KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL);
	KASSERT(sc->sc_ae_num <= MAX_NUM_AE);

	/* Determine SKU and capabilities */
	sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
	sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
	sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);

	aprint_normal_dev(sc->sc_dev,
	    "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n",
	    sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask,
	    sc->sc_ae_num, sc->sc_ae_mask);
	snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
	aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap);

	/* Map BARs */

	msixtbl_bar = 0;
	msixtbl_offset = 0;
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) {
		pcireg_t msixtbl;
		msixtbl = pci_conf_read(pc, pa->pa_tag,
		    msixoff + PCI_MSIX_TBLOFFSET);
		msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
		msixtbl_bar = PCI_MAPREG_START +
		    ((msixtbl & PCI_MSIX_PBABIR_MASK) << 2);
	}

	i = 0;
	if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
		KASSERT(sc->sc_hw.qhw_sram_bar_id == 0);
		fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
		/* Skip SRAM BAR */
		i = (fusectl & FUSECTL_MASK) ? 1 : 0;
	}
	for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) {
		bus_size_t size;
		bus_addr_t addr;

		if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0)
			continue;

		if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM)
			continue;

		/* MSI-X table will be mapped by pci_msix_alloc_map */
		if (bar == msixtbl_bar)
			size = msixtbl_offset;
		else
			size = 0;

		if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0,
		    &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't map bar 0x%02x\n", bar);
			goto fail;
		}

		aprint_verbose_dev(sc->sc_dev,
		    "region #%d bar 0x%02x size 0x%x at 0x%llx"
		    " mapped to %p\n", i, bar,
		    (int)sc->sc_csrs[i], (unsigned long long)addr,
		    bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i]));

		i++;
		if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT)
			bar += 4;
	}

	/* XXX Enable advanced error reporting */

	/* Enable bus mastering */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (qat_alloc_msix_intr(sc, pa))
		goto fail;

	config_mountroot(self, qat_init);

fail:
	/* XXX */
	return;
}

void
qat_init(struct device *self)
{
	int error;
	struct qat_softc *sc = device_private(self);

	aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n");
	error = qat_etr_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize ETR: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n");
	if (sc->sc_hw.qhw_init_admin_comms != NULL &&
	    (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize admin comms: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n");
	if (sc->sc_hw.qhw_init_arb != NULL &&
	    (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize hw arbiter: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n");
	error = qat_ae_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize Acceleration Engine: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n");
	error = qat_aefw_load(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not load firmware: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n");
	error = qat_setup_msix_intr(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not setup interrupts: %d\n", error);
		return;
	}

	sc->sc_hw.qhw_enable_intr(sc);

	error = qat_crypto_init(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize service: %d\n", error);
		return;
	}

	aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n");
	if (sc->sc_hw.qhw_enable_error_correction != NULL)
		sc->sc_hw.qhw_enable_error_correction(sc);

	aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n");
	if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
	    (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Could not initialize watchdog timer: %d\n", error);
		return;
	}

	error = qat_start(self);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Could not start: %d\n", error);
		return;
	}
}

int
qat_start(struct device *self)
{
	struct qat_softc *sc = device_private(self);
	int error;

	error = qat_ae_start(sc);
	if (error)
		return error;

	if (sc->sc_hw.qhw_send_admin_init != NULL &&
	    (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
		return error;
	}

	error = qat_crypto_start(sc);
	if (error)
		return error;

	return 0;
}

int
qat_detach(struct device *self, int flags)
{

	return 0;
}
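
/*
 * qat_alloc_mem() prepends a size_t header recording the allocation
 * size, so that qat_free_mem() can recover the length kmem_free(9)
 * requires without callers having to track it.
 */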
void *
qat_alloc_mem(size_t size)
{
	size_t *sptr;
	sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP);
	*sptr = size;
	return ++sptr;
}

void
qat_free_mem(void *ptr)
{
	size_t *sptr = ptr, size;
	size = *(--sptr);
	kmem_free(sptr, size + sizeof(size));
}

void
qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
{

	bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map);
	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size);
	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
	explicit_memset(qdm, 0, sizeof(*qdm));
}

int
qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
    bus_size_t size, bus_size_t alignment)
{
	int error = 0, nseg;

	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment,
	    0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't allocate dmamem, error = %d\n", error);
		goto fail_0;
	}
	KASSERT(nseg == 1);
	error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg,
	    nseg, size, &qdm->qdm_dma_vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't map dmamem, error = %d\n", error);
		goto fail_1;
	}
	qdm->qdm_dma_size = size;
	error = bus_dmamap_create(sc->sc_dmat, size, nseg, size,
	    0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't create dmamem map, error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map,
	    qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't load dmamem map, error = %d\n", error);
		goto fail_3;
	}

	return 0;
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
	qdm->qdm_dma_map = NULL;
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size);
	qdm->qdm_dma_vaddr = NULL;
	qdm->qdm_dma_size = 0;
fail_1:
	bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
fail_0:
	return error;
}
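
/*
 * Allocate one MSI-X vector per ETR bank plus one for the AE cluster;
 * the AE vector index sits qhw_msix_ae_vec_gap entries past the last
 * bank vector.
 */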
int
qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa)
{
	u_int *ih_map, vec;
	int error, count, ihi;

	count = sc->sc_hw.qhw_num_banks + 1;
	ih_map = qat_alloc_mem(sizeof(*ih_map) * count);
	ihi = 0;

	for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++)
		ih_map[ihi++] = vec;

	vec += sc->sc_hw.qhw_msix_ae_vec_gap;
	ih_map[ihi++] = vec;

	error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count);
	qat_free_mem(ih_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n",
		    count, error);
	}

	return error;
}

void *
qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih,
    int (*func)(void *), void *arg,
    const char *name, int index)
{
	kcpuset_t *affinity;
	int error;
	char buf[PCI_INTRSTR_LEN];
	char intrxname[INTRDEVNAMEBUF];
	const char *intrstr;
	void *cookie;

	snprintf(intrxname, sizeof(intrxname), "%s%s%d",
	    device_xname(sc->sc_dev), name, index);

	intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf));

	pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true);

	cookie = pci_intr_establish_xname(sc->sc_pc, ih,
	    IPL_NET, func, arg, intrxname);

	aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n",
	    name, index, intrstr);

	kcpuset_create(&affinity, true);
	kcpuset_set(affinity, index % ncpu);
	error = interrupt_distribute(cookie, affinity, NULL);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "couldn't distribute interrupt: %s%d\n", name, index);
	}
	kcpuset_destroy(affinity);

	return cookie;
}

int
qat_setup_msix_intr(struct qat_softc *sc)
{
	int i;
	pci_intr_handle_t ih;

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
		struct qat_bank *qb = &sc->sc_etr_banks[i];
		ih = sc->sc_ih[i];

		qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih,
		    qat_etr_bank_intr, qb, "bank", i);
		if (qb->qb_ih_cookie == NULL)
			return ENOMEM;
	}

	sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i],
	    qat_ae_cluster_intr, sc, "aeclust", 0);
	if (sc->sc_ae_ih_cookie == NULL)
		return ENOMEM;

	return 0;
}
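
/*
 * Set up the Enhanced Transport Ring (ETR) banks: one qat_bank per
 * hardware bank, plus the autopush (AP) banks on parts that have them.
 */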
int
qat_etr_init(struct qat_softc *sc)
{
	int i;
	int error = 0;

	sc->sc_etr_banks = qat_alloc_mem(
	    sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
		error = qat_etr_bank_init(sc, i);
		if (error) {
			goto fail;
		}
	}

	if (sc->sc_hw.qhw_num_ap_banks) {
		sc->sc_etr_ap_banks = qat_alloc_mem(
		    sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
		error = qat_etr_ap_bank_init(sc);
		if (error) {
			goto fail;
		}
	}

	return 0;

fail:
	if (sc->sc_etr_banks != NULL) {
		qat_free_mem(sc->sc_etr_banks);
		sc->sc_etr_banks = NULL;
	}
	if (sc->sc_etr_ap_banks != NULL) {
		qat_free_mem(sc->sc_etr_ap_banks);
		sc->sc_etr_ap_banks = NULL;
	}
	return error;
}

int
qat_etr_bank_init(struct qat_softc *sc, int bank)
{
	struct qat_bank *qb = &sc->sc_etr_banks[bank];
	int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;

	KASSERT(bank < sc->sc_hw.qhw_num_banks);

	mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET);

	qb->qb_sc = sc;
	qb->qb_bank = bank;
	qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
	QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR,
	    qb->qb_ev_rxintr_name, "bank%d rxintr", bank);

	/* Clean CSRs for all rings within the bank */
	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
		struct qat_ring *qr = &qb->qb_et_rings[i];

		qat_etr_bank_ring_write_4(sc, bank, i,
		    ETR_RING_CONFIG, 0);
		qat_etr_bank_ring_base_write_8(sc, bank, i, 0);

		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
			qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
		} else if (sc->sc_hw.qhw_tx_rings_mask &
		    (1 << (i - tx_rx_gap))) {
			/* Share inflight counter with rx and tx */
			qr->qr_inflight =
			    qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
		}
	}

	if (sc->sc_hw.qhw_init_etr_intr != NULL) {
		sc->sc_hw.qhw_init_etr_intr(sc, bank);
	} else {
		/* common code in qat 1.7 */
		qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
		    ETR_INT_REG_CLEAR_MASK);
		for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
		    ETR_RINGS_PER_INT_SRCSEL; i++) {
			qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
			    (i * ETR_INT_SRCSEL_NEXT_OFFSET),
			    ETR_INT_SRCSEL_MASK);
		}
	}

	return 0;
}

int
qat_etr_ap_bank_init(struct qat_softc *sc)
{
	int ap_bank;

	for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
		struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];

		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
		    ETR_AP_NF_MASK_INIT);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
		    ETR_AP_NE_MASK_INIT);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);

		memset(qab, 0, sizeof(*qab));
	}

	return 0;
}

void
qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
{
	if (set_mask)
		*ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
	else
		*ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
}

void
qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
    uint32_t ring, int set_dest)
{
	uint32_t ae_mask;
	uint8_t mailbox, ae, nae;
	uint8_t *dest = (uint8_t *)ap_dest;

	mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);

	nae = 0;
	ae_mask = sc->sc_ae_mask;
	for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
		if ((ae_mask & (1 << ae)) == 0)
			continue;

		if (set_dest) {
			dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
			    __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
			    ETR_AP_DEST_ENABLE;
		} else {
			dest[nae] = 0;
		}
		nae++;
		if (nae == ETR_MAX_AE_PER_MAILBOX)
			break;
	}
}

void
qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
{
	struct qat_ap_bank *qab;
	int ap_bank;

	if (sc->sc_hw.qhw_num_ap_banks == 0)
		return;

	ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
	KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks);
	qab = &sc->sc_etr_ap_banks[ap_bank];

	if (qr->qr_cb == NULL) {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
		if (!qab->qab_ne_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
			    qab->qab_ne_dest);
		}
	} else {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
		if (!qab->qab_nf_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
			    qab->qab_nf_dest);
		}
	}
}

int
qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
{
	int i = QAT_MIN_RING_SIZE;

	for (; i <= QAT_MAX_RING_SIZE; i++)
		if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return QAT_DEFAULT_RING_SIZE;
}

int
qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
    uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
    const char *name, struct qat_ring **rqr)
{
	struct qat_bank *qb;
	struct qat_ring *qr = NULL;
	int error;
	uint32_t ring_size_bytes, ring_config;
	uint64_t ring_base;
	uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
	uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;

	KASSERT(bank < sc->sc_hw.qhw_num_banks);

	/* Allocate a ring from specified bank */
	qb = &sc->sc_etr_banks[bank];

	if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
		return EINVAL;
	if (qb->qb_allocated_rings & (1 << ring))
		return ENOENT;
	qr = &qb->qb_et_rings[ring];
	qb->qb_allocated_rings |= 1 << ring;

	/* Initialize allocated ring */
	qr->qr_ring = ring;
	qr->qr_bank = bank;
	qr->qr_name = name;
	qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
	qr->qr_ring_mask = (1 << ring);
	qr->qr_cb = cb;
	qr->qr_cb_arg = cb_arg;
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
	    qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring);
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
	    qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring);
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC,
	    qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring);
	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC,
	    qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring);

	/* Setup the shadow variables */
	qr->qr_head = 0;
	qr->qr_tail = 0;
	qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
	qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);

	/*
	 * To make sure that the ring is aligned to the ring size,
	 * allocate at least 4k and then tell the user it is smaller.
	 */
	ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
	ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
	error = qat_alloc_dmamem(sc, &qr->qr_dma,
	    ring_size_bytes, ring_size_bytes);
	if (error)
		return error;

	KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1);

	qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
	qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr;

	aprint_verbose_dev(sc->sc_dev,
	    "allocate ring %d of bank %d for %s "
	    "size %d %d at vaddr %p paddr 0x%llx\n",
	    ring, bank, name, ring_size_bytes,
	    (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len,
	    qr->qr_ring_vaddr,
	    (unsigned long long)qr->qr_ring_paddr);

	memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
	    qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len);

	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0,
	    qr->qr_dma.qdm_dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "ring address not aligned\n");
		return EFAULT;
	}

	if (cb == NULL) {
		ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
	} else {
		ring_config =
		    ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
	}
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);

	ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
	qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);

	if (sc->sc_hw.qhw_init_arb != NULL)
		qat_arb_update(sc, qb);

	mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET);

	qat_etr_ap_bank_setup_ring(sc, qr);

	if (cb != NULL) {
		uint32_t intr_mask;

		qb->qb_intr_mask |= qr->qr_ring_mask;
		intr_mask = qb->qb_intr_mask;

		aprint_verbose_dev(sc->sc_dev,
		    "update intr mask for bank %d "
		    "(coalescing time %dns): 0x%08x\n",
		    bank, qb->qb_coalescing_time, intr_mask);
		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN,
		    intr_mask);
		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
		    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
	}

	*rqr = qr;

	return 0;
}
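
/*
 * Ring sizes are powers of two, so "data mod 2^shift" reduces to
 * dropping the high bits; qat_modulo() computes it without a divide.
 */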
static inline u_int
qat_modulo(u_int data, u_int shift)
{
	u_int div = data >> shift;
	u_int mult = div << shift;
	return data - mult;
}

int
qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
{
	uint32_t inflight;
	uint32_t *addr;

	mutex_spin_enter(&qr->qr_ring_mtx);

	inflight = atomic_inc_32_nv(qr->qr_inflight);
	if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
		atomic_dec_32(qr->qr_inflight);
		QAT_EVCNT_INCR(&qr->qr_ev_txfull);
		mutex_spin_exit(&qr->qr_ring_mtx);
		return EBUSY;
	}
	QAT_EVCNT_INCR(&qr->qr_ev_txmsg);

	addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);

	memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
#endif

	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    BUS_DMASYNC_PREWRITE);

	qr->qr_tail = qat_modulo(qr->qr_tail +
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    QAT_RING_SIZE_MODULO(qr->qr_ring_size));

	qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
	    ETR_RING_TAIL_OFFSET, qr->qr_tail);

	mutex_spin_exit(&qr->qr_ring_mtx);

	return 0;
}

int
qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
    struct qat_ring *qr)
{
	int handled = 0;
	uint32_t *msg;
	uint32_t nmsg = 0;

	mutex_spin_enter(&qr->qr_ring_mtx);

	QAT_EVCNT_INCR(&qr->qr_ev_rxintr);

	msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

	bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (*msg != ETR_RING_EMPTY_ENTRY_SIG) {
		atomic_dec_32(qr->qr_inflight);
		QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);

		if (qr->qr_cb != NULL) {
			mutex_spin_exit(&qr->qr_ring_mtx);
			handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
			mutex_spin_enter(&qr->qr_ring_mtx);
		}

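		/*
		 * Overwrite the consumed slot with the empty-entry
		 * signature so the next scan stops here.
		 */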
		*msg = ETR_RING_EMPTY_ENTRY_SIG;

		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		qr->qr_head = qat_modulo(qr->qr_head +
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
		nmsg++;

		msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

		bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (nmsg > 0) {
		qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
		    ETR_RING_HEAD_OFFSET, qr->qr_head);
	}

	mutex_spin_exit(&qr->qr_ring_mtx);

	return handled;
}

int
qat_etr_bank_intr(void *arg)
{
	struct qat_bank *qb = arg;
	struct qat_softc *sc = qb->qb_sc;
	uint32_t estat;
	int i, handled = 0;

	mutex_spin_enter(&qb->qb_bank_mtx);

	QAT_EVCNT_INCR(&qb->qb_ev_rxintr);

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);

	/* Now handle all the responses */
	estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
	estat &= qb->qb_intr_mask;

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
	    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);

	mutex_spin_exit(&qb->qb_bank_mtx);

	while ((i = ffs32(estat)) != 0) {
		struct qat_ring *qr = &qb->qb_et_rings[--i];
		estat &= ~(1 << i);
		handled |= qat_etr_ring_intr(sc, qb, qr);
	}

	return handled;
}

void
qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
{

	qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
	    qb->qb_allocated_rings & 0xff);
}
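
/*
 * Symmetric-crypto cookies are kept on a simple per-bank free list
 * guarded by qcb_bank_mtx; allocation pops the last entry and free
 * pushes it back.
 */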
struct qat_sym_cookie *
qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
{
	struct qat_sym_cookie *qsc;

	mutex_spin_enter(&qcb->qcb_bank_mtx);

	if (qcb->qcb_symck_free_count == 0) {
		QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck);
		mutex_spin_exit(&qcb->qcb_bank_mtx);
		return NULL;
	}

	qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];

	mutex_spin_exit(&qcb->qcb_bank_mtx);

	return qsc;
}

void
qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb,
    struct qat_sym_cookie *qsc)
{

	mutex_spin_enter(&qcb->qcb_bank_mtx);
	qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
	mutex_spin_exit(&qcb->qcb_bank_mtx);
}


void
qat_memcpy_htobe64(void *dst, const void *src, size_t len)
{
	uint64_t *dst0 = dst;
	const uint64_t *src0 = src;
	size_t i;

	KASSERT(len % sizeof(*dst0) == 0);

	for (i = 0; i < len / sizeof(*dst0); i++)
		*(dst0 + i) = htobe64(*(src0 + i));
}

void
qat_memcpy_htobe32(void *dst, const void *src, size_t len)
{
	uint32_t *dst0 = dst;
	const uint32_t *src0 = src;
	size_t i;

	KASSERT(len % sizeof(*dst0) == 0);

	for (i = 0; i < len / sizeof(*dst0); i++)
		*(dst0 + i) = htobe32(*(src0 + i));
}

void
qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
{
	switch (wordbyte) {
	case 4:
		qat_memcpy_htobe32(dst, src, len);
		break;
	case 8:
		qat_memcpy_htobe64(dst, src, len);
		break;
	default:
		KASSERT(0);
	}
}
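/*
 * Illustrative example (not part of the original source): the helpers
 * above swap word-by-word, not the buffer as a whole.  On a
 * little-endian host, qat_memcpy_htobe32() over the bytes
 *
 *	src: 01 02 03 04 | 05 06 07 08
 *	dst: 04 03 02 01 | 08 07 06 05
 *
 * reverses each 32-bit word independently, which is what
 * qat_crypto_hmac_precompute() below relies on when the kernel's hash
 * state is kept in little-endian words.
 */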
void
qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria,
    struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2)
{
	int i, state_swap;
	struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah;
	uint32_t blklen = hash_def->qshd_alg->qshai_block_len;
	uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
	uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
	uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
	uint32_t keylen = cria->cri_klen / 8;
	uint32_t padlen = blklen - keylen;
	uint8_t *ipad = desc->qcd_hash_state_prefix_buf;
	uint8_t *opad = desc->qcd_hash_state_prefix_buf +
	    sizeof(desc->qcd_hash_state_prefix_buf) / 2;
	/* XXX
	 * To avoid the "stack protector not protecting local variables"
	 * warning, use a buffer of constant size.
	 * Currently the maximum context length is sizeof(aesxcbc_ctx),
	 * used by swcr_auth_hash_aes_xcbc_mac.
	 */
	uint8_t ctx[sizeof(aesxcbc_ctx)];

	memcpy(ipad, cria->cri_key, keylen);
	memcpy(opad, cria->cri_key, keylen);

	if (padlen > 0) {
		memset(ipad + keylen, 0, padlen);
		memset(opad + keylen, 0, padlen);
	}
	for (i = 0; i < blklen; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	/* ipad */
	sah->Init(ctx);
	/* Check the endianness of the kernel's built-in hash state. */
	state_swap = memcmp(hash_def->qshd_alg->qshai_init_state,
	    ((uint8_t *)ctx) + state_offset, state_word);
	sah->Update(ctx, ipad, blklen);
	if (state_swap == 0) {
		memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size);
	} else {
		qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset,
		    state_size, state_word);
	}

	/* opad */
	sah->Init(ctx);
	sah->Update(ctx, opad, blklen);
	if (state_swap == 0) {
		memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size);
	} else {
		qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset,
		    state_size, state_word);
	}
}
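/*
 * Illustrative sketch (not part of the original source): per RFC 2104,
 * HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)).  The precompute
 * above hashes one block of (k ^ ipad) and one block of (k ^ opad)
 * once per session and exports the two intermediate hash states
 * (state1/state2), so per-request hashing can resume from those states
 * instead of re-absorbing the padded key for every request.
 */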
uint16_t
qat_crypto_load_cipher_cryptoini(
    struct qat_crypto_desc *desc, struct cryptoini *crie)
{
	enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL;
	enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE;
	enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT;

	switch (crie->cri_alg) {
	case CRYPTO_DES_CBC:
		algo = HW_CIPHER_ALGO_DES;
		desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ;
		break;
	case CRYPTO_3DES_CBC:
		algo = HW_CIPHER_ALGO_3DES;
		desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ;
		break;
	case CRYPTO_AES_CBC:
		switch (crie->cri_klen / 8) {
		case HW_AES_128_KEY_SZ:
			algo = HW_CIPHER_ALGO_AES128;
			break;
		case HW_AES_192_KEY_SZ:
			algo = HW_CIPHER_ALGO_AES192;
			break;
		case HW_AES_256_KEY_SZ:
			algo = HW_CIPHER_ALGO_AES256;
			break;
		default:
			KASSERT(0);
			break;
		}
		desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
		/*
		 * The AES decrypt key needs to be reversed.
		 * Instead of reversing the key at session registration,
		 * it is reversed on the fly by setting the KEY_CONVERT
		 * bit here.
		 */
		if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
			key_convert = HW_CIPHER_KEY_CONVERT;

		break;
	default:
		KASSERT(0);
		break;
	}

	return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert,
	    desc->qcd_cipher_dir);
}

uint16_t
qat_crypto_load_auth_cryptoini(
    struct qat_crypto_desc *desc, struct cryptoini *cria,
    struct qat_sym_hash_def const **hash_def)
{
	const struct swcr_auth_hash *sah;
	enum qat_sym_hash_algorithm algo = 0;

	switch (cria->cri_alg) {
	case CRYPTO_MD5_HMAC_96:
		algo = QAT_SYM_HASH_MD5;
		break;
	case CRYPTO_SHA1_HMAC_96:
		algo = QAT_SYM_HASH_SHA1;
		break;
	case CRYPTO_SHA2_256_HMAC:
		algo = QAT_SYM_HASH_SHA256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		algo = QAT_SYM_HASH_SHA384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		algo = QAT_SYM_HASH_SHA512;
		break;
	default:
		KASSERT(0);
		break;
	}
	*hash_def = &qat_sym_hash_defs[algo];
	sah = (*hash_def)->qshd_alg->qshai_sah;
	KASSERT(sah != NULL);
	desc->qcd_auth_sz = sah->auth_hash->authsize;

	return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
	    (*hash_def)->qshd_qat->qshqi_algo_enc,
	    (*hash_def)->qshd_alg->qshai_digest_len);
}
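/*
 * Illustrative example (not part of the original source): cri_klen is
 * in bits, so the AES dispatch above maps
 *
 *	cri_klen 128 -> 16 bytes -> HW_CIPHER_ALGO_AES128
 *	cri_klen 192 -> 24 bytes -> HW_CIPHER_ALGO_AES192
 *	cri_klen 256 -> 32 bytes -> HW_CIPHER_ALGO_AES256
 *
 * assuming the HW_AES_*_KEY_SZ constants are key sizes in bytes.
 */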
int
qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp,
    struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc,
    uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr)
{
	int error, i, nsegs;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *)crp->crp_buf;

		if (icv_offset >= 0) {
			if (m_length(m) == icv_offset) {
				m_copyback(m, icv_offset, desc->qcd_auth_sz,
				    icv_buf);
				if (m_length(m) == icv_offset)
					return ENOBUFS;
			} else {
				struct mbuf *m0;
				m0 = m_pulldown(m, icv_offset,
				    desc->qcd_auth_sz, NULL);
				if (m0 == NULL)
					return ENOBUFS;
			}
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap,
		    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			struct mbuf *m_new;
			m_new = m_defrag(m, M_DONTWAIT);
			if (m_new != NULL) {
				crp->crp_buf = m_new;
				qsc->qsc_buf = m_new;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    qsc->qsc_buf_dmamap, m_new,
				    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (error) {
					m_freem(m_new);
					crp->crp_buf = NULL;
				}
			}
		}

	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap,
		    (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	} else {
		error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap,
		    crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT);
	}
	if (error) {
		aprint_debug_dev(sc->sc_dev,
		    "can't load crp_buf, error %d\n", error);
		crp->crp_etype = error;
		return error;
	}

	nsegs = qsc->qsc_buf_dmamap->dm_nsegs;
	qsc->qsc_buf_list.num_buffers = nsegs;
	for (i = 0; i < nsegs; i++) {
		struct flat_buffer_desc *flatbuf =
		    &qsc->qsc_buf_list.phy_buffers[i];
		bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr;
		bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len;

		flatbuf->data_len_in_bytes = len;
		flatbuf->phy_buffer = (uint64_t)paddr;

		if (icv_offset >= 0) {
			if (icv_offset < len)
				*icv_paddr = paddr + icv_offset;
			else
				icv_offset -= len;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
	    qsc->qsc_buf_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
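/*
 * Illustrative example (not part of the original source): the segment
 * walk above converts a byte offset into a device-visible address.
 * With two DMA segments of 1500 bytes each and icv_offset == 2000,
 * the first iteration reduces icv_offset to 500, and the second sets
 * *icv_paddr = dm_segs[1].ds_addr + 500, i.e. where the firmware
 * reads or writes the ICV.
 */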
int
qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp,
    struct cryptodesc *crde, struct qat_crypto_desc const *desc)
{
	uint32_t rand;
	uint32_t ivlen = desc->qcd_cipher_blk_sz;
	int i;

	if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
		memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen);
	} else {
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			/* Generate a fresh IV, four random bytes at a time. */
			for (i = 0; i + sizeof(rand) <= ivlen;
			    i += sizeof(rand)) {
				rand = cprng_fast32();
				memcpy(qsc->qsc_iv_buf + i, &rand,
				    sizeof(rand));
			}
			/* Fill any tail shorter than sizeof(rand). */
			if (ivlen % sizeof(rand) != 0) {
				rand = cprng_fast32();
				memcpy(qsc->qsc_iv_buf + i, &rand,
				    ivlen - i);
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* Get the IV from the buffer. */
			m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
			    qsc->qsc_iv_buf);
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
			    qsc->qsc_iv_buf);
		}
	}

	if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 &&
	    (crde->crd_flags & CRD_F_IV_PRESENT) == 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
			    qsc->qsc_iv_buf);
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
			    qsc->qsc_iv_buf);
		}
	}

	return 0;
}

static inline struct qat_crypto_bank *
qat_crypto_select_bank(struct qat_crypto *qcy)
{
	u_int cpuid = cpu_index(curcpu());

	return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
}
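/*
 * Illustrative note (not part of the original source): with 4 banks, a
 * request issued on CPU 6 lands in bank 6 % 4 == 2.  Each bank owns its
 * own tx/rx ring pair and cookie pool, so requests issued from
 * different CPUs mostly avoid contending on the same ring mutex.
 */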
int
qat_crypto_process(void *arg, struct cryptop *crp, int hint)
{
	struct qat_crypto *qcy = arg;
	struct qat_crypto_bank *qcb;
	struct qat_session *qs = NULL;
	struct qat_crypto_desc const *desc;
	struct qat_sym_cookie *qsc = NULL;
	struct qat_sym_bulk_cookie *qsbc;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	bus_addr_t icv_paddr = 0;
	int error, icv_offset = -1;
	uint8_t icv_buf[CRYPTO_MAX_MAC_LEN];

	qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)];
	mutex_spin_enter(&qs->qs_session_mtx);
	KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
	qs->qs_inflight++;
	mutex_spin_exit(&qs->qs_session_mtx);

	qcb = qat_crypto_select_bank(qcy);

	qsc = qat_crypto_alloc_sym_cookie(qcb);
	if (qsc == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	error = 0;
	desc = &qs->qs_dec_desc;
	crd = crp->crp_desc;
	while (crd != NULL) {
		switch (crd->crd_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			if (crde != NULL)
				error = EINVAL;
			if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* use encrypt desc */
				desc = &qs->qs_enc_desc;
				if (crda != NULL)
					error = ENOTSUP;
			}
			crde = crd;
			break;
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			if (crda != NULL)
				error = EINVAL;
			if (crde != NULL &&
			    (crde->crd_flags & CRD_F_ENCRYPT) == 0)
				error = EINVAL;
			crda = crd;
			icv_offset = crd->crd_inject;
			break;
		}
		if (error)
			goto fail;

		crd = crd->crd_next;
	}

	qsc->qsc_buf = crp->crp_buf;

	if (crde != NULL) {
		error = qat_crypto_load_iv(qsc, crp, crde, desc);
		if (error)
			goto fail;
	}

	error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf,
	    icv_offset, &icv_paddr);
	if (error)
		goto fail;

	qsbc = &qsc->u.qsc_bulk_cookie;

	qsbc->qsbc_crypto = qcy;
	qsbc->qsbc_session = qs;
	qsbc->qsbc_cb_tag = crp;

	qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc,
	    crde, crda, icv_paddr);

	bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0,
	    offsetof(struct qat_sym_cookie, qsc_self_dmamap),
	    BUS_DMASYNC_PREWRITE);

	error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx,
	    (uint32_t *)qsbc->qsbc_msg);
	if (error)
		goto fail;

	return 0;
fail:
	if (qsc)
		qat_crypto_free_sym_cookie(qcb, qsc);
	mutex_spin_enter(&qs->qs_session_mtx);
	qs->qs_inflight--;
	qat_crypto_check_free_session(qcy, qs);
	crp->crp_etype = error;
	crypto_done(crp);
	return 0;
}
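/*
 * Illustrative sketch (not part of the original source): a request
 * flows through
 *
 *	qat_crypto_process()
 *	 -> qat_crypto_alloc_sym_cookie()	per-request DMA state
 *	 -> qat_crypto_load_iv()/_buf()		stage IV + scatter list
 *	 -> qhw_crypto_setup_req_params()	build the firmware message
 *	 -> qat_etr_put_msg()			enqueue on the sym_tx ring
 *
 * and completes asynchronously in qat_crypto_sym_rxintr(), which
 * unloads the DMA map, frees the cookie and calls crypto_done().
 */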
int
qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
	int error, i, bank;
	int curname = 0;
	char *name;

	bank = qcb->qcb_bank;

	name = qcb->qcb_ring_names[curname++];
	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
	    sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
	    NULL, NULL, name, &qcb->qcb_sym_tx);
	if (error)
		return error;

	name = qcb->qcb_ring_names[curname++];
	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
	    sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
	    qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
	if (error)
		return error;

	for (i = 0; i < QAT_NSYMCOOKIE; i++) {
		struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
		struct qat_sym_cookie *qsc;

		error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie),
		    QAT_OPTIMAL_ALIGN);
		if (error)
			return error;

		qsc = qdm->qdm_dma_vaddr;
		qsc->qsc_self_dmamap = &qdm->qdm_dma_map;
		qsc->qsc_bulk_req_params_buf_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    u.qsc_bulk_cookie.qsbc_req_params_buf);
		qsc->qsc_buffer_list_desc_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_buf_list);
		qsc->qsc_iv_buf_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_iv_buf);
		qcb->qcb_symck_free[i] = qsc;
		qcb->qcb_symck_free_count++;

		error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN,
		    QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap);
		if (error)
			return error;
	}

	return 0;
}

int
qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
	int error;

	mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET);

	QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC,
	    qcb->qcb_ev_no_symck_name, "crypto no_symck");

	error = qat_crypto_setup_ring(sc, qcb);
	if (error)
		return error;

	return 0;
}
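/*
 * Illustrative note (not part of the original source): each cookie is a
 * single physically contiguous DMA allocation, so the device-visible
 * address of any member is simply
 *
 *	ds_addr + offsetof(struct qat_sym_cookie, member)
 *
 * which is how the *_paddr fields above are derived without separate
 * allocations per member.
 */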
int
qat_crypto_init(struct qat_softc *sc)
{
	struct qat_crypto *qcy = &sc->sc_crypto;
	int error, bank, i;
	int num_banks;

	qcy->qcy_sc = sc;

	if (sc->sc_hw.qhw_init_arb != NULL)
		num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks);
	else
		num_banks = sc->sc_ae_num;

	qcy->qcy_num_banks = num_banks;

	qcy->qcy_banks =
	    qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);

	for (bank = 0; bank < num_banks; bank++) {
		struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
		qcb->qcb_bank = bank;
		qcb->qcb_crypto = qcy;
		error = qat_crypto_bank_init(sc, qcb);
		if (error)
			return error;
	}

	mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET);

	for (i = 0; i < QAT_NSESSION; i++) {
		struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i];
		struct qat_session *qs;

		error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session),
		    QAT_OPTIMAL_ALIGN);
		if (error)
			return error;

		qs = qdm->qdm_dma_vaddr;
		qs->qs_lid = i;
		qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
		qs->qs_dec_desc.qcd_hash_state_paddr =
		    qs->qs_dec_desc.qcd_desc_paddr +
		    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
		qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
		    offsetof(struct qat_session, qs_enc_desc);
		qs->qs_enc_desc.qcd_hash_state_paddr =
		    qs->qs_enc_desc.qcd_desc_paddr +
		    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);

		mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET);

		qcy->qcy_sessions[i] = qs;
		qcy->qcy_session_free[i] = qs;
		qcy->qcy_session_free_count++;
	}

	QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC,
	    qcy->qcy_ev_new_sess_name, "crypto new_sess");
	QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC,
	    qcy->qcy_ev_free_sess_name, "crypto free_sess");
	QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC,
	    qcy->qcy_ev_no_sess_name, "crypto no_sess");

	return 0;
}
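/*
 * Illustrative note (not part of the original source): sessions are a
 * fixed pool of QAT_NSESSION preallocated DMA-able records handed out
 * LIFO from qcy_session_free[].  Each session embeds two prebuilt
 * firmware descriptors, one for the decrypt direction and one for the
 * encrypt direction, so qat_crypto_process() only has to pick one.
 */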
int
qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri)
{
	struct qat_crypto *qcy = arg;
	struct qat_session *qs = NULL;
	struct cryptoini *crie = NULL;
	struct cryptoini *cria = NULL;
	int slice, error;

	mutex_spin_enter(&qcy->qcy_crypto_mtx);

	if (qcy->qcy_session_free_count == 0) {
		QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess);
		mutex_spin_exit(&qcy->qcy_crypto_mtx);
		return ENOBUFS;
	}
	qs = qcy->qcy_session_free[--qcy->qcy_session_free_count];
	QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess);

	mutex_spin_exit(&qcy->qcy_crypto_mtx);

	qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
	qs->qs_inflight = 0;
	*lid = qs->qs_lid;

	error = 0;
	while (cri) {
		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			if (crie != NULL)
				error = EINVAL;
			crie = cri;
			break;
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			if (cria != NULL)
				error = EINVAL;
			cria = cri;
			break;
		default:
			error = EINVAL;
		}
		if (error)
			goto fail;
		cri = cri->cri_next;
	}

	slice = 1;
	if (crie != NULL && cria != NULL) {
		slice = 2;
		/* auth then decrypt */
		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
		qs->qs_dec_desc.qcd_slices[1] = FW_SLICE_CIPHER;
		qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
		/* encrypt then auth */
		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
		qs->qs_enc_desc.qcd_slices[1] = FW_SLICE_AUTH;
		qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
	} else if (crie != NULL) {
		/* decrypt */
		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_CIPHER;
		qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
		/* encrypt */
		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
		qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
	} else if (cria != NULL) {
		/* auth */
		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
		/* auth */
		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_AUTH;
		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
	} else {
		error = EINVAL;
		goto fail;
	}
	qs->qs_dec_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
	qs->qs_enc_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;

	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_dec_desc,
	    crie, cria);
	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_enc_desc,
	    crie, cria);

	membar_producer();

	return 0;
fail:
	if (qs != NULL) {
		mutex_spin_enter(&qs->qs_session_mtx);
		qat_crypto_free_session0(qcy, qs);
	}
	return error;
}
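/*
 * Illustrative sketch (not part of the original source): qcd_slices[]
 * is the firmware processing chain, always terminated by a DRAM
 * write-back.  For combined cipher+auth sessions:
 *
 *	decrypt side: AUTH   -> CIPHER -> DRAM_WR	(hash, then decrypt)
 *	encrypt side: CIPHER -> AUTH   -> DRAM_WR	(encrypt, then hash)
 *
 * matching the usual encrypt-then-MAC ordering on the wire.
 */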
static inline void
qat_crypto_clean_desc(struct qat_crypto_desc *desc)
{
	explicit_memset(desc->qcd_content_desc, 0,
	    sizeof(desc->qcd_content_desc));
	explicit_memset(desc->qcd_hash_state_prefix_buf, 0,
	    sizeof(desc->qcd_hash_state_prefix_buf));
	explicit_memset(desc->qcd_req_cache, 0,
	    sizeof(desc->qcd_req_cache));
}

int
qat_crypto_free_session0(struct qat_crypto *qcy, struct qat_session *qs)
{

	qat_crypto_clean_desc(&qs->qs_dec_desc);
	qat_crypto_clean_desc(&qs->qs_enc_desc);
	qs->qs_status &= ~QAT_SESSION_STATUS_ACTIVE;

	mutex_spin_exit(&qs->qs_session_mtx);

	mutex_spin_enter(&qcy->qcy_crypto_mtx);

	qcy->qcy_session_free[qcy->qcy_session_free_count++] = qs;
	QAT_EVCNT_INCR(&qcy->qcy_ev_free_sess);

	mutex_spin_exit(&qcy->qcy_crypto_mtx);

	return 0;
}

void
qat_crypto_check_free_session(struct qat_crypto *qcy, struct qat_session *qs)
{

	if ((qs->qs_status & QAT_SESSION_STATUS_FREEING) &&
	    qs->qs_inflight == 0) {
		qat_crypto_free_session0(qcy, qs);
	} else {
		mutex_spin_exit(&qs->qs_session_mtx);
	}
}

int
qat_crypto_free_session(void *arg, uint64_t sid)
{
	struct qat_crypto *qcy = arg;
	struct qat_session *qs;
	int error;

	qs = qcy->qcy_sessions[CRYPTO_SESID2LID(sid)];

	mutex_spin_enter(&qs->qs_session_mtx);

	if (qs->qs_inflight > 0) {
		qs->qs_status |= QAT_SESSION_STATUS_FREEING;
		mutex_spin_exit(&qs->qs_session_mtx);
		return 0;
	}

	error = qat_crypto_free_session0(qcy, qs);

	return error;
}
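/*
 * Illustrative note (not part of the original source): teardown is
 * deferred while requests are in flight.  qat_crypto_free_session()
 * only marks the session FREEING when qs_inflight > 0; the final
 * completion (or the failure path of qat_crypto_process()) then runs
 * qat_crypto_check_free_session(), which performs the actual free once
 * the count reaches zero.  explicit_memset() is used so the key
 * scrubbing cannot be optimized away.
 */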
int
qat_crypto_start(struct qat_softc *sc)
{
	struct qat_crypto *qcy = &sc->sc_crypto;
	int error, i;
	static const int algs[] = {
	    CRYPTO_DES_CBC, CRYPTO_3DES_CBC, CRYPTO_AES_CBC,
	    CRYPTO_MD5_HMAC_96, CRYPTO_SHA1_HMAC_96, CRYPTO_SHA2_256_HMAC,
	    CRYPTO_SHA2_384_HMAC, CRYPTO_SHA2_512_HMAC,
	};

	/* opencrypto */
	qcy->qcy_cid = crypto_get_driverid(0);
	if (qcy->qcy_cid < 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not get opencrypto driver id\n");
		return ENOENT;
	}

	for (i = 0; i < __arraycount(algs); i++) {
		error = crypto_register(qcy->qcy_cid, algs[i], 0, 0,
		    qat_crypto_new_session, qat_crypto_free_session,
		    qat_crypto_process, qcy);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "could not register crypto: %d\n", error);
			return error;
		}
	}

	return 0;
}

int
qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
{
	struct qat_crypto_bank *qcb = arg;
	struct qat_crypto *qcy;
	struct qat_session *qs;
	struct qat_sym_cookie *qsc;
	struct qat_sym_bulk_cookie *qsbc;
	struct cryptop *crp;

	qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);

	qsbc = &qsc->u.qsc_bulk_cookie;
	qcy = qsbc->qsbc_crypto;
	qs = qsbc->qsbc_session;
	crp = qsbc->qsbc_cb_tag;

	bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
	    qsc->qsc_buf_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, qsc->qsc_buf_dmamap);
	qat_crypto_free_sym_cookie(qcb, qsc);

	crp->crp_etype = 0;
	crypto_done(crp);

	mutex_spin_enter(&qs->qs_session_mtx);
	KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
	qs->qs_inflight--;
	qat_crypto_check_free_session(qcy, qs);

	return 1;
}

#ifdef QAT_DUMP

void
qat_dump_raw(int flag, const char *label, void *d, size_t len)
{
	uintptr_t pc;
	size_t pos;
	uint8_t *dp = (uint8_t *)d;

	if ((qat_dump & flag) == 0)
		return;

	printf("dumping %s at %p len %zu\n", label, d, len);

	pc = __RETURN_ADDRESS;
	printf("\tcallpc ");
	qat_print_sym(pc);
	printf("\n");

	for (pos = 0; pos < len; pos++) {
		if (pos % 32 == 0)
			printf("%8zx: ", pos);
		else if (pos % 4 == 0)
			printf(" ");

		printf("%02x", dp[pos]);

		if (pos % 32 == 31 || pos + 1 == len)
			printf("\n");
	}
}
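/*
 * Illustrative note (not part of the original source): the firmware
 * echoes the opaque field of the request back in the response, and the
 * driver stores a pointer to the cookie there, so
 *
 *	qsc = *(void **)((uintptr_t)msg + qhw_crypto_opaque_offset);
 *
 * in qat_crypto_sym_rxintr() recovers all per-request state without a
 * lookup table.
 */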
void
qat_dump_ring(int bank, int ring)
{
	struct qat_softc *sc = gsc;
	struct qat_bank *qb = &sc->sc_etr_banks[bank];
	struct qat_ring *qr = &qb->qb_et_rings[ring];
	u_int offset;
	int i;
	uint32_t msg;

	printf("dumping bank %d ring %d\n", bank, ring);
	printf("\tid %d name %s msg size %d ring size %d\n",
	    qr->qr_ring_id, qr->qr_name,
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    qr->qr_ring_size);
	printf("\thost head 0x%08x tail 0x%08x\n", qr->qr_head, qr->qr_tail);
	printf("\ttarget head 0x%08x tail 0x%08x\n",
	    qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
	    ETR_RING_HEAD_OFFSET),
	    qat_etr_bank_ring_read_4(sc, qr->qr_bank, qr->qr_ring,
	    ETR_RING_TAIL_OFFSET));

	printf("\n");
	i = 0;
	offset = 0;
	do {
		if (i % 8 == 0)
			printf("%8x:", offset);

		if (offset == qr->qr_head) {
			printf("*");
		} else if (offset == qr->qr_tail) {
			printf("v");
		} else {
			printf(" ");
		}

		msg = *(uint32_t *)((uintptr_t)qr->qr_ring_vaddr + offset);
		printf("%08x", htobe32(msg));

		if (i % 8 == 7)
			printf("\n");

		i++;
		offset = qat_modulo(offset +
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
	} while (offset != 0);
}
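/*
 * Illustrative note (not part of the original source): qat_dump_ring()
 * prints the first 32-bit word of every message slot; "*" marks the
 * host head offset and "v" the host tail offset, so an idle ring shows
 * head == tail with every slot holding the empty-entry signature.
 */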
void
qat_dump_mbuf(struct mbuf *m0, int pre, int post)
{
	struct mbuf *m;

	for (m = m0; m != NULL; m = m->m_next) {
		size_t pos, len;
		uint8_t *buf_start, *data_start, *data_end, *buf_end;
		uint8_t *start, *end, *dp;
		bool skip_ind;
		const char *ind;

		printf("dumping mbuf %p len %d flags 0x%08x\n",
		    m, m->m_len, m->m_flags);
		if (m->m_len == 0)
			continue;

		data_start = (uint8_t *)m->m_data;
		data_end = data_start + m->m_len;
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
		case 0:
			buf_start = (uint8_t *)M_BUFADDR(m);
			buf_end = buf_start +
			    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
			break;
		case M_EXT|M_EXT_CLUSTER:
			buf_start = (uint8_t *)m->m_ext.ext_buf;
			buf_end = buf_start + m->m_ext.ext_size;
			break;
		default:
			/* XXX */
			buf_start = data_start;
			buf_end = data_end;
			break;
		}

		start = data_start - pre;
		if (start < buf_start)
			start = buf_start;
		end = data_end + post;
		if (end > buf_end)
			end = buf_end;

		dp = start;
		len = (size_t)(end - start);
		skip_ind = false;
		for (pos = 0; pos < len; pos++) {

			if (skip_ind)
				ind = "";
			else if (&dp[pos] == data_start)
				ind = "`";
			else
				ind = " ";

			if (pos % 32 == 0)
				printf("%8zx:%s", pos, ind);
			else if (pos % 2 == 0)
				printf("%s", ind);

			printf("%02x", dp[pos]);

			skip_ind = false;
			if (&dp[pos + 1] == data_end) {
				skip_ind = true;
				printf("'");
			}

			if (pos % 32 == 31 || pos + 1 == len) {
				printf("\n");
				skip_ind = false;
			}
		}
	}
}

#endif /* QAT_DUMP */

MODULE(MODULE_CLASS_DRIVER, qat, "pci,opencrypto");

#ifdef _MODULE
#include "ioconf.c"
#endif

int
qat_modcmd(modcmd_t cmd, void *data)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_qat,
		    cfattach_ioconf_qat, cfdata_ioconf_qat);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_qat,
		    cfattach_ioconf_qat, cfdata_ioconf_qat);
#endif
		return error;
	default:
		return ENOTTY;
	}
}