/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    |  ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */

/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Longer requests either result in hung
 * requests or incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_ccm_mac {
	int hash_len;
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_port {
	struct sge_wrq *txq;
	struct sge_rxq *rxq;
	int tx_channel_id;
	u_int active_sessions;
};

struct ccr_session {
	bool active;
	int pending;
	enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode;
	struct ccr_port *port;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
		struct ccr_session_ccm_mac ccm_mac;
	};
	struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	struct mtx lock;
	bool detaching;
	struct ccr_port ports[MAX_NPORTS];
	u_int port_mask;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_input' contains an sglist describing the entire input
	 * buffer for a 'struct cryptop'.  'sg_output' contains an
	 * sglist describing the entire output buffer.  'sg_ulptx' is
	 * used to describe the data the engine should DMA as input
	 * via ULPTX_SGL.  'sg_dsgl' is used to describe the
	 * destination that cipher text and a tag should be written
	 * to.
	 */
	struct sglist *sg_input;
	struct sglist *sg_output;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hash;
	uint64_t stats_hmac;
	uint64_t stats_eta_encrypt;
	uint64_t stats_eta_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_ccm_encrypt;
	uint64_t stats_ccm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
	uint64_t stats_sw_fallback;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the skip bytes at the start of the data as well as any AAD
 * or IV.  For authenticated encryption requests it should include the
 * destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffers described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers an entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
	int error;

	sglist_reset(sg);
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, cb->cb_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}

/* These functions deal with PHYS_DSGL for the reply buffer. */
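/*
 * A worked example of the sizing below (inferred from the code, not
 * from hardware documentation): each 'struct phys_sge_pairs' holds
 * eight 16-bit lengths followed by eight 64-bit addresses (80 bytes).
 * A trailing partial group still consumes all eight length slots,
 * while its addresses are rounded up to an even count, so
 * ccr_phys_dsgl_len(10) is 80 + 16 + 16 == 112 bytes.
 */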
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}

static void
ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst,
    int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */
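/*
 * For reference (derived from the arithmetic below): the first
 * segment lives in 'struct ulptx_sgl' itself (16 bytes), and each
 * later pair of segments adds a 24-byte sge pair, with an odd
 * leftover segment padded out.  Thus ccr_ulptx_sgl_len(1) == 16,
 * ccr_ulptx_sgl_len(2) == 32, and ccr_ulptx_sgl_len(3) == 48.
 */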
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}

static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}

static void
ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
    struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
    u_int sgl_len, u_int hash_size, struct cryptop *crp)
{
	u_int cctx_size, idata_len;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
	    V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
	if (imm_len % 16 != 0)
		idata_len -= 16 - imm_len % 16;
	crwr->sc_imm.len = htobe32(idata_len);
}

static int
ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int hmac_ctrl, imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len, use_opad;

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	if (s->mode == HMAC) {
		use_opad = 1;
		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
	} else {
		use_opad = 0;
		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
	}

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size;
	if (use_opad)
		kctx_len += iopad_size;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crp->crp_payload_length == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
	    axf->blocksize : crp->crp_payload_length);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
	    V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
	if (crp->crp_payload_length == 0) {
		dst[0] = 0x80;
		if (s->mode == HMAC)
			*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
			    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	uint8_t hash[HASH_MAX_LEN];

	if (error)
		return (error);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
		    hash);
		if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
			return (EBADMSG);
	} else
		crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
		    (cpl + 1));
	return (0);
}

static int
ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len, iv_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(sc->sg_dsgl);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/* For AES-XTS we send a 16-byte IV in the work request. */
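	/*
	 * (A sketch of why, inferred from the code rather than the
	 * hardware documentation: OCF's XTS IV is an 8-byte block
	 * number, while the engine wants a full 16-byte tweak; the
	 * memset later in this function zeroes the remaining bytes.)
	 */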
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
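	/*
	 * Load the cipher key into the key context.  For AES-XTS the
	 * OCF key blob holds the data key followed by the tweak key,
	 * but the halves are written below in the opposite order; the
	 * engine apparently expects the tweak key first (an inference
	 * from the copies below, not from documentation).
	 */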
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (SCMD_HMAC_CTRL_DIV2);
	return (SCMD_HMAC_CTRL_NO_TRUNC);
}

static int
ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size, iv_len;
	u_int aad_start, aad_stop;
	u_int auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
		return (EINVAL);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
	    iv_len + crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;

	/*
	 * The firmware hangs on requests whose input comes within a
	 * few hundred bytes of MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room,
	 * along with the size of the hash, even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;

	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_aad_start, crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/*
	 * Any auth-only data before the cipher region is marked as AAD.
	 * Auth-data that overlaps with the cipher region is placed in
	 * the auth section.
	 */
	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;
	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (s->blkcipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.pads, iopad_size * 2);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			crypto_copydata(crp, crp->crp_aad_start,
			    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The IV handling for GCM in OCF is a bit more complicated in
	 * that IPSec provides a full 16-byte IV (including the
	 * counter), whereas the /dev/crypto interface sometimes
	 * provides a full 16-byte IV (if no IV is provided in the
	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
	 *
	 * When provided a 12-byte IV, assume the IV is really 16 bytes
	 * with a counter in the last 4 bytes initialized to 1.
	 *
	 * While iv_len is checked below, the value is currently
	 * always set to 12 when creating a GCM session in this driver
	 * due to limitations in OCF (there is no way to know what the
	 * IV length of a given request will be).  This means that the
	 * driver always assumes a 12-byte IV for now.
	 */
	if (s->blkcipher.iv_len == 12)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->blkcipher.iv_len;

	/*
	 * GCM requests should always provide an explicit IV.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_aad_start, crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);
	memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
	if (s->blkcipher.iv_len == 12)
		*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway.  However, for decrypt
	 * the cipher ends before the tag in the ETA case (and
	 * authstop is set to stop before the tag), but for GCM the
	 * cipher still runs to the end of the buffer.  Not sure if
	 * this is intentional or a firmware quirk, but it is required
	 * for working tag validation with GCM decryption.
	 */
1299 */ 1300 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( 1301 V_CPL_TX_SEC_PDU_AADSTART(aad_start) | 1302 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | 1303 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | 1304 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); 1305 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( 1306 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | 1307 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | 1308 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | 1309 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); 1310 1311 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ 1312 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); 1313 crwr->sec_cpl.seqno_numivs = htobe32( 1314 V_SCMD_SEQ_NO_CTRL(0) | 1315 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | 1316 V_SCMD_ENC_DEC_CTRL(op_type) | 1317 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | 1318 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) | 1319 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) | 1320 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1321 V_SCMD_IV_SIZE(iv_len / 2) | 1322 V_SCMD_NUM_IVS(0)); 1323 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1324 V_SCMD_IV_GEN_CTRL(0) | 1325 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1326 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1327 1328 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1329 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1330 dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); 1331 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); 1332 1333 dst = (char *)(crwr + 1) + kctx_len; 1334 ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); 1335 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1336 memcpy(dst, iv, iv_len); 1337 dst += iv_len; 1338 if (imm_len != 0) { 1339 if (crp->crp_aad_length != 0) { 1340 crypto_copydata(crp, crp->crp_aad_start, 1341 crp->crp_aad_length, dst); 1342 dst += crp->crp_aad_length; 1343 } 1344 crypto_copydata(crp, crp->crp_payload_start, 1345 crp->crp_payload_length, dst); 1346 dst += crp->crp_payload_length; 1347 if (op_type == CHCR_DECRYPT_OP) 1348 crypto_copydata(crp, crp->crp_digest_start, 1349 hash_size_in_response, dst); 1350 } else 1351 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1352 1353 /* XXX: TODO backpressure */ 1354 t4_wrq_tx(sc->adapter, wr); 1355 1356 explicit_bzero(iv, sizeof(iv)); 1357 return (0); 1358 } 1359 1360 static int 1361 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, 1362 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1363 { 1364 1365 /* 1366 * The updated IV to permit chained requests is at 1367 * cpl->data[2], but OCF doesn't permit chained requests. 1368 * 1369 * Note that the hardware should always verify the GMAC hash. 1370 */ 1371 return (error); 1372 } 1373 1374 /* 1375 * Handle a GCM request that is not supported by the crypto engine by 1376 * performing the operation in software. Derived from swcr_authenc(). 1377 */ 1378 static void 1379 ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp) 1380 { 1381 struct auth_hash *axf; 1382 struct enc_xform *exf; 1383 void *auth_ctx, *kschedule; 1384 char block[GMAC_BLOCK_LEN]; 1385 char digest[GMAC_DIGEST_LEN]; 1386 char iv[AES_BLOCK_LEN]; 1387 int error, i, len; 1388 1389 auth_ctx = NULL; 1390 kschedule = NULL; 1391 1392 /* Initialize the MAC. 
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT);
	if (kschedule == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = exf->setkey(kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccr_gcm() for more details.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
		error = EINVAL;
		goto out;
	}
	memcpy(iv, crp->crp_iv, 12);
	*(uint32_t *)&iv[12] = htobe32(1);

	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
		len = imin(crp->crp_aad_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_aad_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		axf->Update(auth_ctx, block, sizeof(block));
	}

	exf->reinit(kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
		len = imin(crp->crp_payload_length - i, sizeof(block));
		crypto_copydata(crp, crp->crp_payload_start + i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(kschedule, block, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    block);
		} else {
			axf->Update(auth_ctx, block, len);
		}
	}

	/* Length block. */
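	/*
	 * This is the final GHASH block, encoding len(A) || len(C) as
	 * two 64-bit big-endian bit counts.  The 32-bit stores below
	 * fill the low word of each count; the high words stay zero
	 * from the bzero().
	 */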
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
	((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8);
	axf->Update(auth_ctx, block, sizeof(block));

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
		    digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
		    digest2);
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crp->crp_payload_length;
			    i += sizeof(block)) {
				len = imin(crp->crp_payload_length - i,
				    sizeof(block));
				crypto_copydata(crp, crp->crp_payload_start + i,
				    len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block, block);
				crypto_copyback(crp, crp->crp_payload_start + i,
				    len, block);
			}
		} else
			error = EBADMSG;
		explicit_bzero(digest2, sizeof(digest2));
	}

out:
	zfree(kschedule, M_CCR);
	zfree(auth_ctx, M_CCR);
	explicit_bzero(block, sizeof(block));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(digest, sizeof(digest));
	crp->crp_etype = error;
	crypto_done(crp);
}

static void
generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
    const char *iv, char *b0)
{
	u_int i, payload_len;

	/*
	 * NB: L' (the size of the length field minus one) is already
	 * set in the first byte of the IV.
	 */
	memcpy(b0, iv, CCM_B0_SIZE);

	/* Set length of hash in bits 3 - 5. */
	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);

	/* Store the payload length as a big-endian value. */
	payload_len = crp->crp_payload_length;
	for (i = 0; i < iv[0]; i++) {
		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
		payload_len >>= 8;
	}

	/*
	 * If there is AAD in the request, set bit 6 in the flags
	 * field and store the AAD length as a big-endian value at the
	 * start of block 1.  This only assumes a 16-bit AAD length
	 * since T6 doesn't support large AAD sizes.
	 */
	if (crp->crp_aad_length != 0) {
		b0[0] |= (1 << 6);
		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
	}
}
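/*
 * For reference, a worked example of generate_ccm_b0() above (based
 * on RFC 3610, not on hardware documentation): with OCF's 12-byte
 * nonce, iv[0] is 2 (L' = L - 1, so a 3-byte length field), and B0 is
 * flags || nonce || length, where the loop writes the low bytes of
 * the big-endian payload length and any higher bytes remain zero from
 * the zeroed IV.
 */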
static int
ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct ulptx_idata *idata;
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int aad_len, b0_len, hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle CCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	/*
	 * CCM always includes block 0 in the AAD before AAD from the
	 * request.
	 */
	b0_len = CCM_B0_SIZE;
	if (crp->crp_aad_length != 0)
		b0_len += CCM_AAD_FIELD_SIZE;
	aad_len = b0_len + crp->crp_aad_length;

	/*
	 * CCM requests should always provide an explicit IV (really
	 * the nonce).
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * Always assume a 12 byte input nonce for now since that is
	 * what OCF always generates.  The full IV in the work request
	 * is 16 bytes.
	 */
	iv_len = AES_BLOCK_LEN;

	if (iv_len + aad_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->ccm_mac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + aad_len + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + aad_len + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    aad_len);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of two copies of
	 * the AES key.
	 */
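	/*
	 * (Presumably one copy feeds the CBC-MAC and the other the
	 * CTR cipher; this is an inference from the two memcpy()
	 * calls further down, not from documentation.)
	 */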
	kctx_len = roundup2(s->blkcipher.key_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, AAD (including block
	 * 0), and then the cipher/plain text.  For decryption
	 * requests the hash is appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		/* Block 0 is passed as immediate data. */
		imm_len = b0_len;

		sglist_reset(sc->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_aad_start, crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	aad_start = iv_len + 1;
	aad_stop = aad_start + aad_len - 1;
	cipher_start = aad_stop + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the nonce from the request.  Use the nonce to generate
	 * the full IV with the counter set to 0.
	 */
	memset(iv, 0, iv_len);
	iv[0] = (15 - AES_CCM_IV_LEN) - 1;
	memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  See comments above
	 * in ccr_gcm().
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
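	/*
	 * Note that CIPH_AUTH_SEQ_CTRL below is the inverse of
	 * ccr_gcm(), presumably because CCM MACs the plaintext before
	 * encrypting, while GCM MACs the ciphertext.
	 */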
0 : 1) | 1767 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | 1768 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | 1769 V_SCMD_HMAC_CTRL(hmac_ctrl) | 1770 V_SCMD_IV_SIZE(iv_len / 2) | 1771 V_SCMD_NUM_IVS(0)); 1772 crwr->sec_cpl.ivgen_hdrlen = htobe32( 1773 V_SCMD_IV_GEN_CTRL(0) | 1774 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | 1775 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); 1776 1777 crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; 1778 memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); 1779 memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16), 1780 s->blkcipher.enckey, s->blkcipher.key_len); 1781 1782 dst = (char *)(crwr + 1) + kctx_len; 1783 ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); 1784 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; 1785 memcpy(dst, iv, iv_len); 1786 dst += iv_len; 1787 generate_ccm_b0(crp, hash_size_in_response, iv, dst); 1788 if (sgl_nsegs == 0) { 1789 dst += b0_len; 1790 if (crp->crp_aad_length != 0) { 1791 crypto_copydata(crp, crp->crp_aad_start, 1792 crp->crp_aad_length, dst); 1793 dst += crp->crp_aad_length; 1794 } 1795 crypto_copydata(crp, crp->crp_payload_start, 1796 crp->crp_payload_length, dst); 1797 dst += crp->crp_payload_length; 1798 if (op_type == CHCR_DECRYPT_OP) 1799 crypto_copydata(crp, crp->crp_digest_start, 1800 hash_size_in_response, dst); 1801 } else { 1802 dst += CCM_B0_SIZE; 1803 if (b0_len > CCM_B0_SIZE) { 1804 /* 1805 * If there is AAD, insert padding including a 1806 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL 1807 * is 16-byte aligned. 1808 */ 1809 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, 1810 ("b0_len mismatch")); 1811 memset(dst + CCM_AAD_FIELD_SIZE, 0, 1812 8 - CCM_AAD_FIELD_SIZE); 1813 idata = (void *)(dst + 8); 1814 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 1815 idata->len = htobe32(0); 1816 dst = (void *)(idata + 1); 1817 } 1818 ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); 1819 } 1820 1821 /* XXX: TODO backpressure */ 1822 t4_wrq_tx(sc->adapter, wr); 1823 1824 explicit_bzero(iv, sizeof(iv)); 1825 return (0); 1826 } 1827 1828 static int 1829 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, 1830 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) 1831 { 1832 1833 /* 1834 * The updated IV to permit chained requests is at 1835 * cpl->data[2], but OCF doesn't permit chained requests. 1836 * 1837 * Note that the hardware should always verify the CBC MAC 1838 * hash. 1839 */ 1840 return (error); 1841 } 1842 1843 /* 1844 * Handle a CCM request that is not supported by the crypto engine by 1845 * performing the operation in software. Derived from swcr_authenc(). 1846 */ 1847 static void 1848 ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) 1849 { 1850 struct auth_hash *axf; 1851 struct enc_xform *exf; 1852 union authctx *auth_ctx; 1853 void *kschedule; 1854 char block[CCM_CBC_BLOCK_LEN]; 1855 char digest[AES_CBC_MAC_HASH_LEN]; 1856 char iv[AES_CCM_IV_LEN]; 1857 int error, i, len; 1858 1859 auth_ctx = NULL; 1860 kschedule = NULL; 1861 1862 /* Initialize the MAC. 
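	 * The fallback mirrors what the hardware computes: CCM's
	 * CBC-MAC is keyed with the AES cipher key itself, so the
	 * transform is selected purely by key size.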
*/ 1863 switch (s->blkcipher.key_len) { 1864 case 16: 1865 axf = &auth_hash_ccm_cbc_mac_128; 1866 break; 1867 case 24: 1868 axf = &auth_hash_ccm_cbc_mac_192; 1869 break; 1870 case 32: 1871 axf = &auth_hash_ccm_cbc_mac_256; 1872 break; 1873 default: 1874 error = EINVAL; 1875 goto out; 1876 } 1877 auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); 1878 if (auth_ctx == NULL) { 1879 error = ENOMEM; 1880 goto out; 1881 } 1882 axf->Init(auth_ctx); 1883 axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); 1884 1885 /* Initialize the cipher. */ 1886 exf = &enc_xform_ccm; 1887 kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); 1888 if (kschedule == NULL) { 1889 error = ENOMEM; 1890 goto out; 1891 } 1892 error = exf->setkey(kschedule, s->blkcipher.enckey, 1893 s->blkcipher.key_len); 1894 if (error) 1895 goto out; 1896 1897 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { 1898 error = EINVAL; 1899 goto out; 1900 } 1901 memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN); 1902 1903 auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; 1904 auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; 1905 axf->Reinit(auth_ctx, iv, sizeof(iv)); 1906 1907 /* MAC the AAD. */ 1908 for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { 1909 len = imin(crp->crp_aad_length - i, sizeof(block)); 1910 crypto_copydata(crp, crp->crp_aad_start + i, len, block); 1911 bzero(block + len, sizeof(block) - len); 1912 axf->Update(auth_ctx, block, sizeof(block)); 1913 } 1914 1915 exf->reinit(kschedule, iv); 1916 1917 /* Do encryption/decryption with MAC */ 1918 for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { 1919 len = imin(crp->crp_payload_length - i, sizeof(block)); 1920 crypto_copydata(crp, crp->crp_payload_start + i, len, block); 1921 bzero(block + len, sizeof(block) - len); 1922 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1923 axf->Update(auth_ctx, block, len); 1924 exf->encrypt(kschedule, block, block); 1925 crypto_copyback(crp, crp->crp_payload_start + i, len, 1926 block); 1927 } else { 1928 exf->decrypt(kschedule, block, block); 1929 axf->Update(auth_ctx, block, len); 1930 } 1931 } 1932 1933 /* Finalize MAC. */ 1934 axf->Final(digest, auth_ctx); 1935 1936 /* Inject or validate tag. */ 1937 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1938 crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), 1939 digest); 1940 error = 0; 1941 } else { 1942 char digest2[AES_CBC_MAC_HASH_LEN]; 1943 1944 crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), 1945 digest2); 1946 if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { 1947 error = 0; 1948 1949 /* Tag matches, decrypt data. 
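			 * The loop above ran the cipher only to feed
			 * each plaintext block into the CBC-MAC; it
			 * never copied the results back out.  Decrypt
			 * again, this time writing the plaintext into
			 * the crypto buffer, now that the tag has
			 * been verified.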
*/ 1950 exf->reinit(kschedule, iv); 1951 for (i = 0; i < crp->crp_payload_length; 1952 i += sizeof(block)) { 1953 len = imin(crp->crp_payload_length - i, 1954 sizeof(block)); 1955 crypto_copydata(crp, crp->crp_payload_start + i, 1956 len, block); 1957 bzero(block + len, sizeof(block) - len); 1958 exf->decrypt(kschedule, block, block); 1959 crypto_copyback(crp, crp->crp_payload_start + i, 1960 len, block); 1961 } 1962 } else 1963 error = EBADMSG; 1964 explicit_bzero(digest2, sizeof(digest2)); 1965 } 1966 1967 out: 1968 zfree(kschedule, M_CCR); 1969 zfree(auth_ctx, M_CCR); 1970 explicit_bzero(block, sizeof(block)); 1971 explicit_bzero(iv, sizeof(iv)); 1972 explicit_bzero(digest, sizeof(digest)); 1973 crp->crp_etype = error; 1974 crypto_done(crp); 1975 } 1976 1977 static void 1978 ccr_identify(driver_t *driver, device_t parent) 1979 { 1980 struct adapter *sc; 1981 1982 sc = device_get_softc(parent); 1983 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && 1984 device_find_child(parent, "ccr", -1) == NULL) 1985 device_add_child(parent, "ccr", -1); 1986 } 1987 1988 static int 1989 ccr_probe(device_t dev) 1990 { 1991 1992 device_set_desc(dev, "Chelsio Crypto Accelerator"); 1993 return (BUS_PROBE_DEFAULT); 1994 } 1995 1996 static void 1997 ccr_sysctls(struct ccr_softc *sc) 1998 { 1999 struct sysctl_ctx_list *ctx; 2000 struct sysctl_oid *oid, *port_oid; 2001 struct sysctl_oid_list *children; 2002 char buf[16]; 2003 int i; 2004 2005 ctx = device_get_sysctl_ctx(sc->dev); 2006 2007 /* 2008 * dev.ccr.X. 2009 */ 2010 oid = device_get_sysctl_tree(sc->dev); 2011 children = SYSCTL_CHILDREN(oid); 2012 2013 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, 2014 &sc->port_mask, 0, "Mask of enabled ports"); 2015 2016 /* 2017 * dev.ccr.X.stats. 2018 */ 2019 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", 2020 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); 2021 children = SYSCTL_CHILDREN(oid); 2022 2023 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, 2024 &sc->stats_hash, 0, "Hash requests submitted"); 2025 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, 2026 &sc->stats_hmac, 0, "HMAC requests submitted"); 2027 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD, 2028 &sc->stats_blkcipher_encrypt, 0, 2029 "Cipher encryption requests submitted"); 2030 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD, 2031 &sc->stats_blkcipher_decrypt, 0, 2032 "Cipher decryption requests submitted"); 2033 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD, 2034 &sc->stats_eta_encrypt, 0, 2035 "Combined AES+HMAC encryption requests submitted"); 2036 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD, 2037 &sc->stats_eta_decrypt, 0, 2038 "Combined AES+HMAC decryption requests submitted"); 2039 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD, 2040 &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted"); 2041 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD, 2042 &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted"); 2043 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD, 2044 &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted"); 2045 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD, 2046 &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted"); 2047 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, 2048 &sc->stats_wr_nomem, 0, "Work request memory allocation failures"); 2049 
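	/*
	 * Several of the counters below (mac_error, pad_error) are
	 * bumped from the status-word checks in do_cpl6_fw_pld() when
	 * the engine reports a failed MAC comparison or bad padding in
	 * a reply.
	 */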
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, 2050 &sc->stats_inflight, 0, "Requests currently pending"); 2051 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, 2052 &sc->stats_mac_error, 0, "MAC errors"); 2053 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, 2054 &sc->stats_pad_error, 0, "Padding errors"); 2055 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD, 2056 &sc->stats_bad_session, 0, "Requests with invalid session ID"); 2057 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD, 2058 &sc->stats_sglist_error, 0, 2059 "Requests for which DMA mapping failed"); 2060 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD, 2061 &sc->stats_process_error, 0, "Requests failed during queueing"); 2062 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD, 2063 &sc->stats_sw_fallback, 0, 2064 "Requests processed by falling back to software"); 2065 2066 /* 2067 * dev.ccr.X.stats.port 2068 */ 2069 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", 2070 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); 2071 2072 for (i = 0; i < nitems(sc->ports); i++) { 2073 if (sc->ports[i].rxq == NULL) 2074 continue; 2075 2076 /* 2077 * dev.ccr.X.stats.port.Y 2078 */ 2079 snprintf(buf, sizeof(buf), "%d", i); 2080 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, 2081 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); 2082 children = SYSCTL_CHILDREN(oid); 2083 2084 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", 2085 CTLFLAG_RD, &sc->ports[i].active_sessions, 0, 2086 "Count of active sessions"); 2087 } 2088 } 2089 2090 static void 2091 ccr_init_port(struct ccr_softc *sc, int port) 2092 { 2093 2094 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; 2095 sc->ports[port].rxq = 2096 &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq]; 2097 sc->ports[port].tx_channel_id = port; 2098 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS, 2099 "Too many ports to fit in port_mask"); 2100 sc->port_mask |= 1u << port; 2101 } 2102 2103 static int 2104 ccr_attach(device_t dev) 2105 { 2106 struct ccr_softc *sc; 2107 int32_t cid; 2108 int i; 2109 2110 sc = device_get_softc(dev); 2111 sc->dev = dev; 2112 sc->adapter = device_get_softc(device_get_parent(dev)); 2113 for_each_port(sc->adapter, i) { 2114 ccr_init_port(sc, i); 2115 } 2116 cid = crypto_get_driverid(dev, sizeof(struct ccr_session), 2117 CRYPTOCAP_F_HARDWARE); 2118 if (cid < 0) { 2119 device_printf(dev, "could not get crypto driver id\n"); 2120 return (ENXIO); 2121 } 2122 sc->cid = cid; 2123 sc->adapter->ccr_softc = sc; 2124 2125 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); 2126 sc->sg_input = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 2127 sc->sg_output = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 2128 sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 2129 sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK); 2130 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); 2131 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); 2132 ccr_sysctls(sc); 2133 2134 return (0); 2135 } 2136 2137 static int 2138 ccr_detach(device_t dev) 2139 { 2140 struct ccr_softc *sc; 2141 2142 sc = device_get_softc(dev); 2143 2144 mtx_lock(&sc->lock); 2145 sc->detaching = true; 2146 mtx_unlock(&sc->lock); 2147 2148 crypto_unregister_all(sc->cid); 2149 2150 mtx_destroy(&sc->lock); 2151 sglist_free(sc->sg_iv_aad); 2152 free(sc->iv_aad_buf, M_CCR); 2153 sglist_free(sc->sg_dsgl); 2154 sglist_free(sc->sg_ulptx); 2155
sglist_free(sc->sg_output); 2156 sglist_free(sc->sg_input); 2157 sc->adapter->ccr_softc = NULL; 2158 return (0); 2159 } 2160 2161 static void 2162 ccr_init_hash_digest(struct ccr_session *s) 2163 { 2164 union authctx auth_ctx; 2165 struct auth_hash *axf; 2166 2167 axf = s->hmac.auth_hash; 2168 axf->Init(&auth_ctx); 2169 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); 2170 } 2171 2172 static bool 2173 ccr_aes_check_keylen(int alg, int klen) 2174 { 2175 2176 switch (klen * 8) { 2177 case 128: 2178 case 192: 2179 if (alg == CRYPTO_AES_XTS) 2180 return (false); 2181 break; 2182 case 256: 2183 break; 2184 case 512: 2185 if (alg != CRYPTO_AES_XTS) 2186 return (false); 2187 break; 2188 default: 2189 return (false); 2190 } 2191 return (true); 2192 } 2193 2194 static void 2195 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) 2196 { 2197 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; 2198 unsigned int opad_present; 2199 2200 if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) 2201 kbits = (klen / 2) * 8; 2202 else 2203 kbits = klen * 8; 2204 switch (kbits) { 2205 case 128: 2206 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 2207 break; 2208 case 192: 2209 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 2210 break; 2211 case 256: 2212 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 2213 break; 2214 default: 2215 panic("should not get here"); 2216 } 2217 2218 s->blkcipher.key_len = klen; 2219 memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); 2220 switch (s->blkcipher.cipher_mode) { 2221 case SCMD_CIPH_MODE_AES_CBC: 2222 case SCMD_CIPH_MODE_AES_XTS: 2223 t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); 2224 break; 2225 } 2226 2227 kctx_len = roundup2(s->blkcipher.key_len, 16); 2228 switch (s->mode) { 2229 case ETA: 2230 mk_size = s->hmac.mk_size; 2231 opad_present = 1; 2232 iopad_size = roundup2(s->hmac.partial_digest_len, 16); 2233 kctx_len += iopad_size * 2; 2234 break; 2235 case GCM: 2236 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2237 opad_present = 0; 2238 kctx_len += GMAC_BLOCK_LEN; 2239 break; 2240 case CCM: 2241 switch (kbits) { 2242 case 128: 2243 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 2244 break; 2245 case 192: 2246 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 2247 break; 2248 case 256: 2249 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2250 break; 2251 default: 2252 panic("should not get here"); 2253 } 2254 opad_present = 0; 2255 kctx_len *= 2; 2256 break; 2257 default: 2258 mk_size = CHCR_KEYCTX_NO_KEY; 2259 opad_present = 0; 2260 break; 2261 } 2262 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; 2263 s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | 2264 V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == 2265 SCMD_CIPH_MODE_AES_XTS) | 2266 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | 2267 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | 2268 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); 2269 } 2270 2271 static bool 2272 ccr_auth_supported(const struct crypto_session_params *csp) 2273 { 2274 2275 switch (csp->csp_auth_alg) { 2276 case CRYPTO_SHA1: 2277 case CRYPTO_SHA2_224: 2278 case CRYPTO_SHA2_256: 2279 case CRYPTO_SHA2_384: 2280 case CRYPTO_SHA2_512: 2281 case CRYPTO_SHA1_HMAC: 2282 case CRYPTO_SHA2_224_HMAC: 2283 case CRYPTO_SHA2_256_HMAC: 2284 case CRYPTO_SHA2_384_HMAC: 2285 case CRYPTO_SHA2_512_HMAC: 2286 break; 2287 default: 2288 return (false); 2289 } 2290 return (true); 2291 } 2292 2293 static bool 2294 ccr_cipher_supported(const struct crypto_session_params *csp) 2295 { 2296 2297 switch 
(csp->csp_cipher_alg) { 2298 case CRYPTO_AES_CBC: 2299 if (csp->csp_ivlen != AES_BLOCK_LEN) 2300 return (false); 2301 break; 2302 case CRYPTO_AES_ICM: 2303 if (csp->csp_ivlen != AES_BLOCK_LEN) 2304 return (false); 2305 break; 2306 case CRYPTO_AES_XTS: 2307 if (csp->csp_ivlen != AES_XTS_IV_LEN) 2308 return (false); 2309 break; 2310 default: 2311 return (false); 2312 } 2313 return (ccr_aes_check_keylen(csp->csp_cipher_alg, 2314 csp->csp_cipher_klen)); 2315 } 2316 2317 static int 2318 ccr_cipher_mode(const struct crypto_session_params *csp) 2319 { 2320 2321 switch (csp->csp_cipher_alg) { 2322 case CRYPTO_AES_CBC: 2323 return (SCMD_CIPH_MODE_AES_CBC); 2324 case CRYPTO_AES_ICM: 2325 return (SCMD_CIPH_MODE_AES_CTR); 2326 case CRYPTO_AES_NIST_GCM_16: 2327 return (SCMD_CIPH_MODE_AES_GCM); 2328 case CRYPTO_AES_XTS: 2329 return (SCMD_CIPH_MODE_AES_XTS); 2330 case CRYPTO_AES_CCM_16: 2331 return (SCMD_CIPH_MODE_AES_CCM); 2332 default: 2333 return (SCMD_CIPH_MODE_NOP); 2334 } 2335 } 2336 2337 static int 2338 ccr_probesession(device_t dev, const struct crypto_session_params *csp) 2339 { 2340 unsigned int cipher_mode; 2341 2342 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0) 2343 return (EINVAL); 2344 switch (csp->csp_mode) { 2345 case CSP_MODE_DIGEST: 2346 if (!ccr_auth_supported(csp)) 2347 return (EINVAL); 2348 break; 2349 case CSP_MODE_CIPHER: 2350 if (!ccr_cipher_supported(csp)) 2351 return (EINVAL); 2352 break; 2353 case CSP_MODE_AEAD: 2354 switch (csp->csp_cipher_alg) { 2355 case CRYPTO_AES_NIST_GCM_16: 2356 if (csp->csp_ivlen != AES_GCM_IV_LEN) 2357 return (EINVAL); 2358 if (csp->csp_auth_mlen < 0 || 2359 csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 2360 return (EINVAL); 2361 break; 2362 case CRYPTO_AES_CCM_16: 2363 if (csp->csp_ivlen != AES_CCM_IV_LEN) 2364 return (EINVAL); 2365 if (csp->csp_auth_mlen < 0 || 2366 csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN) 2367 return (EINVAL); 2368 break; 2369 default: 2370 return (EINVAL); 2371 } 2372 break; 2373 case CSP_MODE_ETA: 2374 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) 2375 return (EINVAL); 2376 break; 2377 default: 2378 return (EINVAL); 2379 } 2380 2381 if (csp->csp_cipher_klen != 0) { 2382 cipher_mode = ccr_cipher_mode(csp); 2383 if (cipher_mode == SCMD_CIPH_MODE_NOP) 2384 return (EINVAL); 2385 } 2386 2387 return (CRYPTODEV_PROBE_HARDWARE); 2388 } 2389 2390 /* 2391 * Select an available port with the lowest number of active sessions. 2392 */ 2393 static struct ccr_port * 2394 ccr_choose_port(struct ccr_softc *sc) 2395 { 2396 struct ccr_port *best, *p; 2397 int i; 2398 2399 mtx_assert(&sc->lock, MA_OWNED); 2400 best = NULL; 2401 for (i = 0; i < nitems(sc->ports); i++) { 2402 p = &sc->ports[i]; 2403 2404 /* Ignore non-existent ports. */ 2405 if (p->rxq == NULL) 2406 continue; 2407 2408 /* 2409 * XXX: Ignore ports whose queues aren't initialized. 2410 * This is racy as the rxq can be destroyed by the 2411 * associated VI detaching. Eventually ccr should use 2412 * dedicated queues. 
2413 */ 2414 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) 2415 continue; 2416 2417 if ((sc->port_mask & (1u << i)) == 0) 2418 continue; 2419 2420 if (best == NULL || 2421 p->active_sessions < best->active_sessions) 2422 best = p; 2423 } 2424 return (best); 2425 } 2426 2427 static int 2428 ccr_newsession(device_t dev, crypto_session_t cses, 2429 const struct crypto_session_params *csp) 2430 { 2431 struct ccr_softc *sc; 2432 struct ccr_session *s; 2433 struct auth_hash *auth_hash; 2434 unsigned int auth_mode, cipher_mode, mk_size; 2435 unsigned int partial_digest_len; 2436 2437 switch (csp->csp_auth_alg) { 2438 case CRYPTO_SHA1: 2439 case CRYPTO_SHA1_HMAC: 2440 auth_hash = &auth_hash_hmac_sha1; 2441 auth_mode = SCMD_AUTH_MODE_SHA1; 2442 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; 2443 partial_digest_len = SHA1_HASH_LEN; 2444 break; 2445 case CRYPTO_SHA2_224: 2446 case CRYPTO_SHA2_224_HMAC: 2447 auth_hash = &auth_hash_hmac_sha2_224; 2448 auth_mode = SCMD_AUTH_MODE_SHA224; 2449 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2450 partial_digest_len = SHA2_256_HASH_LEN; 2451 break; 2452 case CRYPTO_SHA2_256: 2453 case CRYPTO_SHA2_256_HMAC: 2454 auth_hash = &auth_hash_hmac_sha2_256; 2455 auth_mode = SCMD_AUTH_MODE_SHA256; 2456 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 2457 partial_digest_len = SHA2_256_HASH_LEN; 2458 break; 2459 case CRYPTO_SHA2_384: 2460 case CRYPTO_SHA2_384_HMAC: 2461 auth_hash = &auth_hash_hmac_sha2_384; 2462 auth_mode = SCMD_AUTH_MODE_SHA512_384; 2463 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2464 partial_digest_len = SHA2_512_HASH_LEN; 2465 break; 2466 case CRYPTO_SHA2_512: 2467 case CRYPTO_SHA2_512_HMAC: 2468 auth_hash = &auth_hash_hmac_sha2_512; 2469 auth_mode = SCMD_AUTH_MODE_SHA512_512; 2470 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 2471 partial_digest_len = SHA2_512_HASH_LEN; 2472 break; 2473 default: 2474 auth_hash = NULL; 2475 auth_mode = SCMD_AUTH_MODE_NOP; 2476 mk_size = 0; 2477 partial_digest_len = 0; 2478 break; 2479 } 2480 2481 cipher_mode = ccr_cipher_mode(csp); 2482 2483 #ifdef INVARIANTS 2484 switch (csp->csp_mode) { 2485 case CSP_MODE_CIPHER: 2486 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2487 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2488 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2489 panic("invalid cipher algo"); 2490 break; 2491 case CSP_MODE_DIGEST: 2492 if (auth_mode == SCMD_AUTH_MODE_NOP) 2493 panic("invalid auth algo"); 2494 break; 2495 case CSP_MODE_AEAD: 2496 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && 2497 cipher_mode != SCMD_CIPH_MODE_AES_CCM) 2498 panic("invalid aead cipher algo"); 2499 if (auth_mode != SCMD_AUTH_MODE_NOP) 2500 panic("invalid aead auth algo"); 2501 break; 2502 case CSP_MODE_ETA: 2503 if (cipher_mode == SCMD_CIPH_MODE_NOP || 2504 cipher_mode == SCMD_CIPH_MODE_AES_GCM || 2505 cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2506 panic("invalid cipher algo"); 2507 if (auth_mode == SCMD_AUTH_MODE_NOP) 2508 panic("invalid auth algo"); 2509 break; 2510 default: 2511 panic("invalid csp mode"); 2512 } 2513 #endif 2514 2515 sc = device_get_softc(dev); 2516 2517 mtx_lock(&sc->lock); 2518 if (sc->detaching) { 2519 mtx_unlock(&sc->lock); 2520 return (ENXIO); 2521 } 2522 2523 s = crypto_get_driver_session(cses); 2524 s->port = ccr_choose_port(sc); 2525 if (s->port == NULL) { 2526 mtx_unlock(&sc->lock); 2527 return (ENXIO); 2528 } 2529 2530 switch (csp->csp_mode) { 2531 case CSP_MODE_AEAD: 2532 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) 2533 s->mode = CCM; 2534 else 2535 s->mode = GCM; 2536 break; 2537 case CSP_MODE_ETA: 2538 s->mode = ETA; 2539 break; 2540
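	/*
	 * A keyed digest session drives the HMAC engine; a keyless
	 * one computes a plain hash.
	 */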
case CSP_MODE_DIGEST: 2541 if (csp->csp_auth_klen != 0) 2542 s->mode = HMAC; 2543 else 2544 s->mode = HASH; 2545 break; 2546 case CSP_MODE_CIPHER: 2547 s->mode = BLKCIPHER; 2548 break; 2549 } 2550 2551 if (s->mode == GCM) { 2552 if (csp->csp_auth_mlen == 0) 2553 s->gmac.hash_len = AES_GMAC_HASH_LEN; 2554 else 2555 s->gmac.hash_len = csp->csp_auth_mlen; 2556 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, 2557 s->gmac.ghash_h); 2558 } else if (s->mode == CCM) { 2559 if (csp->csp_auth_mlen == 0) 2560 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; 2561 else 2562 s->ccm_mac.hash_len = csp->csp_auth_mlen; 2563 } else if (auth_mode != SCMD_AUTH_MODE_NOP) { 2564 s->hmac.auth_hash = auth_hash; 2565 s->hmac.auth_mode = auth_mode; 2566 s->hmac.mk_size = mk_size; 2567 s->hmac.partial_digest_len = partial_digest_len; 2568 if (csp->csp_auth_mlen == 0) 2569 s->hmac.hash_len = auth_hash->hashsize; 2570 else 2571 s->hmac.hash_len = csp->csp_auth_mlen; 2572 if (csp->csp_auth_key != NULL) 2573 t4_init_hmac_digest(auth_hash, partial_digest_len, 2574 csp->csp_auth_key, csp->csp_auth_klen, 2575 s->hmac.pads); 2576 else 2577 ccr_init_hash_digest(s); 2578 } 2579 if (cipher_mode != SCMD_CIPH_MODE_NOP) { 2580 s->blkcipher.cipher_mode = cipher_mode; 2581 s->blkcipher.iv_len = csp->csp_ivlen; 2582 if (csp->csp_cipher_key != NULL) 2583 ccr_aes_setkey(s, csp->csp_cipher_key, 2584 csp->csp_cipher_klen); 2585 } 2586 2587 s->active = true; 2588 s->port->active_sessions++; 2589 mtx_unlock(&sc->lock); 2590 return (0); 2591 } 2592 2593 static void 2594 ccr_freesession(device_t dev, crypto_session_t cses) 2595 { 2596 struct ccr_softc *sc; 2597 struct ccr_session *s; 2598 2599 sc = device_get_softc(dev); 2600 s = crypto_get_driver_session(cses); 2601 mtx_lock(&sc->lock); 2602 if (s->pending != 0) 2603 device_printf(dev, 2604 "session %p freed with %d pending requests\n", s, 2605 s->pending); 2606 s->active = false; 2607 s->port->active_sessions--; 2608 mtx_unlock(&sc->lock); 2609 } 2610 2611 static int 2612 ccr_process(device_t dev, struct cryptop *crp, int hint) 2613 { 2614 const struct crypto_session_params *csp; 2615 struct ccr_softc *sc; 2616 struct ccr_session *s; 2617 int error; 2618 2619 csp = crypto_get_params(crp->crp_session); 2620 s = crypto_get_driver_session(crp->crp_session); 2621 sc = device_get_softc(dev); 2622 2623 mtx_lock(&sc->lock); 2624 error = ccr_populate_sglist(sc->sg_input, &crp->crp_buf); 2625 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) 2626 error = ccr_populate_sglist(sc->sg_output, &crp->crp_obuf); 2627 if (error) { 2628 sc->stats_sglist_error++; 2629 goto out; 2630 } 2631 2632 switch (s->mode) { 2633 case HASH: 2634 error = ccr_hash(sc, s, crp); 2635 if (error == 0) 2636 sc->stats_hash++; 2637 break; 2638 case HMAC: 2639 if (crp->crp_auth_key != NULL) 2640 t4_init_hmac_digest(s->hmac.auth_hash, 2641 s->hmac.partial_digest_len, crp->crp_auth_key, 2642 csp->csp_auth_klen, s->hmac.pads); 2643 error = ccr_hash(sc, s, crp); 2644 if (error == 0) 2645 sc->stats_hmac++; 2646 break; 2647 case BLKCIPHER: 2648 if (crp->crp_cipher_key != NULL) 2649 ccr_aes_setkey(s, crp->crp_cipher_key, 2650 csp->csp_cipher_klen); 2651 error = ccr_blkcipher(sc, s, crp); 2652 if (error == 0) { 2653 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2654 sc->stats_blkcipher_encrypt++; 2655 else 2656 sc->stats_blkcipher_decrypt++; 2657 } 2658 break; 2659 case ETA: 2660 if (crp->crp_auth_key != NULL) 2661 t4_init_hmac_digest(s->hmac.auth_hash, 2662 s->hmac.partial_digest_len, crp->crp_auth_key, 2663 csp->csp_auth_klen, 
s->hmac.pads); 2664 if (crp->crp_cipher_key != NULL) 2665 ccr_aes_setkey(s, crp->crp_cipher_key, 2666 csp->csp_cipher_klen); 2667 error = ccr_eta(sc, s, crp); 2668 if (error == 0) { 2669 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2670 sc->stats_eta_encrypt++; 2671 else 2672 sc->stats_eta_decrypt++; 2673 } 2674 break; 2675 case GCM: 2676 if (crp->crp_cipher_key != NULL) { 2677 t4_init_gmac_hash(crp->crp_cipher_key, 2678 csp->csp_cipher_klen, s->gmac.ghash_h); 2679 ccr_aes_setkey(s, crp->crp_cipher_key, 2680 csp->csp_cipher_klen); 2681 } 2682 if (crp->crp_payload_length == 0) { 2683 mtx_unlock(&sc->lock); 2684 ccr_gcm_soft(s, crp); 2685 return (0); 2686 } 2687 error = ccr_gcm(sc, s, crp); 2688 if (error == EMSGSIZE) { 2689 sc->stats_sw_fallback++; 2690 mtx_unlock(&sc->lock); 2691 ccr_gcm_soft(s, crp); 2692 return (0); 2693 } 2694 if (error == 0) { 2695 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2696 sc->stats_gcm_encrypt++; 2697 else 2698 sc->stats_gcm_decrypt++; 2699 } 2700 break; 2701 case CCM: 2702 if (crp->crp_cipher_key != NULL) { 2703 ccr_aes_setkey(s, crp->crp_cipher_key, 2704 csp->csp_cipher_klen); 2705 } 2706 error = ccr_ccm(sc, s, crp); 2707 if (error == EMSGSIZE) { 2708 sc->stats_sw_fallback++; 2709 mtx_unlock(&sc->lock); 2710 ccr_ccm_soft(s, crp); 2711 return (0); 2712 } 2713 if (error == 0) { 2714 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2715 sc->stats_ccm_encrypt++; 2716 else 2717 sc->stats_ccm_decrypt++; 2718 } 2719 break; 2720 } 2721 2722 if (error == 0) { 2723 s->pending++; 2724 sc->stats_inflight++; 2725 } else 2726 sc->stats_process_error++; 2727 2728 out: 2729 mtx_unlock(&sc->lock); 2730 2731 if (error) { 2732 crp->crp_etype = error; 2733 crypto_done(crp); 2734 } 2735 2736 return (0); 2737 } 2738 2739 static int 2740 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, 2741 struct mbuf *m) 2742 { 2743 struct ccr_softc *sc = iq->adapter->ccr_softc; 2744 struct ccr_session *s; 2745 const struct cpl_fw6_pld *cpl; 2746 struct cryptop *crp; 2747 uint32_t status; 2748 int error; 2749 2750 if (m != NULL) 2751 cpl = mtod(m, const void *); 2752 else 2753 cpl = (const void *)(rss + 1); 2754 2755 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); 2756 s = crypto_get_driver_session(crp->crp_session); 2757 status = be64toh(cpl->data[0]); 2758 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) 2759 error = EBADMSG; 2760 else 2761 error = 0; 2762 2763 mtx_lock(&sc->lock); 2764 s->pending--; 2765 sc->stats_inflight--; 2766 2767 switch (s->mode) { 2768 case HASH: 2769 case HMAC: 2770 error = ccr_hash_done(sc, s, crp, cpl, error); 2771 break; 2772 case BLKCIPHER: 2773 error = ccr_blkcipher_done(sc, s, crp, cpl, error); 2774 break; 2775 case ETA: 2776 error = ccr_eta_done(sc, s, crp, cpl, error); 2777 break; 2778 case GCM: 2779 error = ccr_gcm_done(sc, s, crp, cpl, error); 2780 break; 2781 case CCM: 2782 error = ccr_ccm_done(sc, s, crp, cpl, error); 2783 break; 2784 } 2785 2786 if (error == EBADMSG) { 2787 if (CHK_MAC_ERR_BIT(status)) 2788 sc->stats_mac_error++; 2789 if (CHK_PAD_ERR_BIT(status)) 2790 sc->stats_pad_error++; 2791 } 2792 mtx_unlock(&sc->lock); 2793 crp->crp_etype = error; 2794 crypto_done(crp); 2795 m_freem(m); 2796 return (0); 2797 } 2798 2799 static int 2800 ccr_modevent(module_t mod, int cmd, void *arg) 2801 { 2802 2803 switch (cmd) { 2804 case MOD_LOAD: 2805 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); 2806 return (0); 2807 case MOD_UNLOAD: 2808 t4_register_cpl_handler(CPL_FW6_PLD, NULL); 2809 return (0); 2810 default: 2811 return (EOPNOTSUPP); 2812 } 
2813 } 2814 2815 static device_method_t ccr_methods[] = { 2816 DEVMETHOD(device_identify, ccr_identify), 2817 DEVMETHOD(device_probe, ccr_probe), 2818 DEVMETHOD(device_attach, ccr_attach), 2819 DEVMETHOD(device_detach, ccr_detach), 2820 2821 DEVMETHOD(cryptodev_probesession, ccr_probesession), 2822 DEVMETHOD(cryptodev_newsession, ccr_newsession), 2823 DEVMETHOD(cryptodev_freesession, ccr_freesession), 2824 DEVMETHOD(cryptodev_process, ccr_process), 2825 2826 DEVMETHOD_END 2827 }; 2828 2829 static driver_t ccr_driver = { 2830 "ccr", 2831 ccr_methods, 2832 sizeof(struct ccr_softc) 2833 }; 2834 2835 static devclass_t ccr_devclass; 2836 2837 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); 2838 MODULE_VERSION(ccr, 1); 2839 MODULE_DEPEND(ccr, crypto, 1, 1, 1); 2840 MODULE_DEPEND(ccr, t6nex, 1, 1, 1); 2841
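/*
 * Example usage (a sketch; the unit number and MIB names follow the
 * sysctl nodes registered in ccr_sysctls() above):
 *
 *	# kldload ccr
 *	# sysctl dev.ccr.0.stats
 *	# sysctl dev.ccr.0.port_mask=1
 *
 * The second command dumps the per-driver request counters; the third
 * restricts new sessions to port 0 via the mask consulted in
 * ccr_choose_port().
 */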