/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2021 The FreeBSD Foundation
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       | ----- For requests with AES
 * +-------------------------------+
 * | Hash state                    | ----- For hash-only requests
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        | ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                | ----- Only for HMAC/hash-only requests
 * +-------------------------------+
 * | IV                            | ----- If immediate IV
 * +-------------------------------+
 * | Payload                       | ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   | ----- For HMAC request with
 * +-------------------------------+       'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */

/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 bytes or smaller.  Larger requests either hang or
 * produce incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	const struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_ccm_mac {
	int hash_len;
};

struct ccr_session_cipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_port {
	struct sge_wrq *txq;
	struct sge_rxq *rxq;
	int rx_channel_id;
	int tx_channel_id;
	u_int active_sessions;

	counter_u64_t stats_queued;
	counter_u64_t stats_completed;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	struct mtx lock;
	bool detaching;
	struct ccr_port ports[MAX_NPORTS];
	u_int port_mask;
	int first_rxq_id;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	counter_u64_t stats_cipher_encrypt;
	counter_u64_t stats_cipher_decrypt;
	counter_u64_t stats_hash;
	counter_u64_t stats_hmac;
	counter_u64_t stats_eta_encrypt;
	counter_u64_t stats_eta_decrypt;
	counter_u64_t stats_gcm_encrypt;
	counter_u64_t stats_gcm_decrypt;
	counter_u64_t stats_ccm_encrypt;
	counter_u64_t stats_ccm_decrypt;
	counter_u64_t stats_wr_nomem;
	counter_u64_t stats_inflight;
	counter_u64_t stats_mac_error;
	counter_u64_t stats_pad_error;
	counter_u64_t stats_sglist_error;
	counter_u64_t stats_process_error;
	counter_u64_t stats_sw_fallback;

	struct sysctl_ctx_list ctx;
};

struct ccr_session {
#ifdef INVARIANTS
	int pending;
#endif
	enum { HASH, HMAC, CIPHER, ETA, GCM, CCM } mode;
	struct ccr_softc *sc;
	struct ccr_port *port;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
		struct ccr_session_ccm_mac ccm_mac;
	};
	struct ccr_session_cipher cipher;
	struct mtx lock;

	/*
	 * A fallback software session is used for certain GCM/CCM
	 * requests that the hardware can't handle such as requests
	 * with only AAD and no payload.
	 */
	crypto_session_t sw_session;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_input' contains an sglist describing the entire input
	 * buffer for a 'struct cryptop'.  'sg_output' contains an
	 * sglist describing the entire output buffer.  'sg_ulptx' is
	 * used to describe the data the engine should DMA as input
	 * via ULPTX_SGL.  'sg_dsgl' is used to describe the
	 * destination that cipher text and a tag should be written
	 * to.
	 */
	struct sglist *sg_input;
	struct sglist *sg_output;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the skip bytes at the start of the data as well as any AAD
 * or IV.  For authenticated encryption requests it should include the
 * destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffers described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers an entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
	int error;

	sglist_reset(sg);
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
		error = sglist_append_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
		break;
	case CRYPTO_BUF_UIO:
		error = sglist_append_uio(sg, cb->cb_uio);
		break;
	case CRYPTO_BUF_CONTIG:
		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = sglist_append_vmpages(sg, cb->cb_vm_page,
		    cb->cb_vm_page_offset, cb->cb_vm_page_len);
		break;
	default:
		error = EINVAL;
	}
	return (error);
}

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}

/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

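	/*
	 * Entries are packed in groups of 8: a 'struct phys_sge_pairs'
	 * holds eight 16-bit lengths followed by eight 64-bit
	 * addresses.  A partial final group still carries all eight
	 * length slots, with its address slots rounded up to an even
	 * count so the total stays a multiple of 16 bytes.
	 */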
	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}

static void
ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = s->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	cpl->rss_hdr_int.channel = s->port->rx_channel_id;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

	nsegs--; /* first segment is part of ulptx_sgl */
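	/*
	 * The remaining segments are packed two per ulptx_sge_pair
	 * (two 32-bit lengths plus two 64-bit addresses, 24 bytes); a
	 * leftover odd segment consumes 16 bytes.
	 */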
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}

static void
ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = s->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

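/*
 * The payload is sent as immediate data only if it fits within the
 * adapter's immediate data limit and the fully padded work request
 * (headers plus payload) still fits in a single work request.
 */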
static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}

static void
ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
    struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
    u_int sgl_len, u_int hash_size, struct cryptop *crp)
{
	u_int cctx_size, idata_len;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = 0;
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->rx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |	/* unused in firmware */
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
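	/*
	 * The request pointer doubles as the completion cookie and is
	 * returned in data[1] of the CPL_FW6_PLD reply (see the
	 * comment at the top of this file).
	 */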
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
	    V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
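	/*
	 * The immediate data length covers everything from the
	 * security CPL through the end of the immediate payload,
	 * excluding the trailing SGL and any padding used to round
	 * the immediate payload up to 16 bytes.
	 */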
	idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
	if (imm_len % 16 != 0)
		idata_len -= 16 - imm_len % 16;
	crwr->sc_imm.len = htobe32(idata_len);
}

static int
ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	const struct auth_hash *axf;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int hmac_ctrl, imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len, use_opad;

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	if (s->mode == HMAC) {
		use_opad = 1;
		hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
	} else {
		use_opad = 0;
		hmac_ctrl = SCMD_HMAC_CTRL_NOP;
	}

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size;
	if (use_opad)
		kctx_len += iopad_size;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	if (crp->crp_payload_length == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    hash_size_in_response, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
	    axf->blocksize : crp->crp_payload_length);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
	    V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
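	/*
	 * For an empty payload the engine is handed one pre-padded
	 * block: a 0x80 terminator byte and, for HMAC, the 64-bit
	 * message length in bits (one block's worth, covering the
	 * IPAD block already folded into the partial hash) in the
	 * final 8 bytes.
	 */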
	if (crp->crp_payload_length == 0) {
		dst[0] = 0x80;
		if (s->mode == HMAC)
			*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
			    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	uint8_t hash[HASH_MAX_LEN];

	if (error)
		return (error);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
		    hash);
		if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
			return (EBADMSG);
	} else
		crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
		    (cpl + 1));
	return (0);
}

static int
ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len, iv_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crp->crp_payload_length > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(s->sg_dsgl);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->cipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->cipher.iv_len;

	if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
		imm_len = crp->crp_payload_length;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->cipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
	switch (s->cipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->cipher.enckey,
			    s->cipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->cipher.deckey,
			    s->cipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->cipher.enckey,
		    s->cipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
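		/*
		 * The hardware key context stores the tweak key (the
		 * second half of the OCF session key) first, followed
		 * by the data key.
		 */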
		key_half = s->cipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->cipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->cipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0)
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
	else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_cipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (SCMD_HMAC_CTRL_DIV2);
	return (SCMD_HMAC_CTRL_NO_TRUNC);
}

static int
ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	const struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size, iv_len;
	u_int aad_start, aad_stop;
	u_int auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
		return (EINVAL);
	if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
	    (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* For AES-XTS we send a 16-byte IV in the work request. */
	if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		iv_len = AES_BLOCK_LEN;
	else
		iv_len = s->cipher.iv_len;

	if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
		return (EINVAL);

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(s->sg_dsgl);
	error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0,
	    iv_len + crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->cipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;

	/*
	 * The firmware hangs if sent a request that is only slightly
	 * smaller than MAX_REQUEST_SIZE.  In particular, the firmware
	 * appears to require 512 - 16 bytes of spare room along with
	 * the size of the hash even if the hash isn't included in the
	 * input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;

	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(s->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(s->sg_ulptx,
				    s->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/* Any AAD comes after the IV. */
	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
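	/*
	 * CIPHERSTOP and AUTHSTOP count bytes back from the end of
	 * the input, so for decryption both regions stop short of the
	 * digest appended after the cipher text.
	 */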
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);

	/* Zero the remainder of the IV for AES-XTS. */
	memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->cipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
	switch (s->cipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key, s->cipher.enckey,
			    s->cipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->cipher.deckey,
			    s->cipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_CTR:
		memcpy(crwr->key_ctx.key, s->cipher.enckey,
		    s->cipher.key_len);
		break;
	case SCMD_CIPH_MODE_AES_XTS:
		key_half = s->cipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
		    key_half);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			memcpy(crwr->key_ctx.key + key_half,
			    s->cipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->cipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
	memcpy(dst, s->hmac.pads, iopad_size * 2);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->cipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	iv_len = AES_BLOCK_LEN;

	/*
	 * GCM requests should always provide an explicit IV.
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(s->sg_dsgl);
	error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    crp->crp_aad_length);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the Galois hash key.
	 */
	kctx_len = roundup2(s->cipher.key_len, 16) + GMAC_BLOCK_LEN;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = crp->crp_aad_length + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(s->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(s->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(s->sg_ulptx,
				    s->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	if (crp->crp_aad_length != 0) {
		aad_start = iv_len + 1;
		aad_stop = aad_start + crp->crp_aad_length - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = iv_len + crp->crp_aad_length + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	crypto_read_iv(crp, iv);
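	/*
	 * Set the initial block counter to 1 in the last four bytes,
	 * forming the J0 block from the 96-bit nonce as GCM requires.
	 */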
	*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
	    crp);

	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway.  However, for decrypt
	 * the cipher ends before the tag in the ETA case (and
	 * authstop is set to stop before the tag), but for GCM the
	 * cipher still runs to the end of the buffer.  Not sure if
	 * this is intentional or a firmware quirk, but it is required
	 * for working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}

static int
ccr_ccm_hmac_ctrl(unsigned int authsize)
{
	switch (authsize) {
	case 4:
		return (SCMD_HMAC_CTRL_PL1);
	case 6:
		return (SCMD_HMAC_CTRL_PL2);
	case 8:
		return (SCMD_HMAC_CTRL_DIV2);
	case 10:
		return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
	case 12:
		return (SCMD_HMAC_CTRL_IPSEC_96BIT);
	case 14:
		return (SCMD_HMAC_CTRL_PL3);
	case 16:
		return (SCMD_HMAC_CTRL_NO_TRUNC);
	default:
		__assert_unreachable();
	}
}

static void
generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
    const char *iv, char *b0)
{
	u_int i, payload_len, L;

	/* NB: L is already set in the first byte of the IV. */
	memcpy(b0, iv, CCM_B0_SIZE);
	L = iv[0] + 1;

	/* Set length of hash in bits 3 - 5. */
	b0[0] |= (((hash_size_in_response - 2) / 2) << 3);

	/* Store the payload length as a big-endian value. */
	payload_len = crp->crp_payload_length;
	for (i = 0; i < L; i++) {
		b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
		payload_len >>= 8;
	}

	/*
	 * If there is AAD in the request, set bit 6 in the flags
	 * field and store the AAD length as a big-endian value at the
	 * start of block 1.  This only assumes a 16-bit AAD length
	 * since T6 doesn't support large AAD sizes.
	 */
	if (crp->crp_aad_length != 0) {
		b0[0] |= (1 << 6);
		*(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
	}
}

static int
ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	const struct crypto_session_params *csp;
	struct ulptx_idata *idata;
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int aad_len, b0_len, hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	csp = crypto_get_params(crp->crp_session);

	if (s->cipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle CCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crp->crp_payload_length == 0)
		return (EMSGSIZE);

	/* The length has to fit within the length field in block 0. */
	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	/*
	 * CCM always includes block 0 in the AAD before AAD from the
	 * request.
	 */
	b0_len = CCM_B0_SIZE;
	if (crp->crp_aad_length != 0)
		b0_len += CCM_AAD_FIELD_SIZE;
	aad_len = b0_len + crp->crp_aad_length;

	/*
	 * CCM requests should always provide an explicit IV (really
	 * the nonce).
	 */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/*
	 * The IV in the work request is 16 bytes and not just the
	 * nonce.
	 */
	iv_len = AES_BLOCK_LEN;

	if (iv_len + aad_len > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->ccm_mac.hash_len;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the tag when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (iv_len + aad_len + crp->crp_payload_length +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (iv_len + aad_len + crp->crp_payload_length >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(s->sg_dsgl);
	error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
	    aad_len);
	if (error)
		return (error);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
		    crp->crp_payload_output_start, crp->crp_payload_length);
	else
		error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
			    crp->crp_digest_start, hash_size_in_response);
		else
			error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/*
	 * The 'key' part of the key context consists of two copies of
	 * the AES key.
	 */
	kctx_len = roundup2(s->cipher.key_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, AAD (including block
	 * 0), and then the cipher/plain text.  For decryption
	 * requests the hash is appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crp->crp_payload_length;
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (input_len > MAX_REQUEST_SIZE)
		return (EFBIG);
	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		/* Block 0 is passed as immediate data. */
		imm_len = b0_len;

		sglist_reset(s->sg_ulptx);
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				error = sglist_append(s->sg_ulptx,
				    crp->crp_aad, crp->crp_aad_length);
			else
				error = sglist_append_sglist(s->sg_ulptx,
				    s->sg_input, crp->crp_aad_start,
				    crp->crp_aad_length);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
		    crp->crp_payload_start, crp->crp_payload_length);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
			    crp->crp_digest_start, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = s->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	aad_start = iv_len + 1;
	aad_stop = aad_start + aad_len - 1;
	cipher_start = aad_stop + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
	    sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, s->port->txq);
	if (wr == NULL) {
		counter_u64_add(sc->stats_wr_nomem, 1);
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the nonce from the request.  Use the nonce to generate
	 * the full IV with the counter set to 0.
	 */
	memset(iv, 0, iv_len);
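	/* The flags byte holds L' = L - 1, where L = 15 - nonce length. */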
1640 iv[0] = (15 - csp->csp_ivlen) - 1;
1641 crypto_read_iv(crp, iv + 1);
1642
1643 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1644 crp);
1645
1646 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1647 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1648 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1649 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1650 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1651 V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1652
1653 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1654
1655 /*
1656 * NB: cipherstop is explicitly set to 0. See comments above
1657 * in ccr_gcm().
1658 */
1659 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1660 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1661 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1662 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1663 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1664 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1665 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1666 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1667 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1668 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1669
1670 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1671 hmac_ctrl = ccr_ccm_hmac_ctrl(hash_size_in_response);
1672 crwr->sec_cpl.seqno_numivs = htobe32(
1673 V_SCMD_SEQ_NO_CTRL(0) |
1674 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1675 V_SCMD_ENC_DEC_CTRL(op_type) |
1676 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
1677 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) |
1678 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) |
1679 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1680 V_SCMD_IV_SIZE(iv_len / 2) |
1681 V_SCMD_NUM_IVS(0));
1682 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1683 V_SCMD_IV_GEN_CTRL(0) |
1684 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1685 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1686
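	/*
	 * CCM key contexts carry two copies of the raw AES key; the
	 * second copy begins at the next 16-byte boundary (kctx_len
	 * is doubled for CCM in ccr_aes_setkey()).
	 */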
	crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
	memcpy(crwr->key_ctx.key + roundup(s->cipher.key_len, 16),
	    s->cipher.enckey, s->cipher.key_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
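	/*
	 * Block B0 (plus the AAD length field, when AAD is present)
	 * always travels as immediate data ahead of the payload.
	 */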
	generate_ccm_b0(crp, hash_size_in_response, iv, dst);
	if (sgl_nsegs == 0) {
		dst += b0_len;
		if (crp->crp_aad_length != 0) {
			if (crp->crp_aad != NULL)
				memcpy(dst, crp->crp_aad, crp->crp_aad_length);
			else
				crypto_copydata(crp, crp->crp_aad_start,
				    crp->crp_aad_length, dst);
			dst += crp->crp_aad_length;
		}
		crypto_copydata(crp, crp->crp_payload_start,
		    crp->crp_payload_length, dst);
		dst += crp->crp_payload_length;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp, crp->crp_digest_start,
			    hash_size_in_response, dst);
	} else {
		dst += CCM_B0_SIZE;
		if (b0_len > CCM_B0_SIZE) {
			/*
			 * If there is AAD, insert padding including a
			 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
			 * is 16-byte aligned.
			 */
			KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
			    ("b0_len mismatch"));
			memset(dst + CCM_AAD_FIELD_SIZE, 0,
			    8 - CCM_AAD_FIELD_SIZE);
			idata = (void *)(dst + 8);
			idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
			idata->len = htobe32(0);
			dst = (void *)(idata + 1);
		}
		ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
	}

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	explicit_bzero(iv, sizeof(iv));
	return (0);
}

static int
ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the CBC MAC
	 * hash.
	 */
	return (error);
}

/*
 * Use the software session for requests not supported by the crypto
 * engine (e.g. CCM and GCM requests with an empty payload).
 */
static int
ccr_soft_done(struct cryptop *crp)
{
	struct cryptop *orig;

	orig = crp->crp_opaque;
	orig->crp_etype = crp->crp_etype;
	crypto_freereq(crp);
	crypto_done(orig);
	return (0);
}

static void
ccr_soft(struct ccr_session *s, struct cryptop *crp)
{
	struct cryptop *new;
	int error;

	new = crypto_clonereq(crp, s->sw_session, M_NOWAIT);
	if (new == NULL) {
		crp->crp_etype = ENOMEM;
		crypto_done(crp);
		return;
	}

	/*
	 * XXX: This only really needs CRYPTO_ASYNC_ORDERED if the
	 * original request was dispatched that way. There is no way
	 * to know that though since crypto_dispatch_async() discards
	 * the flag for async backends (such as ccr(4)).
	 */
	new->crp_opaque = crp;
	new->crp_callback = ccr_soft_done;
	error = crypto_dispatch_async(new, CRYPTO_ASYNC_ORDERED);
	if (error != 0) {
		crp->crp_etype = error;
		crypto_done(crp);
	}
}

static void
ccr_identify(driver_t *driver, device_t parent)
{
	struct adapter *sc;

	sc = device_get_softc(parent);
	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
	    device_find_child(parent, "ccr", -1) == NULL)
		device_add_child(parent, "ccr", -1);
}

static int
ccr_probe(device_t dev)
{

	device_set_desc(dev, "Chelsio Crypto Accelerator");
	return (BUS_PROBE_DEFAULT);
}

static void
ccr_sysctls(struct ccr_softc *sc)
{
	struct sysctl_ctx_list *ctx = &sc->ctx;
	struct sysctl_oid *oid, *port_oid;
	struct sysctl_oid_list *children;
	char buf[16];
	int i;

	/*
	 * dev.ccr.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW,
	    &sc->port_mask, 0, "Mask of enabled ports");

	/*
	 * dev.ccr.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
	    &sc->stats_hash, "Hash requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
	    &sc->stats_hmac, "HMAC requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt",
	    CTLFLAG_RD, &sc->stats_cipher_encrypt,
	    "Cipher encryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt",
	    CTLFLAG_RD, &sc->stats_cipher_decrypt,
	    "Cipher decryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt",
	    CTLFLAG_RD, &sc->stats_eta_encrypt,
	    "Combined AES+HMAC encryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt",
	    CTLFLAG_RD, &sc->stats_eta_decrypt,
	    "Combined AES+HMAC decryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt",
	    CTLFLAG_RD, &sc->stats_gcm_encrypt,
	    "AES-GCM encryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt",
	    CTLFLAG_RD, &sc->stats_gcm_decrypt,
	    "AES-GCM decryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt",
	    CTLFLAG_RD, &sc->stats_ccm_encrypt,
	    "AES-CCM encryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt",
	    CTLFLAG_RD, &sc->stats_ccm_decrypt,
	    "AES-CCM decryption requests submitted");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
	    &sc->stats_wr_nomem, "Work request memory allocation failures");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
	    &sc->stats_inflight, "Requests currently pending");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
	    &sc->stats_mac_error, "MAC errors");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
	    &sc->stats_pad_error, "Padding errors");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error",
	    CTLFLAG_RD, &sc->stats_sglist_error,
	    "Requests for which DMA mapping failed");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
	    CTLFLAG_RD, &sc->stats_process_error,
	    "Requests failed during queueing");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
	    CTLFLAG_RD, &sc->stats_sw_fallback,
	    "Requests processed by falling back to software");

	/*
	 * dev.ccr.X.stats.port
	 */
	port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics");

	for (i = 0; i < nitems(sc->ports); i++) {
		if (sc->ports[i].rxq == NULL)
			continue;

		/*
		 * dev.ccr.X.stats.port.Y
		 */
		snprintf(buf, sizeof(buf), "%d", i);
		oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO,
		    buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf);
		children = SYSCTL_CHILDREN(oid);

		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions",
		    CTLFLAG_RD, &sc->ports[i].active_sessions, 0,
		    "Count of active sessions");
		SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "queued",
		    CTLFLAG_RD, &sc->ports[i].stats_queued, "Requests queued");
		SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "completed",
		    CTLFLAG_RD, &sc->ports[i].stats_completed,
		    "Requests completed");
	}
}

static void
ccr_init_port(struct ccr_softc *sc, int port)
{
	struct port_info *pi;

	pi = sc->adapter->port[port];
	sc->ports[port].txq = &sc->adapter->sge.ctrlq[port];
	sc->ports[port].rxq = &sc->adapter->sge.rxq[pi->vi->first_rxq];
	sc->ports[port].rx_channel_id = pi->rx_chan;
	sc->ports[port].tx_channel_id = pi->tx_chan;
	sc->ports[port].stats_queued = counter_u64_alloc(M_WAITOK);
	sc->ports[port].stats_completed = counter_u64_alloc(M_WAITOK);
	_Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS,
	    "Too many ports to fit in port_mask");

	/*
	 * Completions for crypto requests on port 1 can sometimes
	 * return a stale cookie value due to a firmware bug. Disable
	 * requests on port 1 by default on affected firmware.
	 */
	if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
	    port == 0)
		sc->port_mask |= 1u << port;
}

static int
ccr_attach(device_t dev)
{
	struct ccr_softc *sc;
	int32_t cid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->ctx);
	sc->adapter = device_get_softc(device_get_parent(dev));
	for_each_port(sc->adapter, i) {
		ccr_init_port(sc, i);
	}
	cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
	    CRYPTOCAP_F_HARDWARE);
	if (cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}
	sc->cid = cid;

	/*
	 * The FID must be the first RXQ for port 0 regardless of
	 * which port is used to service the request.
	 */
	sc->first_rxq_id = sc->adapter->sge.rxq[0].iq.abs_id;

	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
	sc->stats_cipher_encrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_cipher_decrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_hash = counter_u64_alloc(M_WAITOK);
	sc->stats_hmac = counter_u64_alloc(M_WAITOK);
	sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK);
	sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK);
	sc->stats_inflight = counter_u64_alloc(M_WAITOK);
	sc->stats_mac_error = counter_u64_alloc(M_WAITOK);
	sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
	sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
	sc->stats_process_error = counter_u64_alloc(M_WAITOK);
	sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
	ccr_sysctls(sc);

	return (0);
}

static void
ccr_free_port(struct ccr_softc *sc, int port)
{

	counter_u64_free(sc->ports[port].stats_queued);
	counter_u64_free(sc->ports[port].stats_completed);
}

static int
ccr_detach(device_t dev)
{
	struct ccr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);

	sysctl_ctx_free(&sc->ctx);
	mtx_destroy(&sc->lock);
	counter_u64_free(sc->stats_cipher_encrypt);
	counter_u64_free(sc->stats_cipher_decrypt);
	counter_u64_free(sc->stats_hash);
	counter_u64_free(sc->stats_hmac);
	counter_u64_free(sc->stats_eta_encrypt);
	counter_u64_free(sc->stats_eta_decrypt);
	counter_u64_free(sc->stats_gcm_encrypt);
	counter_u64_free(sc->stats_gcm_decrypt);
	counter_u64_free(sc->stats_ccm_encrypt);
	counter_u64_free(sc->stats_ccm_decrypt);
	counter_u64_free(sc->stats_wr_nomem);
	counter_u64_free(sc->stats_inflight);
	counter_u64_free(sc->stats_mac_error);
	counter_u64_free(sc->stats_pad_error);
	counter_u64_free(sc->stats_sglist_error);
	counter_u64_free(sc->stats_process_error);
	counter_u64_free(sc->stats_sw_fallback);
	for_each_port(sc->adapter, i) {
		ccr_free_port(sc, i);
	}
	sglist_free(sc->sg_iv_aad);
	free(sc->iv_aad_buf, M_CCR);
	return (0);
}

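/*
 * Seed the partial-hash state for unkeyed hash sessions with the
 * algorithm's standard initial value.
 */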
static void
ccr_init_hash_digest(struct ccr_session *s)
{
	union authctx auth_ctx;
	const struct auth_hash *axf;

	axf = s->hmac.auth_hash;
	axf->Init(&auth_ctx);
	t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
}

static bool
ccr_aes_check_keylen(int alg, int klen)
{

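	/*
	 * XTS session keys are two AES keys concatenated, so only
	 * 256- and 512-bit key lengths are valid for XTS.
	 */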
	switch (klen * 8) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (false);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static void
ccr_aes_setkey(struct ccr_session *s, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

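	/* An XTS session key holds two AES keys; size each half. */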
	if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
		kbits = (klen / 2) * 8;
	else
		kbits = klen * 8;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->cipher.key_len = klen;
	memcpy(s->cipher.enckey, key, s->cipher.key_len);
	switch (s->cipher.cipher_mode) {
	case SCMD_CIPH_MODE_AES_CBC:
	case SCMD_CIPH_MODE_AES_XTS:
		t4_aes_getdeckey(s->cipher.deckey, key, kbits);
		break;
	}

	kctx_len = roundup2(s->cipher.key_len, 16);
	switch (s->mode) {
	case ETA:
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	case CCM:
		switch (kbits) {
		case 128:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
			break;
		case 192:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
			break;
		case 256:
			mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
			break;
		default:
			panic("should not get here");
		}
		opad_present = 0;
		kctx_len *= 2;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
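	/*
	 * The key context header expresses the context length in
	 * 16-byte flits.
	 */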
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->cipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(s->cipher.cipher_mode ==
	    SCMD_CIPH_MODE_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}

static bool
ccr_auth_supported(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
ccr_cipher_supported(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}
	return (ccr_aes_check_keylen(csp->csp_cipher_alg,
	    csp->csp_cipher_klen));
}

static int
ccr_cipher_mode(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		return (SCMD_CIPH_MODE_AES_CBC);
	case CRYPTO_AES_ICM:
		return (SCMD_CIPH_MODE_AES_CTR);
	case CRYPTO_AES_NIST_GCM_16:
		return (SCMD_CIPH_MODE_AES_GCM);
	case CRYPTO_AES_XTS:
		return (SCMD_CIPH_MODE_AES_XTS);
	case CRYPTO_AES_CCM_16:
		return (SCMD_CIPH_MODE_AES_CCM);
	default:
		return (SCMD_CIPH_MODE_NOP);
	}
}

static int
ccr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	unsigned int cipher_mode;

	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
	    0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!ccr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!ccr_cipher_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (csp->csp_cipher_klen != 0) {
		cipher_mode = ccr_cipher_mode(csp);
		if (cipher_mode == SCMD_CIPH_MODE_NOP)
			return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Select an available port with the lowest number of active sessions.
 */
static struct ccr_port *
ccr_choose_port(struct ccr_softc *sc)
{
	struct ccr_port *best, *p;
	int i;

	mtx_assert(&sc->lock, MA_OWNED);
	best = NULL;
	for (i = 0; i < nitems(sc->ports); i++) {
		p = &sc->ports[i];

		/* Ignore non-existent ports. */
		if (p->rxq == NULL)
			continue;

		/*
		 * XXX: Ignore ports whose queues aren't initialized.
		 * This is racy as the rxq can be destroyed by the
		 * associated VI detaching. Eventually ccr should use
		 * dedicated queues.
		 */
		if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL)
			continue;

		if ((sc->port_mask & (1u << i)) == 0)
			continue;

		if (best == NULL ||
		    p->active_sessions < best->active_sessions)
			best = p;
	}
	return (best);
}

static void
ccr_delete_session(struct ccr_session *s)
{
	crypto_freesession(s->sw_session);
	sglist_free(s->sg_input);
	sglist_free(s->sg_output);
	sglist_free(s->sg_ulptx);
	sglist_free(s->sg_dsgl);
	mtx_destroy(&s->lock);
}

static int
ccr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	const struct auth_hash *auth_hash;
	unsigned int auth_mode, cipher_mode, mk_size;
	unsigned int partial_digest_len;
	int error;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		auth_hash = &auth_hash_hmac_sha1;
		auth_mode = SCMD_AUTH_MODE_SHA1;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		partial_digest_len = SHA1_HASH_LEN;
		break;
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		auth_hash = &auth_hash_hmac_sha2_224;
		auth_mode = SCMD_AUTH_MODE_SHA224;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		partial_digest_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		auth_hash = &auth_hash_hmac_sha2_256;
		auth_mode = SCMD_AUTH_MODE_SHA256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		partial_digest_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		auth_hash = &auth_hash_hmac_sha2_384;
		auth_mode = SCMD_AUTH_MODE_SHA512_384;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		partial_digest_len = SHA2_512_HASH_LEN;
		break;
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		auth_hash = &auth_hash_hmac_sha2_512;
		auth_mode = SCMD_AUTH_MODE_SHA512_512;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		partial_digest_len = SHA2_512_HASH_LEN;
		break;
	default:
		auth_hash = NULL;
		auth_mode = SCMD_AUTH_MODE_NOP;
		mk_size = 0;
		partial_digest_len = 0;
		break;
	}

	cipher_mode = ccr_cipher_mode(csp);

#ifdef INVARIANTS
	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		if (cipher_mode == SCMD_CIPH_MODE_NOP ||
		    cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
		    cipher_mode == SCMD_CIPH_MODE_AES_CCM)
			panic("invalid cipher algo");
		break;
	case CSP_MODE_DIGEST:
		if (auth_mode == SCMD_AUTH_MODE_NOP)
			panic("invalid auth algo");
		break;
	case CSP_MODE_AEAD:
		if (cipher_mode != SCMD_CIPH_MODE_AES_GCM &&
		    cipher_mode != SCMD_CIPH_MODE_AES_CCM)
			panic("invalid aead cipher algo");
		if (auth_mode != SCMD_AUTH_MODE_NOP)
			panic("invalid aead auth algo");
		break;
	case CSP_MODE_ETA:
		if (cipher_mode == SCMD_CIPH_MODE_NOP ||
		    cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
		    cipher_mode == SCMD_CIPH_MODE_AES_CCM)
			panic("invalid cipher algo");
		if (auth_mode == SCMD_AUTH_MODE_NOP)
			panic("invalid auth algo");
		break;
	default:
		panic("invalid csp mode");
	}
#endif

	s = crypto_get_driver_session(cses);
	mtx_init(&s->lock, "ccr session", NULL, MTX_DEF);
	s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
	s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
	s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
	s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT);
	if (s->sg_input == NULL || s->sg_output == NULL ||
	    s->sg_ulptx == NULL || s->sg_dsgl == NULL) {
		ccr_delete_session(s);
		return (ENOMEM);
	}

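	/*
	 * AEAD sessions keep a software session on hand for requests
	 * the engine rejects (see ccr_soft()).
	 */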
	if (csp->csp_mode == CSP_MODE_AEAD) {
		error = crypto_newsession(&s->sw_session, csp,
		    CRYPTOCAP_F_SOFTWARE);
		if (error) {
			ccr_delete_session(s);
			return (error);
		}
	}

	sc = device_get_softc(dev);
	s->sc = sc;

	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		ccr_delete_session(s);
		return (ENXIO);
	}

	s->port = ccr_choose_port(sc);
	if (s->port == NULL) {
		mtx_unlock(&sc->lock);
		ccr_delete_session(s);
		return (ENXIO);
	}

	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
		if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
			s->mode = CCM;
		else
			s->mode = GCM;
		break;
	case CSP_MODE_ETA:
		s->mode = ETA;
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_auth_klen != 0)
			s->mode = HMAC;
		else
			s->mode = HASH;
		break;
	case CSP_MODE_CIPHER:
		s->mode = CIPHER;
		break;
	}

	if (s->mode == GCM) {
		if (csp->csp_auth_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = csp->csp_auth_mlen;
		t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
		    s->gmac.ghash_h);
	} else if (s->mode == CCM) {
		if (csp->csp_auth_mlen == 0)
			s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
		else
			s->ccm_mac.hash_len = csp->csp_auth_mlen;
	} else if (auth_mode != SCMD_AUTH_MODE_NOP) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (csp->csp_auth_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = csp->csp_auth_mlen;
		if (csp->csp_auth_key != NULL)
			t4_init_hmac_digest(auth_hash, partial_digest_len,
			    csp->csp_auth_key, csp->csp_auth_klen,
			    s->hmac.pads);
		else
			ccr_init_hash_digest(s);
	}
	if (cipher_mode != SCMD_CIPH_MODE_NOP) {
		s->cipher.cipher_mode = cipher_mode;
		s->cipher.iv_len = csp->csp_ivlen;
		if (csp->csp_cipher_key != NULL)
			ccr_aes_setkey(s, csp->csp_cipher_key,
			    csp->csp_cipher_klen);
	}

	s->port->active_sessions++;
	mtx_unlock(&sc->lock);
	return (0);
}

static void
ccr_freesession(device_t dev, crypto_session_t cses)
{
	struct ccr_softc *sc;
	struct ccr_session *s;

	sc = device_get_softc(dev);
	s = crypto_get_driver_session(cses);
#ifdef INVARIANTS
	if (s->pending != 0)
		device_printf(dev,
		    "session %p freed with %d pending requests\n", s,
		    s->pending);
#endif
	mtx_lock(&sc->lock);
	s->port->active_sessions--;
	mtx_unlock(&sc->lock);
	ccr_delete_session(s);
}

static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	const struct crypto_session_params *csp;
	struct ccr_softc *sc;
	struct ccr_session *s;
	int error;

	csp = crypto_get_params(crp->crp_session);
	s = crypto_get_driver_session(crp->crp_session);
	sc = device_get_softc(dev);

	mtx_lock(&s->lock);
	error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
	if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
		error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf);
	if (error) {
		counter_u64_add(sc->stats_sglist_error, 1);
		goto out;
	}

	switch (s->mode) {
	case HASH:
		error = ccr_hash(sc, s, crp);
		if (error == 0)
			counter_u64_add(sc->stats_hash, 1);
		break;
	case HMAC:
		if (crp->crp_auth_key != NULL)
			t4_init_hmac_digest(s->hmac.auth_hash,
			    s->hmac.partial_digest_len, crp->crp_auth_key,
			    csp->csp_auth_klen, s->hmac.pads);
		error = ccr_hash(sc, s, crp);
		if (error == 0)
			counter_u64_add(sc->stats_hmac, 1);
		break;
	case CIPHER:
		if (crp->crp_cipher_key != NULL)
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		error = ccr_cipher(sc, s, crp);
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				counter_u64_add(sc->stats_cipher_encrypt, 1);
			else
				counter_u64_add(sc->stats_cipher_decrypt, 1);
		}
		break;
	case ETA:
		if (crp->crp_auth_key != NULL)
			t4_init_hmac_digest(s->hmac.auth_hash,
			    s->hmac.partial_digest_len, crp->crp_auth_key,
			    csp->csp_auth_klen, s->hmac.pads);
		if (crp->crp_cipher_key != NULL)
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		error = ccr_eta(sc, s, crp);
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				counter_u64_add(sc->stats_eta_encrypt, 1);
			else
				counter_u64_add(sc->stats_eta_decrypt, 1);
		}
		break;
	case GCM:
		if (crp->crp_cipher_key != NULL) {
			t4_init_gmac_hash(crp->crp_cipher_key,
			    csp->csp_cipher_klen, s->gmac.ghash_h);
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		}
		error = ccr_gcm(sc, s, crp);
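		/*
		 * Requests the engine refuses (EMSGSIZE, e.g. an
		 * empty payload) or that exceed a single work request
		 * (EFBIG) fall back to software; the CCM case below
		 * does the same.
		 */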
		if (error == EMSGSIZE || error == EFBIG) {
			counter_u64_add(sc->stats_sw_fallback, 1);
			mtx_unlock(&s->lock);
			ccr_soft(s, crp);
			return (0);
		}
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				counter_u64_add(sc->stats_gcm_encrypt, 1);
			else
				counter_u64_add(sc->stats_gcm_decrypt, 1);
		}
		break;
	case CCM:
		if (crp->crp_cipher_key != NULL) {
			ccr_aes_setkey(s, crp->crp_cipher_key,
			    csp->csp_cipher_klen);
		}
		error = ccr_ccm(sc, s, crp);
		if (error == EMSGSIZE || error == EFBIG) {
			counter_u64_add(sc->stats_sw_fallback, 1);
			mtx_unlock(&s->lock);
			ccr_soft(s, crp);
			return (0);
		}
		if (error == 0) {
			if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
				counter_u64_add(sc->stats_ccm_encrypt, 1);
			else
				counter_u64_add(sc->stats_ccm_decrypt, 1);
		}
		break;
	}

	if (error == 0) {
#ifdef INVARIANTS
		s->pending++;
#endif
		counter_u64_add(sc->stats_inflight, 1);
		counter_u64_add(s->port->stats_queued, 1);
	} else
		counter_u64_add(sc->stats_process_error, 1);

out:
	mtx_unlock(&s->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}

static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint32_t status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

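	/*
	 * data[1] echoes the work request's cookie, which holds a
	 * pointer to the originating cryptop.
	 */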
	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	s = crypto_get_driver_session(crp->crp_session);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	sc = s->sc;
#ifdef INVARIANTS
	mtx_lock(&s->lock);
	s->pending--;
	mtx_unlock(&s->lock);
#endif
	counter_u64_add(sc->stats_inflight, -1);
	counter_u64_add(s->port->stats_completed, 1);

	switch (s->mode) {
	case HASH:
	case HMAC:
		error = ccr_hash_done(sc, s, crp, cpl, error);
		break;
	case CIPHER:
		error = ccr_cipher_done(sc, s, crp, cpl, error);
		break;
	case ETA:
		error = ccr_eta_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	case CCM:
		error = ccr_ccm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			counter_u64_add(sc->stats_mac_error, 1);
		if (CHK_PAD_ERR_BIT(status))
			counter_u64_add(sc->stats_pad_error, 1);
	}
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}

static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

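	/*
	 * Crypto completions arrive as CPL_FW6_PLD messages, so
	 * claim that CPL handler while the module is loaded.
	 */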
	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify, ccr_identify),
	DEVMETHOD(device_probe, ccr_probe),
	DEVMETHOD(device_attach, ccr_attach),
	DEVMETHOD(device_detach, ccr_detach),

	DEVMETHOD(cryptodev_probesession, ccr_probesession),
	DEVMETHOD(cryptodev_newsession, ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process, ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);