/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/* System headers */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <machine/bus.h>

/* Cryptodev headers */
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

/* QAT specific headers */
#include "qat_ocf_mem_pool.h"
#include "qat_ocf_utils.h"
#include "cpa.h"

/* Private functions */
static void
qat_ocf_alloc_single_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct qat_ocf_dma_mem *dma_mem;

	if (error != 0)
		return;

	dma_mem = arg;
	dma_mem->dma_seg = segs[0];
}

/*
 * Translate busdma segments into a QAT flat buffer list, starting at output
 * entry skip_seg and skipping the first skip_bytes bytes of input data.
 * Returns E2BIG if the segments do not fit in QAT_OCF_MAX_FLATS entries.
 */
static int
qat_ocf_populate_buf_list_cb(struct qat_ocf_buffer_list *buffers,
    bus_dma_segment_t *segs, int niseg, int skip_seg, int skip_bytes)
{
	CpaPhysFlatBuffer *flatBuffer;
	bus_addr_t segment_addr;
	bus_size_t segment_len;
	int iseg, oseg;

	for (iseg = 0, oseg = skip_seg;
	    iseg < niseg && oseg < QAT_OCF_MAX_FLATS; iseg++) {
		segment_addr = segs[iseg].ds_addr;
		segment_len = segs[iseg].ds_len;

		if (skip_bytes > 0) {
			if (skip_bytes < segment_len) {
				segment_addr += skip_bytes;
				segment_len -= skip_bytes;
				skip_bytes = 0;
			} else {
				skip_bytes -= segment_len;
				continue;
			}
		}
		flatBuffer = &buffers->flatBuffers[oseg++];
		flatBuffer->dataLenInBytes = (Cpa32U)segment_len;
		flatBuffer->bufferPhysAddr = (CpaPhysicalAddr)segment_addr;
	}
	buffers->numBuffers = oseg;

	return iseg < niseg ? E2BIG : 0;
}

void
qat_ocf_crypto_load_aadbuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct qat_ocf_load_cb_arg *arg;
	struct qat_ocf_cookie *qat_cookie;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	qat_cookie = arg->qat_cookie;
	arg->error = qat_ocf_populate_buf_list_cb(
	    &qat_cookie->src_buffers, segs, nseg, 0, 0);
}

void
qat_ocf_crypto_load_buf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct qat_ocf_cookie *qat_cookie;
	struct qat_ocf_load_cb_arg *arg;
	int start_segment = 0, skip_bytes = 0;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	qat_cookie = arg->qat_cookie;

	/* Append after any segments already present in the source SGL. */
	skip_bytes = 0;
	start_segment = qat_cookie->src_buffers.numBuffers;

	arg->error = qat_ocf_populate_buf_list_cb(
	    &qat_cookie->src_buffers, segs, nseg, start_segment, skip_bytes);
}

void
qat_ocf_crypto_load_obuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct qat_ocf_load_cb_arg *arg;
	struct cryptop *crp;
	struct qat_ocf_cookie *qat_cookie;
	const struct crypto_session_params *csp;
	int osegs = 0, to_copy = 0;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	crp = arg->crp_op;
	qat_cookie = arg->qat_cookie;
	csp = crypto_get_params(crp->crp_session);

	/*
	 * The payload must start at the same offset in the output SG list as
	 * in the input SG list. Copy over SG entries from the input
	 * corresponding to the AAD buffer.
	 */
	if (crp->crp_aad_length == 0 ||
	    (CPA_TRUE == is_sep_aad_supported(csp) && crp->crp_aad)) {
		arg->error = qat_ocf_populate_buf_list_cb(
		    &qat_cookie->dst_buffers, segs, nseg, 0,
		    crp->crp_payload_output_start);
		return;
	}

	/*
	 * Copy the AAD entries from the source SGL to keep the payload at the
	 * same position in the destination buffers.
	 */
	if (NULL == crp->crp_aad)
		to_copy = crp->crp_payload_start - crp->crp_aad_start;
	else
		to_copy = crp->crp_aad_length;

	for (; osegs < qat_cookie->src_buffers.numBuffers; osegs++) {
		CpaPhysFlatBuffer *src_flat;
		CpaPhysFlatBuffer *dst_flat;
		int data_len;

		if (to_copy <= 0)
			break;

		src_flat = &qat_cookie->src_buffers.flatBuffers[osegs];
		dst_flat = &qat_cookie->dst_buffers.flatBuffers[osegs];

		dst_flat->bufferPhysAddr = src_flat->bufferPhysAddr;
		data_len = imin(src_flat->dataLenInBytes, to_copy);
		dst_flat->dataLenInBytes = data_len;
		to_copy -= data_len;
	}

	arg->error = qat_ocf_populate_buf_list_cb(&qat_cookie->dst_buffers,
	    segs, nseg, osegs, crp->crp_payload_output_start);
}

static int
qat_ocf_alloc_dma_mem(device_t dev, struct qat_ocf_dma_mem *dma_mem, int nseg,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    nseg,			/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_COHERENT,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &dma_mem->dma_tag);
	if (error != 0) {
		device_printf(dev, "couldn't create DMA tag, error = %d\n",
		    error);
		return error;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &dma_mem->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma_mem->dma_map);
	if (error != 0) {
		device_printf(dev, "couldn't allocate dmamem, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
	    dma_mem->dma_vaddr, size, qat_ocf_alloc_single_cb, dma_mem,
	    BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "couldn't load dmamem map, error = %d\n",
		    error);
		goto fail_1;
	}

	return 0;
fail_1:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
fail_0:
	bus_dma_tag_destroy(dma_mem->dma_tag);

	return error;
}

static void
qat_ocf_free_dma_mem(struct qat_ocf_dma_mem *qdm)
{
	if (qdm->dma_tag != NULL && qdm->dma_vaddr != NULL) {
		bus_dmamap_unload(qdm->dma_tag, qdm->dma_map);
		bus_dmamem_free(qdm->dma_tag, qdm->dma_vaddr, qdm->dma_map);
		bus_dma_tag_destroy(qdm->dma_tag);
		explicit_bzero(qdm, sizeof(*qdm));
	}
}

static int
qat_ocf_dma_tag_and_map(device_t dev, struct qat_ocf_dma_mem *dma_mem,
    bus_size_t size, bus_size_t segs)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    segs,			/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_COHERENT,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &dma_mem->dma_tag);
	if (error != 0)
		return error;

	error = bus_dmamap_create(dma_mem->dma_tag, BUS_DMA_COHERENT,
	    &dma_mem->dma_map);
	if (error != 0)
		return error;

	return 0;
}

static void
qat_ocf_clear_cookie(struct qat_ocf_cookie *qat_cookie)
{
	qat_cookie->src_buffers.numBuffers = 0;
	qat_cookie->dst_buffers.numBuffers = 0;
	qat_cookie->is_sep_aad_used = CPA_FALSE;
	explicit_bzero(qat_cookie->qat_ocf_iv_buf,
	    sizeof(qat_cookie->qat_ocf_iv_buf));
	explicit_bzero(qat_cookie->qat_ocf_digest,
	    sizeof(qat_cookie->qat_ocf_digest));
	explicit_bzero(qat_cookie->qat_ocf_gcm_aad,
	    sizeof(qat_cookie->qat_ocf_gcm_aad));
	qat_cookie->crp_op = NULL;
}

/* Public functions */
CpaStatus
qat_ocf_cookie_dma_pre_sync(struct cryptop *crp, CpaCySymDpOpData *pOpData)
{
	struct qat_ocf_cookie *qat_cookie;

	if (NULL == pOpData->pCallbackTag)
		return CPA_STATUS_FAIL;

	qat_cookie = (struct qat_ocf_cookie *)pOpData->pCallbackTag;

	if (CPA_TRUE == qat_cookie->is_sep_aad_used) {
		bus_dmamap_sync(qat_cookie->gcm_aad_dma_mem.dma_tag,
		    qat_cookie->gcm_aad_dma_mem.dma_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	bus_dmamap_sync(qat_cookie->src_dma_mem.dma_tag,
	    qat_cookie->src_dma_mem.dma_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bus_dmamap_sync(qat_cookie->dst_dma_mem.dma_tag,
		    qat_cookie->dst_dma_mem.dma_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	bus_dmamap_sync(qat_cookie->dma_tag,
	    qat_cookie->dma_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return CPA_STATUS_SUCCESS;
}

CpaStatus
qat_ocf_cookie_dma_post_sync(struct cryptop *crp, CpaCySymDpOpData *pOpData)
{
	struct qat_ocf_cookie *qat_cookie;

	if (NULL == pOpData->pCallbackTag)
		return CPA_STATUS_FAIL;

	qat_cookie = (struct qat_ocf_cookie *)pOpData->pCallbackTag;

	bus_dmamap_sync(qat_cookie->src_dma_mem.dma_tag,
	    qat_cookie->src_dma_mem.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bus_dmamap_sync(qat_cookie->dst_dma_mem.dma_tag,
		    qat_cookie->dst_dma_mem.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_sync(qat_cookie->dma_tag,
	    qat_cookie->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (qat_cookie->is_sep_aad_used)
		bus_dmamap_sync(qat_cookie->gcm_aad_dma_mem.dma_tag,
		    qat_cookie->gcm_aad_dma_mem.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return CPA_STATUS_SUCCESS;
}

CpaStatus
qat_ocf_cookie_dma_unload(struct cryptop *crp, CpaCySymDpOpData *pOpData)
{
	struct qat_ocf_cookie *qat_cookie;

	qat_cookie = pOpData->pCallbackTag;

	if (NULL == qat_cookie)
		return CPA_STATUS_FAIL;

	bus_dmamap_unload(qat_cookie->src_dma_mem.dma_tag,
	    qat_cookie->src_dma_mem.dma_map);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		bus_dmamap_unload(qat_cookie->dst_dma_mem.dma_tag,
		    qat_cookie->dst_dma_mem.dma_map);
	if (qat_cookie->is_sep_aad_used)
		bus_dmamap_unload(qat_cookie->gcm_aad_dma_mem.dma_tag,
		    qat_cookie->gcm_aad_dma_mem.dma_map);

	return CPA_STATUS_SUCCESS;
}

CpaStatus
qat_ocf_cookie_pool_init(struct qat_ocf_instance *instance, device_t dev)
{
	int i, error = 0;

	mtx_init(&instance->cookie_pool_mtx, "QAT cookie pool MTX", NULL,
	    MTX_DEF);
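
	/*
	 * Preallocate a fixed pool of QAT_OCF_MEM_POOL_SIZE cookies. Each
	 * cookie lives in its own DMA-coherent allocation so that the device
	 * can address the embedded SGL headers, IV, digest and AAD buffers
	 * by physical address.
	 */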
	instance->free_cookie_ptr = 0;
	for (i = 0; i < QAT_OCF_MEM_POOL_SIZE; i++) {
		struct qat_ocf_cookie *qat_cookie;
		struct qat_ocf_dma_mem *entry_dma_mem;

		entry_dma_mem = &instance->cookie_dmamem[i];

		/*
		 * Allocate a DMA segment for the cookie. The cookie has to be
		 * stored in DMA-able memory since it contains, among other
		 * things, the src and dst flat buffer lists.
		 */
		error = qat_ocf_alloc_dma_mem(dev, entry_dma_mem, 1,
		    sizeof(struct qat_ocf_cookie), (1 << 6));
		if (error)
			break;

		qat_cookie = entry_dma_mem->dma_vaddr;
		instance->cookie_pool[i] = qat_cookie;

		qat_cookie->dma_map = entry_dma_mem->dma_map;
		qat_cookie->dma_tag = entry_dma_mem->dma_tag;

		qat_ocf_clear_cookie(qat_cookie);

		/* Physical address of the IV buffer */
		qat_cookie->qat_ocf_iv_buf_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, qat_ocf_iv_buf);

		/* Physical address of the digest buffer */
		qat_cookie->qat_ocf_digest_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, qat_ocf_digest);

		/* Physical address of the AAD buffer */
		qat_cookie->qat_ocf_gcm_aad_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, qat_ocf_gcm_aad);

		/* Physical addresses of the src and dst SGL headers */
		qat_cookie->src_buffer_list_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, src_buffers);

		qat_cookie->dst_buffer_list_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, dst_buffers);

		/* Physical address of pOpData */
		qat_cookie->pOpData_paddr = entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, pOpdata);
		/* Initialize the QAT DP API op data with constant values. */
		qat_cookie->pOpdata.pCallbackTag = (void *)qat_cookie;
		qat_cookie->pOpdata.thisPhys =
		    (CpaPhysicalAddr)qat_cookie->pOpData_paddr;

		error = qat_ocf_dma_tag_and_map(dev, &qat_cookie->src_dma_mem,
		    QAT_OCF_MAXLEN, QAT_OCF_MAX_FLATS);
		if (error)
			break;

		error = qat_ocf_dma_tag_and_map(dev, &qat_cookie->dst_dma_mem,
		    QAT_OCF_MAXLEN, QAT_OCF_MAX_FLATS);
		if (error)
			break;

		/*
		 * At most one flat buffer for the embedded AAD, used when the
		 * AAD is provided as a separate buffer by OCF but separate
		 * AAD is not supported by QAT.
		 */
		error = qat_ocf_dma_tag_and_map(dev,
		    &qat_cookie->gcm_aad_dma_mem, QAT_OCF_MAXLEN, 1);
		if (error)
			break;

		instance->free_cookie[i] = qat_cookie;
		instance->free_cookie_ptr++;
	}

	return error;
}

CpaStatus
qat_ocf_cookie_alloc(struct qat_ocf_instance *qat_instance,
    struct qat_ocf_cookie **cookie_out)
{
	mtx_lock(&qat_instance->cookie_pool_mtx);
	if (qat_instance->free_cookie_ptr == 0) {
		mtx_unlock(&qat_instance->cookie_pool_mtx);
		return CPA_STATUS_FAIL;
	}
	*cookie_out =
	    qat_instance->free_cookie[--qat_instance->free_cookie_ptr];
	mtx_unlock(&qat_instance->cookie_pool_mtx);

	return CPA_STATUS_SUCCESS;
}

void
qat_ocf_cookie_free(struct qat_ocf_instance *qat_instance,
    struct qat_ocf_cookie *cookie)
{
	qat_ocf_clear_cookie(cookie);
	mtx_lock(&qat_instance->cookie_pool_mtx);
	qat_instance->free_cookie[qat_instance->free_cookie_ptr++] = cookie;
	mtx_unlock(&qat_instance->cookie_pool_mtx);
}

void
qat_ocf_cookie_pool_deinit(struct qat_ocf_instance *qat_instance)
{
	int i;

	for (i = 0; i < QAT_OCF_MEM_POOL_SIZE; i++) {
		struct qat_ocf_cookie *cookie;
		struct qat_ocf_dma_mem *cookie_dma;

		cookie = qat_instance->cookie_pool[i];
		if (NULL == cookie)
			continue;

		/* Destroy tag and map for source SGL */
		if (cookie->src_dma_mem.dma_tag) {
			bus_dmamap_destroy(cookie->src_dma_mem.dma_tag,
			    cookie->src_dma_mem.dma_map);
			bus_dma_tag_destroy(cookie->src_dma_mem.dma_tag);
		}

		/* Destroy tag and map for dest SGL */
		if (cookie->dst_dma_mem.dma_tag) {
			bus_dmamap_destroy(cookie->dst_dma_mem.dma_tag,
			    cookie->dst_dma_mem.dma_map);
			bus_dma_tag_destroy(cookie->dst_dma_mem.dma_tag);
		}

		/* Destroy tag and map for separated AAD */
		if (cookie->gcm_aad_dma_mem.dma_tag) {
			bus_dmamap_destroy(cookie->gcm_aad_dma_mem.dma_tag,
			    cookie->gcm_aad_dma_mem.dma_map);
			bus_dma_tag_destroy(cookie->gcm_aad_dma_mem.dma_tag);
		}

		/* Free DMA memory */
		cookie_dma = &qat_instance->cookie_dmamem[i];
		qat_ocf_free_dma_mem(cookie_dma);
		qat_instance->cookie_pool[i] = NULL;
	}
	mtx_destroy(&qat_instance->cookie_pool_mtx);
}