/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014-2019 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/kthread.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
#include <machine/pcb.h>
#endif
#include <machine/vmparam.h>
#include <net/if.h>
#include <net/if_var.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#include <net/route.h>
#include <net/route/nhop.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#endif
#include <netinet/tcp_var.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
#include <vm/uma_dbg.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_pagequeue.h>

struct ktls_wq {
	struct mtx	mtx;
	STAILQ_HEAD(, mbuf) m_head;
	STAILQ_HEAD(, socket) so_head;
	bool		running;
	int		lastallocfail;
} __aligned(CACHE_LINE_SIZE);

struct ktls_alloc_thread {
	uint64_t wakeups;
	uint64_t allocs;
	struct thread *td;
	int running;
};

struct ktls_domain_info {
	int count;
	int cpu[MAXCPU];
	struct ktls_alloc_thread alloc_td;
};

static struct ktls_domain_info ktls_domains[MAXMEMDOM];
static struct ktls_wq *ktls_wq;
static struct proc *ktls_proc;
static uma_zone_t ktls_session_zone;
static uma_zone_t ktls_buffer_zone;
static uint16_t ktls_cpuid_lookup[MAXCPU];
static int ktls_init_state;
static struct sx ktls_init_lock;
SX_SYSINIT(ktls_init_lock, &ktls_init_lock, "ktls init");

SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload stats");

#ifdef RSS
static int ktls_bind_threads = 1;
#else
static int ktls_bind_threads;
#endif
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, bind_threads, CTLFLAG_RDTUN,
    &ktls_bind_threads, 0,
    "Bind crypto threads to cores (1) or cores and domains (2) at boot");

static u_int ktls_maxlen = 16384;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
    &ktls_maxlen, 0, "Maximum TLS record size");

static int ktls_number_threads;
SYSCTL_INT(_kern_ipc_tls_stats, OID_AUTO, threads, CTLFLAG_RD,
    &ktls_number_threads, 0,
    "Number of TLS threads in thread-pool");

unsigned int ktls_ifnet_max_rexmit_pct = 2;
SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, ifnet_max_rexmit_pct, CTLFLAG_RWTUN,
    &ktls_ifnet_max_rexmit_pct, 2,
    "Max percent bytes retransmitted before ifnet TLS is disabled");

static bool ktls_offload_enable;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, enable, CTLFLAG_RWTUN,
    &ktls_offload_enable, 0,
    "Enable support for kernel TLS offload");

static bool ktls_cbc_enable = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, cbc_enable, CTLFLAG_RWTUN,
    &ktls_cbc_enable, 1,
    "Enable Support of AES-CBC crypto for kernel TLS");

static bool ktls_sw_buffer_cache = true;
SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, sw_buffer_cache, CTLFLAG_RDTUN,
    &ktls_sw_buffer_cache, 1,
    "Enable caching of output buffers for SW encryption");

static int ktls_max_alloc = 128;
SYSCTL_INT(_kern_ipc_tls, OID_AUTO, max_alloc, CTLFLAG_RWTUN,
    &ktls_max_alloc, 128,
    "Max number of 16k buffers to allocate in thread context");
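/*
 * Example usage of the knobs above (illustrative, from a shell; the
 * names follow the sysctl nodes declared in this file):
 *
 *	sysctl kern.ipc.tls.enable=1		# allow new KTLS sessions
 *	sysctl kern.ipc.tls.cbc_enable=0	# refuse AES-CBC sessions
 *
 * RDTUN knobs such as kern.ipc.tls.maxlen and
 * kern.ipc.tls.bind_threads are read-only at runtime and can only be
 * set as loader tunables, e.g. in /boot/loader.conf.
 */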
static COUNTER_U64_DEFINE_EARLY(ktls_tasks_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls, OID_AUTO, tasks_active, CTLFLAG_RD,
    &ktls_tasks_active, "Number of active tasks");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_pending);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_pending, CTLFLAG_RD,
    &ktls_cnt_tx_pending,
    "Number of TLS 1.0 records waiting for earlier TLS records");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_tx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_tx_inqueue, CTLFLAG_RD,
    &ktls_cnt_tx_queued,
    "Number of TLS records in queue to tasks for SW encryption");

static COUNTER_U64_DEFINE_EARLY(ktls_cnt_rx_queued);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, sw_rx_inqueue, CTLFLAG_RD,
    &ktls_cnt_rx_queued,
    "Number of TLS sockets in queue to tasks for SW decryption");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_total);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, offload_total,
    CTLFLAG_RD, &ktls_offload_total,
    "Total successful TLS setups (parameters set)");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_enable_calls);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, enable_calls,
    CTLFLAG_RD, &ktls_offload_enable_calls,
    "Total number of TLS enable calls made");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_active);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, active, CTLFLAG_RD,
    &ktls_offload_active, "Total Active TLS sessions");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_corrupted_records);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, corrupted_records, CTLFLAG_RD,
    &ktls_offload_corrupted_records, "Total corrupted TLS records received");

static COUNTER_U64_DEFINE_EARLY(ktls_offload_failed_crypto);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, failed_crypto, CTLFLAG_RD,
    &ktls_offload_failed_crypto, "Total TLS crypto failures");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_ifnet);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_ifnet, CTLFLAG_RD,
    &ktls_switch_to_ifnet, "TLS sessions switched from SW to ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_to_sw);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_to_sw, CTLFLAG_RD,
    &ktls_switch_to_sw, "TLS sessions switched from ifnet to SW");

static COUNTER_U64_DEFINE_EARLY(ktls_switch_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, switch_failed, CTLFLAG_RD,
    &ktls_switch_failed, "TLS sessions unable to switch between SW and ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_fail);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_failed, CTLFLAG_RD,
    &ktls_ifnet_disable_fail, "TLS sessions unable to switch to SW from ifnet");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_disable_ok);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, ifnet_disable_ok, CTLFLAG_RD,
    &ktls_ifnet_disable_ok, "TLS sessions able to switch to SW from ifnet");

SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, sw, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Software TLS session stats");
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, ifnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Hardware (ifnet) TLS session stats");
#ifdef TCP_OFFLOAD
SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TOE TLS session stats");
#endif

static COUNTER_U64_DEFINE_EARLY(ktls_sw_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, cbc, CTLFLAG_RD, &ktls_sw_cbc,
    "Active number of software TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, gcm, CTLFLAG_RD, &ktls_sw_gcm,
    "Active number of software TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_sw_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_sw, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_sw_chacha20,
    "Active number of software TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_ifnet_cbc,
    "Active number of ifnet TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_ifnet_gcm,
    "Active number of ifnet TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_ifnet_chacha20,
    "Active number of ifnet TLS sessions using Chacha20-Poly1305");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset, CTLFLAG_RD,
    &ktls_ifnet_reset, "TLS sessions updated to a new ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_dropped);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_dropped, CTLFLAG_RD,
    &ktls_ifnet_reset_dropped,
    "TLS sessions dropped after failing to update ifnet send tag");

static COUNTER_U64_DEFINE_EARLY(ktls_ifnet_reset_failed);
SYSCTL_COUNTER_U64(_kern_ipc_tls_ifnet, OID_AUTO, reset_failed, CTLFLAG_RD,
    &ktls_ifnet_reset_failed,
    "TLS sessions that failed to allocate a new ifnet send tag");

static int ktls_ifnet_permitted;
SYSCTL_UINT(_kern_ipc_tls_ifnet, OID_AUTO, permitted, CTLFLAG_RWTUN,
    &ktls_ifnet_permitted, 1,
    "Whether to permit hardware (ifnet) TLS sessions");

#ifdef TCP_OFFLOAD
static COUNTER_U64_DEFINE_EARLY(ktls_toe_cbc);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, cbc, CTLFLAG_RD,
    &ktls_toe_cbc,
    "Active number of TOE TLS sessions using AES-CBC");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_gcm);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, gcm, CTLFLAG_RD,
    &ktls_toe_gcm,
    "Active number of TOE TLS sessions using AES-GCM");

static COUNTER_U64_DEFINE_EARLY(ktls_toe_chacha20);
SYSCTL_COUNTER_U64(_kern_ipc_tls_toe, OID_AUTO, chacha20, CTLFLAG_RD,
    &ktls_toe_chacha20,
    "Active number of TOE TLS sessions using Chacha20-Poly1305");
#endif
static MALLOC_DEFINE(M_KTLS, "ktls", "Kernel TLS");

static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
static void ktls_reset_send_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);
static void ktls_alloc_thread(void *ctx);

#if defined(INET) || defined(INET6)
static u_int
ktls_get_cpu(struct socket *so)
{
	struct inpcb *inp;
#ifdef NUMA
	struct ktls_domain_info *di;
#endif
	u_int cpuid;

	inp = sotoinpcb(so);
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid != NETISR_CPUID_NONE)
		return (cpuid);
#endif
	/*
	 * Just use the flowid to shard connections in a repeatable
	 * fashion.  Note that TLS 1.0 sessions rely on the
	 * serialization provided by having the same connection use
	 * the same queue.
	 */
#ifdef NUMA
	if (ktls_bind_threads > 1 && inp->inp_numa_domain != M_NODOM) {
		di = &ktls_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = ktls_cpuid_lookup[inp->inp_flowid % ktls_number_threads];
	return (cpuid);
}
#endif
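/*
 * The ktls_buffer_zone below is a UMA cache zone backed directly by
 * the VM page allocator: the import callback fills the cache with
 * wired, physically contiguous ktls_maxlen-sized buffers addressed
 * through the direct map, and the release callback returns those
 * pages to the VM system.
 */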
static int
ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
{
	vm_page_t m;
	int i, req;

	KASSERT((ktls_maxlen & PAGE_MASK) == 0,
	    ("%s: ktls max length %d is not page size-aligned",
	    __func__, ktls_maxlen));

	req = VM_ALLOC_WIRED | VM_ALLOC_NODUMP | malloc2vm_flags(flags);
	for (i = 0; i < count; i++) {
		m = vm_page_alloc_noobj_contig_domain(domain, req,
		    atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL)
			break;
		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	}
	return (i);
}

static void
ktls_buffer_release(void *arg __unused, void **store, int count)
{
	vm_page_t m;
	int i, j;

	for (i = 0; i < count; i++) {
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
		for (j = 0; j < atop(ktls_maxlen); j++) {
			(void)vm_page_unwire_noq(m + j);
			vm_page_free(m + j);
		}
	}
}

static void
ktls_free_mext_contig(struct mbuf *m)
{
	M_ASSERTEXTPG(m);
	uma_zfree(ktls_buffer_zone, (void *)PHYS_TO_DMAP(m->m_epg_pa[0]));
}
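/*
 * Rough lifecycle of a cached output buffer (sketch): ktls_buffer_alloc()
 * later in this file obtains a buffer from ktls_buffer_zone for use as
 * the encryption destination of a file-backed record, the buffer ends up
 * backing an M_EXTPG mbuf, and ktls_free_mext_contig() returns it to the
 * zone when that mbuf is freed.
 */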
static int
ktls_init(void)
{
	struct thread *td;
	struct pcpu *pc;
	int count, domain, error, i;

	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
	    M_WAITOK | M_ZERO);

	ktls_session_zone = uma_zcreate("ktls_session",
	    sizeof(struct ktls_session),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	if (ktls_sw_buffer_cache) {
		ktls_buffer_zone = uma_zcache_create("ktls_buffers",
		    roundup2(ktls_maxlen, PAGE_SIZE), NULL, NULL, NULL, NULL,
		    ktls_buffer_import, ktls_buffer_release, NULL,
		    UMA_ZONE_FIRSTTOUCH);
	}

	/*
	 * Initialize the workqueues to run the TLS work.  We create a
	 * work queue for each CPU.
	 */
	CPU_FOREACH(i) {
		STAILQ_INIT(&ktls_wq[i].m_head);
		STAILQ_INIT(&ktls_wq[i].so_head);
		mtx_init(&ktls_wq[i].mtx, "ktls work queue", NULL, MTX_DEF);
		if (ktls_bind_threads > 1) {
			pc = pcpu_find(i);
			domain = pc->pc_domain;
			count = ktls_domains[domain].count;
			ktls_domains[domain].cpu[count] = i;
			ktls_domains[domain].count++;
		}
		ktls_cpuid_lookup[ktls_number_threads] = i;
		ktls_number_threads++;
	}

	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all KTLS threads.
	 */
	if (ktls_bind_threads > 1) {
		for (i = 0; i < vm_ndomains; i++) {
			if (ktls_domains[i].count == 0) {
				ktls_bind_threads = 1;
				break;
			}
		}
	}

	/* Start kthreads for each workqueue. */
	CPU_FOREACH(i) {
		error = kproc_kthread_add(ktls_work_thread, &ktls_wq[i],
		    &ktls_proc, &td, 0, 0, "KTLS", "thr_%d", i);
		if (error) {
			printf("Can't add KTLS thread %d error %d\n", i, error);
			return (error);
		}
	}

	/*
	 * Start an allocation thread per-domain to perform blocking allocations
	 * of 16k physically contiguous TLS crypto destination buffers.
	 */
	if (ktls_sw_buffer_cache) {
		for (domain = 0; domain < vm_ndomains; domain++) {
			if (VM_DOMAIN_EMPTY(domain))
				continue;
			if (CPU_EMPTY(&cpuset_domain[domain]))
				continue;
			error = kproc_kthread_add(ktls_alloc_thread,
			    &ktls_domains[domain], &ktls_proc,
			    &ktls_domains[domain].alloc_td.td,
			    0, 0, "KTLS", "alloc_%d", domain);
			if (error) {
				printf("Can't add KTLS alloc thread %d error %d\n",
				    domain, error);
				return (error);
			}
		}
	}

	if (bootverbose)
		printf("KTLS: Initialized %d threads\n", ktls_number_threads);
	return (0);
}
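/*
 * Lazily initialize KTLS on first use.  The fast path is a lock-free
 * read of ktls_init_state (> 0 means initialized, < 0 means
 * initialization failed); the slow path serializes the one-time call
 * to ktls_init() under ktls_init_lock.
 */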
static int
ktls_start_kthreads(void)
{
	int error, state;

start:
	state = atomic_load_acq_int(&ktls_init_state);
	if (__predict_true(state > 0))
		return (0);
	if (state < 0)
		return (ENXIO);

	sx_xlock(&ktls_init_lock);
	if (ktls_init_state != 0) {
		sx_xunlock(&ktls_init_lock);
		goto start;
	}

	error = ktls_init();
	if (error == 0)
		state = 1;
	else
		state = -1;
	atomic_store_rel_int(&ktls_init_state, state);
	sx_xunlock(&ktls_init_lock);
	return (error);
}

#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
    struct ktls_session **tlsp)
{
	struct ktls_session *tls;
	int error;

	/* Only TLS 1.0 - 1.3 are supported. */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE)
		return (EINVAL);
	if (en->tls_vminor < TLS_MINOR_VER_ZERO ||
	    en->tls_vminor > TLS_MINOR_VER_THREE)
		return (EINVAL);

	if (en->auth_key_len < 0 || en->auth_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->cipher_key_len < 0 || en->cipher_key_len > TLS_MAX_PARAM_SIZE)
		return (EINVAL);
	if (en->iv_len < 0 || en->iv_len > sizeof(tls->params.iv))
		return (EINVAL);

	/* All supported algorithms require a cipher key. */
	if (en->cipher_key_len == 0)
		return (EINVAL);

	/* No flags are currently supported. */
	if (en->flags != 0)
		return (EINVAL);

	/* Common checks for supported algorithms. */
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * auth_algorithm isn't used, but permit GMAC values
		 * for compatibility.
		 */
		switch (en->auth_algorithm) {
		case 0:
#ifdef COMPAT_FREEBSD12
		/* XXX: Really 13.0-current COMPAT. */
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
#endif
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len != 0)
			return (EINVAL);
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_TWO:
			if (en->iv_len != TLS_AEAD_GCM_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_THREE:
			if (en->iv_len != TLS_1_3_GCM_IV_LEN)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			break;
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
			if (en->tls_vminor != TLS_MINOR_VER_TWO)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		if (en->auth_key_len == 0)
			return (EINVAL);

		/*
		 * TLS 1.0 requires an implicit IV.  TLS 1.1 and 1.2
		 * use explicit IVs.
		 */
		switch (en->tls_vminor) {
		case TLS_MINOR_VER_ZERO:
			if (en->iv_len != TLS_CBC_IMPLICIT_IV_LEN)
				return (EINVAL);
			break;
		case TLS_MINOR_VER_ONE:
		case TLS_MINOR_VER_TWO:
			/* Ignore any supplied IV. */
			en->iv_len = 0;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		if (en->auth_algorithm != 0 || en->auth_key_len != 0)
			return (EINVAL);
		if (en->tls_vminor != TLS_MINOR_VER_TWO &&
		    en->tls_vminor != TLS_MINOR_VER_THREE)
			return (EINVAL);
		if (en->iv_len != TLS_CHACHA20_IV_LEN)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	error = ktls_start_kthreads();
	if (error != 0)
		return (error);

	tls = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls->refcount, 1);
	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);

	tls->wq_index = ktls_get_cpu(so);

	tls->params.cipher_algorithm = en->cipher_algorithm;
	tls->params.auth_algorithm = en->auth_algorithm;
	tls->params.tls_vmajor = en->tls_vmajor;
	tls->params.tls_vminor = en->tls_vminor;
	tls->params.flags = en->flags;
	tls->params.max_frame_len = min(TLS_MAX_MSG_SIZE_V10_2, ktls_maxlen);

	/* Set the header and trailer lengths. */
	tls->params.tls_hlen = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		/*
		 * TLS 1.2 uses a 4 byte implicit IV with an explicit 8 byte
		 * nonce.  TLS 1.3 uses a 12 byte implicit IV.
		 */
		if (en->tls_vminor < TLS_MINOR_VER_THREE)
			tls->params.tls_hlen += sizeof(uint64_t);
		tls->params.tls_tlen = AES_GMAC_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			if (en->tls_vminor == TLS_MINOR_VER_ZERO) {
				/* Implicit IV, no nonce. */
				tls->sequential_records = true;
				tls->next_seqno = be64dec(en->rec_seq);
				STAILQ_INIT(&tls->pending_records);
			} else {
				tls->params.tls_hlen += AES_BLOCK_LEN;
			}
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			tls->params.tls_hlen += AES_BLOCK_LEN;
			tls->params.tls_tlen = AES_BLOCK_LEN +
			    SHA2_384_HASH_LEN;
			break;
		default:
			panic("invalid hmac");
		}
		tls->params.tls_bs = AES_BLOCK_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		/*
		 * Chacha20 uses a 12 byte implicit IV.
		 */
		tls->params.tls_tlen = POLY1305_HASH_LEN;
		tls->params.tls_bs = 1;
		break;
	default:
		panic("invalid cipher");
	}

	/*
	 * TLS 1.3 includes optional padding which we do not support,
	 * and also puts the "real" record type at the end of the
	 * encrypted data.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		tls->params.tls_tlen += sizeof(uint8_t);

	KASSERT(tls->params.tls_hlen <= MBUF_PEXT_HDR_LEN,
	    ("TLS header length too long: %d", tls->params.tls_hlen));
	KASSERT(tls->params.tls_tlen <= MBUF_PEXT_TRAIL_LEN,
	    ("TLS trailer length too long: %d", tls->params.tls_tlen));

	if (en->auth_key_len != 0) {
		tls->params.auth_key_len = en->auth_key_len;
		tls->params.auth_key = malloc(en->auth_key_len, M_KTLS,
		    M_WAITOK);
		error = copyin(en->auth_key, tls->params.auth_key,
		    en->auth_key_len);
		if (error)
			goto out;
	}

	tls->params.cipher_key_len = en->cipher_key_len;
	tls->params.cipher_key = malloc(en->cipher_key_len, M_KTLS, M_WAITOK);
	error = copyin(en->cipher_key, tls->params.cipher_key,
	    en->cipher_key_len);
	if (error)
		goto out;

	/*
	 * This holds the implicit portion of the nonce for AEAD
	 * ciphers and the initial implicit IV for TLS 1.0.  The
	 * explicit portions of the IV are generated in ktls_frame().
	 */
	if (en->iv_len != 0) {
		tls->params.iv_len = en->iv_len;
		error = copyin(en->iv, tls->params.iv, en->iv_len);
		if (error)
			goto out;

		/*
		 * For TLS 1.2 with GCM, generate an 8-byte nonce as a
		 * counter to generate unique explicit IVs.
		 *
		 * Store this counter in the last 8 bytes of the IV
		 * array so that it is 8-byte aligned.
		 */
		if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    en->tls_vminor == TLS_MINOR_VER_TWO)
			arc4rand(tls->params.iv + 8, sizeof(uint64_t), 0);
	}

	*tlsp = tls;
	return (0);

out:
	ktls_cleanup(tls);
	return (error);
}
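/*
 * Duplicate an existing session, e.g. when switching between software
 * and ifnet TLS in ktls_set_tx_mode().  Key material is deep-copied
 * so that each session owns, and can later zero and free, its own
 * keys.
 */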
static struct ktls_session *
ktls_clone_session(struct ktls_session *tls)
{
	struct ktls_session *tls_new;

	tls_new = uma_zalloc(ktls_session_zone, M_WAITOK | M_ZERO);

	counter_u64_add(ktls_offload_active, 1);

	refcount_init(&tls_new->refcount, 1);
	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);

	/* Copy fields from existing session. */
	tls_new->params = tls->params;
	tls_new->wq_index = tls->wq_index;

	/* Deep copy keys. */
	if (tls_new->params.auth_key != NULL) {
		tls_new->params.auth_key = malloc(tls->params.auth_key_len,
		    M_KTLS, M_WAITOK);
		memcpy(tls_new->params.auth_key, tls->params.auth_key,
		    tls->params.auth_key_len);
	}

	tls_new->params.cipher_key = malloc(tls->params.cipher_key_len, M_KTLS,
	    M_WAITOK);
	memcpy(tls_new->params.cipher_key, tls->params.cipher_key,
	    tls->params.cipher_key_len);

	return (tls_new);
}
#endif
static void
ktls_cleanup(struct ktls_session *tls)
{

	counter_u64_add(ktls_offload_active, -1);
	switch (tls->mode) {
	case TCP_TLS_MODE_SW:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_sw_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_sw_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_sw_chacha20, -1);
			break;
		}
		break;
	case TCP_TLS_MODE_IFNET:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, -1);
			break;
		}
		if (tls->snd_tag != NULL)
			m_snd_tag_rele(tls->snd_tag);
		break;
#ifdef TCP_OFFLOAD
	case TCP_TLS_MODE_TOE:
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, -1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, -1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, -1);
			break;
		}
		break;
#endif
	}
	if (tls->ocf_session != NULL)
		ktls_ocf_free(tls);
	if (tls->params.auth_key != NULL) {
		zfree(tls->params.auth_key, M_KTLS);
		tls->params.auth_key = NULL;
		tls->params.auth_key_len = 0;
	}
	if (tls->params.cipher_key != NULL) {
		zfree(tls->params.cipher_key, M_KTLS);
		tls->params.cipher_key = NULL;
		tls->params.cipher_key_len = 0;
	}
	explicit_bzero(tls->params.iv, sizeof(tls->params.iv));
}

#if defined(INET) || defined(INET6)

#ifdef TCP_OFFLOAD
static int
ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	inp = so->so_pcb;
	INP_WLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	if (!(tp->t_flags & TF_TOE)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = tcp_offload_alloc_tls_session(tp, tls, direction);
	INP_WUNLOCK(inp);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_TOE;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_toe_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_toe_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_toe_chacha20, 1);
			break;
		}
	}
	return (error);
}
#endif
/*
 * Common code used when first enabling ifnet TLS on a connection or
 * when allocating a new ifnet TLS session due to a routing change.
 * This function allocates a new TLS send tag on whatever interface
 * the connection is currently routed over.
 */
static int
ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
    struct m_snd_tag **mstp)
{
	union if_snd_tag_alloc_params params;
	struct ifnet *ifp;
	struct nhop_object *nh;
	struct tcpcb *tp;
	int error;

	INP_RLOCK(inp);
	if (inp->inp_flags2 & INP_FREED) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	if (inp->inp_socket == NULL) {
		INP_RUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);

	/*
	 * Check administrative controls on ifnet TLS to determine if
	 * ifnet TLS should be denied.
	 *
	 * - Always permit 'force' requests.
	 * - ktls_ifnet_permitted == 0: always deny.
	 */
	if (!force && ktls_ifnet_permitted == 0) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}

	/*
	 * XXX: Use the cached route in the inpcb to find the
	 * interface.  This should perhaps instead use
	 * rtalloc1_fib(dst, 0, 0, fibnum).  Since KTLS is only
	 * enabled after a connection has completed key negotiation in
	 * userland, the cached route will be present in practice.
	 */
	nh = inp->inp_route.ro_nh;
	if (nh == NULL) {
		INP_RUNLOCK(inp);
		return (ENXIO);
	}
	ifp = nh->nh_ifp;
	if_ref(ifp);

	/*
	 * Allocate a TLS + ratelimit tag if the connection has an
	 * existing pacing rate.
	 */
	if (tp->t_pacing_rate != -1 &&
	    (ifp->if_capenable & IFCAP_TXTLS_RTLMT) != 0) {
		params.hdr.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT;
		params.tls_rate_limit.inp = inp;
		params.tls_rate_limit.tls = tls;
		params.tls_rate_limit.max_rate = tp->t_pacing_rate;
	} else {
		params.hdr.type = IF_SND_TAG_TYPE_TLS;
		params.tls.inp = inp;
		params.tls.tls = tls;
	}
	params.hdr.flowid = inp->inp_flowid;
	params.hdr.flowtype = inp->inp_flowtype;
	params.hdr.numa_domain = inp->inp_numa_domain;
	INP_RUNLOCK(inp);

	if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (inp->inp_vflag & INP_IPV6) {
		if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	} else {
		if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
			error = EOPNOTSUPP;
			goto out;
		}
	}
	error = m_snd_tag_alloc(ifp, &params, mstp);
out:
	if_rele(ifp);
	return (error);
}

static int
ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
{
	struct m_snd_tag *mst;
	int error;

	error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
	if (error == 0) {
		tls->mode = TCP_TLS_MODE_IFNET;
		tls->snd_tag = mst;
		switch (tls->params.cipher_algorithm) {
		case CRYPTO_AES_CBC:
			counter_u64_add(ktls_ifnet_cbc, 1);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			counter_u64_add(ktls_ifnet_gcm, 1);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			counter_u64_add(ktls_ifnet_chacha20, 1);
			break;
		}
	}
	return (error);
}
static void
ktls_use_sw(struct ktls_session *tls)
{
	tls->mode = TCP_TLS_MODE_SW;
	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_CBC:
		counter_u64_add(ktls_sw_cbc, 1);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		counter_u64_add(ktls_sw_gcm, 1);
		break;
	case CRYPTO_CHACHA20_POLY1305:
		counter_u64_add(ktls_sw_chacha20, 1);
		break;
	}
}

static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
	int error;

	error = ktls_ocf_try(so, tls, direction);
	if (error)
		return (error);
	ktls_use_sw(tls);
	return (0);
}

/*
 * KTLS RX stores data in the socket buffer as a list of TLS records,
 * where each record is stored as a control message containing the TLS
 * header followed by data mbufs containing the decrypted data.  This
 * is different from KTLS TX which always uses an mb_ext_pgs mbuf for
 * both encrypted and decrypted data.  TLS records decrypted by a NIC
 * should be queued to the socket buffer as records, but encrypted
 * data which needs to be decrypted by software arrives as a stream of
 * regular mbufs which need to be converted.  In addition, there may
 * already be pending encrypted data in the socket buffer when KTLS RX
 * is enabled.
 *
 * To manage not-yet-decrypted data for KTLS RX, the following scheme
 * is used:
 *
 * - A single chain of NOTREADY mbufs is hung off of sb_mtls.
 *
 * - ktls_check_rx checks this chain of mbufs reading the TLS header
 *   from the first mbuf.  Once all of the data for that TLS record is
 *   queued, the socket is queued to a worker thread.
 *
 * - The worker thread calls ktls_decrypt to decrypt TLS records in
 *   the TLS chain.  Each TLS record is detached from the TLS chain,
 *   decrypted, and inserted into the regular socket buffer chain as
 *   a record starting with a control message holding the TLS header
 *   and a chain of mbufs holding the decrypted data.
 */
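/*
 * From userland, each decrypted record is then received as a
 * TLS_GET_RECORD control message followed by the record payload.
 * An illustrative consumer (sketch only; msghdr setup and error
 * handling omitted):
 *
 *	struct tls_get_record tgr;
 *	struct cmsghdr *cmsg;
 *	struct msghdr msg;
 *	...
 *	recvmsg(s, &msg, 0);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	if (cmsg->cmsg_level == IPPROTO_TCP &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD)
 *		memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
 */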
static void
sb_mark_notready(struct sockbuf *sb)
{
	struct mbuf *m;

	m = sb->sb_mb;
	sb->sb_mtls = m;
	sb->sb_mb = NULL;
	sb->sb_mbtail = NULL;
	sb->sb_lastrecord = NULL;
	for (; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
		    __func__));
		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
		    __func__));
		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
		    __func__));
		m->m_flags |= M_NOTREADY;
		sb->sb_acc -= m->m_len;
		sb->sb_tlscc += m->m_len;
		sb->sb_mtlstail = m;
	}
	KASSERT(sb->sb_acc == 0 && sb->sb_tlscc == sb->sb_ccc,
	    ("%s: acc %u tlscc %u ccc %u", __func__, sb->sb_acc, sb->sb_tlscc,
	    sb->sb_ccc));
}

/*
 * Return information about the pending TLS data in a socket
 * buffer.  On return, 'seqno' is set to the sequence number
 * of the next TLS record to be received, 'resid' is set to
 * the number of bytes still needed for the last pending
 * record.  The function returns 'false' if the last pending
 * record contains a partial TLS header.  In that case, 'resid'
 * is the number of bytes needed to complete the TLS header.
 */
bool
ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp)
{
	struct tls_record_layer hdr;
	struct mbuf *m;
	uint64_t seqno;
	size_t resid;
	u_int offset, record_len;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(sb->sb_flags & SB_TLS_RX);
	seqno = sb->sb_tls_seqno;
	resid = sb->sb_tlscc;
	m = sb->sb_mtls;
	offset = 0;

	if (resid == 0) {
		*seqnop = seqno;
		*residp = 0;
		return (true);
	}

	for (;;) {
		seqno++;

		if (resid < sizeof(hdr)) {
			*seqnop = seqno;
			*residp = sizeof(hdr) - resid;
			return (false);
		}

		m_copydata(m, offset, sizeof(hdr), (void *)&hdr);

		record_len = sizeof(hdr) + ntohs(hdr.tls_length);
		if (resid <= record_len) {
			*seqnop = seqno;
			*residp = record_len - resid;
			return (true);
		}
		resid -= record_len;

		while (record_len != 0) {
			if (m->m_len - offset > record_len) {
				offset += record_len;
				break;
			}

			record_len -= (m->m_len - offset);
			offset = 0;
			m = m->m_next;
		}
	}
}
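/*
 * Enable KTLS RX on a connected TCP socket (reached via the
 * TCP_RXTLS_ENABLE socket option): validate the parameters, create a
 * session, prefer TOE and fall back to software decryption, and mark
 * any already-queued data not-ready so that it runs through the
 * decryption path above.
 */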
int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_rcv.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS 1.3 is not yet supported. */
	if (en->tls_vmajor == TLS_MAJOR_VER_ONE &&
	    en->tls_vminor == TLS_MINOR_VER_THREE)
		return (ENOTSUP);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	error = ktls_ocf_try(so, tls, KTLS_RX);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_RX);
	if (error)
#endif
		ktls_use_sw(tls);

	/* Mark the socket as using TLS offload. */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_rcv.sb_tls_info = tls;
	so->so_rcv.sb_flags |= SB_TLS_RX;

	/* Mark existing data as not ready until it can be decrypted. */
	if (tls->mode != TCP_TLS_MODE_TOE) {
		sb_mark_notready(&so->so_rcv);
		ktls_check_rx(&so->so_rcv);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}

int
ktls_enable_tx(struct socket *so, struct tls_enable *en)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	int error;

	if (!ktls_offload_enable)
		return (ENOTSUP);
	if (SOLISTENING(so))
		return (EINVAL);

	counter_u64_add(ktls_offload_enable_calls, 1);

	/*
	 * This should always be true since only the TCP socket option
	 * invokes this function.
	 */
	if (so->so_proto->pr_protocol != IPPROTO_TCP)
		return (EINVAL);

	/*
	 * XXX: Don't overwrite existing sessions.  We should permit
	 * this to support rekeying in the future.
	 */
	if (so->so_snd.sb_tls_info != NULL)
		return (EALREADY);

	if (en->cipher_algorithm == CRYPTO_AES_CBC && !ktls_cbc_enable)
		return (ENOTSUP);

	/* TLS requires ext pgs */
	if (mb_use_ext_pgs == 0)
		return (ENXIO);

	error = ktls_create_session(so, en, &tls);
	if (error)
		return (error);

	/* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
	error = ktls_try_toe(so, tls, KTLS_TX);
	if (error)
#endif
		error = ktls_try_ifnet(so, tls, false);
	if (error)
		error = ktls_try_sw(so, tls, KTLS_TX);

	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		ktls_cleanup(tls);
		return (error);
	}

	/*
	 * Write lock the INP when setting sb_tls_info so that
	 * routines in tcp_ratelimit.c can read sb_tls_info while
	 * holding the INP lock.
	 */
	inp = so->so_pcb;
	INP_WLOCK(inp);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_seqno = be64dec(en->rec_seq);
	so->so_snd.sb_tls_info = tls;
	if (tls->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);
	SOCK_IO_SEND_UNLOCK(so);

	counter_u64_add(ktls_offload_total, 1);

	return (0);
}
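/*
 * Illustrative userland sequence for enabling TX offload after the
 * TLS handshake completes (sketch; assumes an established TCP socket
 * 's' and handshake-derived key material in 'key' and 'iv'):
 *
 *	struct tls_enable en = {
 *		.cipher_algorithm = CRYPTO_AES_NIST_GCM_16,
 *		.cipher_key = key, .cipher_key_len = sizeof(key),
 *		.iv = iv, .iv_len = sizeof(iv),
 *		.tls_vmajor = TLS_MAJOR_VER_ONE,
 *		.tls_vminor = TLS_MINOR_VER_THREE,
 *	};
 *	setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en));
 *
 * ktls_enable_rx() above is reached the same way via TCP_RXTLS_ENABLE.
 */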
int
ktls_get_rx_mode(struct socket *so, int *modep)
{
	struct ktls_session *tls;
	struct inpcb *inp;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCK_RECVBUF_LOCK(so);
	tls = so->so_rcv.sb_tls_info;
	if (tls == NULL)
		*modep = TCP_TLS_MODE_NONE;
	else
		*modep = tls->mode;
	SOCK_RECVBUF_UNLOCK(so);
	return (0);
}

int
ktls_get_tx_mode(struct socket *so, int *modep)
{
	struct ktls_session *tls;
	struct inpcb *inp;

	if (SOLISTENING(so))
		return (EINVAL);
	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCK_SENDBUF_LOCK(so);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL)
		*modep = TCP_TLS_MODE_NONE;
	else
		*modep = tls->mode;
	SOCK_SENDBUF_UNLOCK(so);
	return (0);
}

/*
 * Switch between SW and ifnet TLS sessions as requested.
 */
int
ktls_set_tx_mode(struct socket *so, int mode)
{
	struct ktls_session *tls, *tls_new;
	struct inpcb *inp;
	int error;

	if (SOLISTENING(so))
		return (EINVAL);
	switch (mode) {
	case TCP_TLS_MODE_SW:
	case TCP_TLS_MODE_IFNET:
		break;
	default:
		return (EINVAL);
	}

	inp = so->so_pcb;
	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK(&so->so_snd);
	tls = so->so_snd.sb_tls_info;
	if (tls == NULL) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	if (tls->mode == mode) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (0);
	}

	tls = ktls_hold(tls);
	SOCKBUF_UNLOCK(&so->so_snd);
	INP_WUNLOCK(inp);

	tls_new = ktls_clone_session(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		error = ktls_try_ifnet(so, tls_new, true);
	else
		error = ktls_try_sw(so, tls_new, KTLS_TX);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
	if (error) {
		counter_u64_add(ktls_switch_failed, 1);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (error);
	}

	/*
	 * If we raced with another session change, keep the existing
	 * session.
	 */
	if (tls != so->so_snd.sb_tls_info) {
		counter_u64_add(ktls_switch_failed, 1);
		SOCK_IO_SEND_UNLOCK(so);
		ktls_free(tls_new);
		ktls_free(tls);
		INP_WLOCK(inp);
		return (EBUSY);
	}

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_tls_info = tls_new;
	if (tls_new->mode != TCP_TLS_MODE_SW)
		so->so_snd.sb_flags |= SB_TLS_IFNET;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCK_IO_SEND_UNLOCK(so);

	/*
	 * Drop two references on 'tls'.  The first is for the
	 * ktls_hold() above.  The second drops the reference from the
	 * socket buffer.
	 */
	KASSERT(tls->refcount >= 2, ("too few references on old session"));
	ktls_free(tls);
	ktls_free(tls);

	if (mode == TCP_TLS_MODE_IFNET)
		counter_u64_add(ktls_switch_to_ifnet, 1);
	else
		counter_u64_add(ktls_switch_to_sw, 1);

	INP_WLOCK(inp);
	return (0);
}
/*
 * Try to allocate a new TLS send tag.  This task is scheduled when
 * ip_output detects a route change while trying to transmit a packet
 * holding a TLS record.  If a new tag is allocated, replace the tag
 * in the TLS session.  Subsequent packets on the connection will use
 * the new tag.  If a new tag cannot be allocated, drop the
 * connection.
 */
static void
ktls_reset_send_tag(void *context, int pending)
{
	struct epoch_tracker et;
	struct ktls_session *tls;
	struct m_snd_tag *old, *new;
	struct inpcb *inp;
	struct tcpcb *tp;
	int error;

	MPASS(pending == 1);

	tls = context;
	inp = tls->inp;

	/*
	 * Free the old tag first before allocating a new one.
	 * ip[6]_output_send() will treat a NULL send tag the same as
	 * an ifp mismatch and drop packets until a new tag is
	 * allocated.
	 *
	 * Write-lock the INP when changing tls->snd_tag since
	 * ip[6]_output_send() holds a read-lock when reading the
	 * pointer.
	 */
	INP_WLOCK(inp);
	old = tls->snd_tag;
	tls->snd_tag = NULL;
	INP_WUNLOCK(inp);
	if (old != NULL)
		m_snd_tag_rele(old);

	error = ktls_alloc_snd_tag(inp, tls, true, &new);

	if (error == 0) {
		INP_WLOCK(inp);
		tls->snd_tag = new;
		mtx_pool_lock(mtxpool_sleep, tls);
		tls->reset_pending = false;
		mtx_pool_unlock(mtxpool_sleep, tls);
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);

		counter_u64_add(ktls_ifnet_reset, 1);

		/*
		 * XXX: Should we kick tcp_output explicitly now that
		 * the send tag is fixed or just rely on timers?
		 */
	} else {
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		if (!in_pcbrele_wlocked(inp)) {
			if (!(inp->inp_flags & INP_TIMEWAIT) &&
			    !(inp->inp_flags & INP_DROPPED)) {
				tp = intotcpcb(inp);
				CURVNET_SET(tp->t_vnet);
				tp = tcp_drop(tp, ECONNABORTED);
				CURVNET_RESTORE();
				if (tp != NULL)
					INP_WUNLOCK(inp);
				counter_u64_add(ktls_ifnet_reset_dropped, 1);
			} else
				INP_WUNLOCK(inp);
		}
		NET_EPOCH_EXIT(et);

		counter_u64_add(ktls_ifnet_reset_failed, 1);

		/*
		 * Leave reset_pending true to avoid future tasks while
		 * the socket goes away.
		 */
	}

	ktls_free(tls);
}

int
ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
{

	if (inp == NULL)
		return (ENOBUFS);

	INP_LOCK_ASSERT(inp);

	/*
	 * See if we should schedule a task to update the send tag for
	 * this session.
	 */
	mtx_pool_lock(mtxpool_sleep, tls);
	if (!tls->reset_pending) {
		(void) ktls_hold(tls);
		in_pcbref(inp);
		tls->inp = inp;
		tls->reset_pending = true;
		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
	}
	mtx_pool_unlock(mtxpool_sleep, tls);
	return (ENOBUFS);
}
#ifdef RATELIMIT
int
ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate)
{
	union if_snd_tag_modify_params params = {
		.rate_limit.max_rate = max_pacing_rate,
		.rate_limit.flags = M_NOWAIT,
	};
	struct m_snd_tag *mst;

	/* Can't get to the inp, but it should be locked. */
	/* INP_LOCK_ASSERT(inp); */

	MPASS(tls->mode == TCP_TLS_MODE_IFNET);

	if (tls->snd_tag == NULL) {
		/*
		 * Resetting send tag, ignore this change.  The
		 * pending reset may or may not see this updated rate
		 * in the tcpcb.  If it doesn't, we will just lose
		 * this rate change.
		 */
		return (0);
	}

	MPASS(tls->snd_tag != NULL);
	MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);

	mst = tls->snd_tag;
	return (mst->sw->snd_tag_modify(mst, &params));
}
#endif
#endif

void
ktls_destroy(struct ktls_session *tls)
{

	if (tls->sequential_records) {
		struct mbuf *m, *n;
		int page_count;

		STAILQ_FOREACH_SAFE(m, &tls->pending_records, m_epg_stailq, n) {
			page_count = m->m_epg_enc_cnt;
			while (page_count > 0) {
				KASSERT(page_count >= m->m_epg_nrdy,
				    ("%s: too few pages", __func__));
				page_count -= m->m_epg_nrdy;
				m = m_free(m);
			}
		}
	}
	ktls_cleanup(tls);
	uma_zfree(ktls_session_zone, tls);
}

void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{

	for (; m != NULL; m = m->m_next) {
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_seq: mapped mbuf %p", m));

		m->m_epg_seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
	}
}
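/*
 * Worked example of the framing lengths computed in
 * ktls_create_session() and consumed by ktls_frame() below, for
 * AES-GCM with TLS 1.2: tls_hlen is the 5-byte record header plus an
 * 8-byte explicit nonce (13 bytes total), tls_tlen is the 16-byte GCM
 * tag, and tls_bs is 1 since no block padding is required.  For TLS
 * 1.3 the trailer grows by one byte to carry the real record type.
 */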
/*
 * Add TLS framing (headers and trailers) to a chain of mbufs.  Each
 * mbuf in the chain must be an unmapped mbuf.  The payload of the
 * mbuf must be populated with the payload of each TLS record.
 *
 * The record_type argument specifies the TLS record type used when
 * populating the TLS header.
 *
 * The enq_count argument on return is set to the number of pages of
 * payload data for this entire chain that need to be encrypted via SW
 * encryption.  The returned value should be passed to ktls_enqueue
 * when scheduling encryption of this chain of mbufs.  To handle the
 * special case of empty fragments for TLS 1.0 sessions, an empty
 * fragment counts as one page.
 */
void
ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
    uint8_t record_type)
{
	struct tls_record_layer *tlshdr;
	struct mbuf *m;
	uint64_t *noncep;
	uint16_t tls_len;
	int maxlen;

	maxlen = tls->params.max_frame_len;
	*enq_cnt = 0;
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * All mbufs in the chain should be TLS records whose
		 * payload does not exceed the maximum frame length.
		 *
		 * Empty TLS records are permitted when using CBC.
		 */
		KASSERT(m->m_len <= maxlen &&
		    (tls->params.cipher_algorithm == CRYPTO_AES_CBC ?
		    m->m_len >= 0 : m->m_len > 0),
		    ("ktls_frame: m %p len %d\n", m, m->m_len));

		/*
		 * TLS frames require unmapped mbufs to store session
		 * info.
		 */
		KASSERT((m->m_flags & M_EXTPG) != 0,
		    ("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));

		tls_len = m->m_len;

		/* Save a reference to the session. */
		m->m_epg_tls = ktls_hold(tls);

		m->m_epg_hdrlen = tls->params.tls_hlen;
		m->m_epg_trllen = tls->params.tls_tlen;
		if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
			int bs, delta;

			/*
			 * AES-CBC pads messages to a multiple of the
			 * block size.  Note that the padding is
			 * applied after the digest and the encryption
			 * is done on the "plaintext || mac || padding".
			 * At least one byte of padding is always
			 * present.
			 *
			 * Compute the final trailer length assuming
			 * at most one block of padding.
			 * tls->params.tls_tlen is the maximum
			 * possible trailer length (padding + digest).
			 * delta holds the number of excess padding
			 * bytes if the maximum were used.  Those
			 * extra bytes are removed.
			 *
			 * For example, with SHA1 (tls_tlen = 16 + 20 =
			 * 36) and a 100 byte payload, delta = (100 +
			 * 36) & 15 = 8, leaving a 28 byte trailer: the
			 * 20 byte digest plus 8 bytes of padding.
			 */
			bs = tls->params.tls_bs;
			delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
			m->m_epg_trllen -= delta;
		}
		m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;

		/* Populate the TLS header. */
		tlshdr = (void *)m->m_epg_hdr;
		tlshdr->tls_vmajor = tls->params.tls_vmajor;

		/*
		 * TLS 1.3 masquerades as TLS 1.2 with a record type
		 * of TLS_RLTYPE_APP.
		 */
		if (tls->params.tls_vminor == TLS_MINOR_VER_THREE &&
		    tls->params.tls_vmajor == TLS_MAJOR_VER_ONE) {
			tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
			tlshdr->tls_type = TLS_RLTYPE_APP;
			/* Save the real record type for later. */
			m->m_epg_record_type = record_type;
			m->m_epg_trail[0] = record_type;
		} else {
			tlshdr->tls_vminor = tls->params.tls_vminor;
			tlshdr->tls_type = record_type;
		}
		tlshdr->tls_length = htons(m->m_len - sizeof(*tlshdr));

		/*
		 * Store nonces / explicit IVs after the end of the
		 * TLS header.
		 *
		 * For GCM with TLS 1.2, an 8 byte nonce is copied
		 * from the end of the IV.  The nonce is then
		 * incremented for use by the next record.
		 *
		 * For CBC, a random nonce is inserted for TLS 1.1+.
		 */
		if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 &&
		    tls->params.tls_vminor == TLS_MINOR_VER_TWO) {
			noncep = (uint64_t *)(tls->params.iv + 8);
			be64enc(tlshdr + 1, *noncep);
			(*noncep)++;
		} else if (tls->params.cipher_algorithm == CRYPTO_AES_CBC &&
		    tls->params.tls_vminor >= TLS_MINOR_VER_ONE)
			arc4rand(tlshdr + 1, AES_BLOCK_LEN, 0);

		/*
		 * When using SW encryption, mark the mbuf not ready.
		 * It will be marked ready via sbready() after the
		 * record has been encrypted.
		 *
		 * When using ifnet TLS, unencrypted TLS records are
		 * sent down the stack to the NIC.
		 */
		if (tls->mode == TCP_TLS_MODE_SW) {
			m->m_flags |= M_NOTREADY;
			if (__predict_false(tls_len == 0)) {
				/* TLS 1.0 empty fragment. */
				m->m_epg_nrdy = 1;
			} else
				m->m_epg_nrdy = m->m_epg_npgs;
			*enq_cnt += m->m_epg_nrdy;
		}
	}
}
void
ktls_check_rx(struct sockbuf *sb)
{
	struct tls_record_layer hdr;
	struct ktls_wq *wq;
	struct socket *so;
	bool running;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX, ("%s: sockbuf %p isn't TLS RX",
	    __func__, sb));
	so = __containerof(sb, struct socket, so_rcv);

	if (sb->sb_flags & SB_TLS_RX_RUNNING)
		return;

	/* Is there enough queued for a TLS header? */
	if (sb->sb_tlscc < sizeof(hdr)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	m_copydata(sb->sb_mtls, 0, sizeof(hdr), (void *)&hdr);

	/* Is the entire record queued? */
	if (sb->sb_tlscc < sizeof(hdr) + ntohs(hdr.tls_length)) {
		if ((sb->sb_state & SBS_CANTRCVMORE) != 0)
			so->so_error = EMSGSIZE;
		return;
	}

	sb->sb_flags |= SB_TLS_RX_RUNNING;

	soref(so);
	wq = &ktls_wq[so->so_rcv.sb_tls_info->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->so_head, so, so_ktls_rx_list);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_rx_queued, 1);
}

static struct mbuf *
ktls_detach_record(struct sockbuf *sb, int len)
{
	struct mbuf *m, *n, *top;
	int remain;

	SOCKBUF_LOCK_ASSERT(sb);
	MPASS(len <= sb->sb_tlscc);

	/*
	 * If TLS chain is the exact size of the record,
	 * just grab the whole record.
	 */
	top = sb->sb_mtls;
	if (sb->sb_tlscc == len) {
		sb->sb_mtls = NULL;
		sb->sb_mtlstail = NULL;
		goto out;
	}

	/*
	 * While it would be nice to use m_split() here, we need
	 * to know exactly what m_split() allocates to update the
	 * accounting, so do it inline instead.
	 */
	remain = len;
	for (m = top; remain > m->m_len; m = m->m_next)
		remain -= m->m_len;

	/* Easy case: don't have to split 'm'. */
	if (remain == m->m_len) {
		sb->sb_mtls = m->m_next;
		if (sb->sb_mtls == NULL)
			sb->sb_mtlstail = NULL;
		m->m_next = NULL;
		goto out;
	}

	/*
	 * Need to allocate an mbuf to hold the remainder of 'm'.  Try
	 * with M_NOWAIT first.
	 */
	n = m_get(M_NOWAIT, MT_DATA);
	if (n == NULL) {
		/*
		 * Use M_WAITOK with socket buffer unlocked.  If
		 * 'sb_mtls' changes while the lock is dropped, return
		 * NULL to force the caller to retry.
		 */
		SOCKBUF_UNLOCK(sb);

		n = m_get(M_WAITOK, MT_DATA);

		SOCKBUF_LOCK(sb);
		if (sb->sb_mtls != top) {
			m_free(n);
			return (NULL);
		}
	}
	n->m_flags |= M_NOTREADY;

	/* Store remainder in 'n'. */
	n->m_len = m->m_len - remain;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + remain;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + remain, mtod(n, caddr_t), n->m_len);
	}

	/* Trim 'm' and update accounting. */
	m->m_len -= n->m_len;
	sb->sb_tlscc -= n->m_len;
	sb->sb_ccc -= n->m_len;

	/* Account for 'n'. */
	sballoc_ktls_rx(sb, n);

	/* Insert 'n' into the TLS chain. */
	sb->sb_mtls = n;
	n->m_next = m->m_next;
	if (sb->sb_mtlstail == m)
		sb->sb_mtlstail = n;

	/* Detach the record from the TLS chain. */
	m->m_next = NULL;

out:
	MPASS(m_length(top, NULL) == len);
	for (m = top; m != NULL; m = m->m_next)
		sbfree_ktls_rx(sb, m);
	sb->sb_tlsdcc = len;
	sb->sb_ccc += len;
	SBCHECK(sb);
	return (top);
}
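/*
 * Worker-thread decryption path for KTLS RX.  Pull complete TLS
 * records off the not-ready chain, decrypt each one, and append the
 * result to the regular socket buffer as a TLS_GET_RECORD control
 * message plus payload mbufs.  A corrupted record aborts the
 * connection; an individual crypto failure drops that record and
 * reports EBADMSG but keeps decrypting subsequent records.
 */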
static void
ktls_decrypt(struct socket *so)
{
	char tls_header[MBUF_PEXT_HDR_LEN];
	struct ktls_session *tls;
	struct sockbuf *sb;
	struct tls_record_layer *hdr;
	struct tls_get_record tgr;
	struct mbuf *control, *data, *m;
	uint64_t seqno;
	int error, remain, tls_len, trail_len;

	hdr = (struct tls_record_layer *)tls_header;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	KASSERT(sb->sb_flags & SB_TLS_RX_RUNNING,
	    ("%s: socket %p not running", __func__, so));

	tls = sb->sb_tls_info;
	MPASS(tls != NULL);

	for (;;) {
		/* Is there enough queued for a TLS header? */
		if (sb->sb_tlscc < tls->params.tls_hlen)
			break;

		m_copydata(sb->sb_mtls, 0, tls->params.tls_hlen, tls_header);
		tls_len = sizeof(*hdr) + ntohs(hdr->tls_length);

		if (hdr->tls_vmajor != tls->params.tls_vmajor ||
		    hdr->tls_vminor != tls->params.tls_vminor)
			error = EINVAL;
		else if (tls_len < tls->params.tls_hlen || tls_len >
		    tls->params.tls_hlen + TLS_MAX_MSG_SIZE_V10_2 +
		    tls->params.tls_tlen)
			error = EMSGSIZE;
		else
			error = 0;
		if (__predict_false(error != 0)) {
			/*
			 * We have a corrupted record and are likely
			 * out of sync.  The connection isn't
			 * recoverable at this point, so abort it.
			 */
			SOCKBUF_UNLOCK(sb);
			counter_u64_add(ktls_offload_corrupted_records, 1);

			CURVNET_SET(so->so_vnet);
			so->so_proto->pr_usrreqs->pru_abort(so);
			so->so_error = error;
			CURVNET_RESTORE();
			goto deref;
		}

		/* Is the entire record queued? */
		if (sb->sb_tlscc < tls_len)
			break;

		/*
		 * Split out the portion of the mbuf chain containing
		 * this TLS record.
		 */
		data = ktls_detach_record(sb, tls_len);
		if (data == NULL)
			continue;
		MPASS(sb->sb_tlsdcc == tls_len);

		seqno = sb->sb_tls_seqno;
		sb->sb_tls_seqno++;
		SBCHECK(sb);
		SOCKBUF_UNLOCK(sb);

		error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);

			SOCKBUF_LOCK(sb);
			if (sb->sb_tlsdcc == 0) {
				/*
				 * sbcut/drop/flush discarded these
				 * mbufs.
				 */
				m_freem(data);
				break;
			}

			/*
			 * Drop this TLS record's data, but keep
			 * decrypting subsequent records.
			 */
			sb->sb_ccc -= tls_len;
			sb->sb_tlsdcc = 0;

			CURVNET_SET(so->so_vnet);
			so->so_error = EBADMSG;
			sorwakeup_locked(so);
			CURVNET_RESTORE();

			m_freem(data);

			SOCKBUF_LOCK(sb);
			continue;
		}

		/* Allocate the control mbuf. */
		tgr.tls_type = hdr->tls_type;
		tgr.tls_vmajor = hdr->tls_vmajor;
		tgr.tls_vminor = hdr->tls_vminor;
		tgr.tls_length = htobe16(tls_len - tls->params.tls_hlen -
		    trail_len);
		control = sbcreatecontrol_how(&tgr, sizeof(tgr),
		    TLS_GET_RECORD, IPPROTO_TCP, M_WAITOK);

		SOCKBUF_LOCK(sb);
		if (sb->sb_tlsdcc == 0) {
			/* sbcut/drop/flush discarded these mbufs. */
			MPASS(sb->sb_tlscc == 0);
			m_freem(data);
			m_freem(control);
			break;
		}

		/*
		 * Clear the 'dcc' accounting in preparation for
		 * adding the decrypted record.
		 */
		sb->sb_ccc -= tls_len;
		sb->sb_tlsdcc = 0;
		SBCHECK(sb);

		/* If there is no payload, drop all of the data. */
		if (tgr.tls_length == htobe16(0)) {
			m_freem(data);
			data = NULL;
		} else {
			/* Trim header. */
			remain = tls->params.tls_hlen;
			while (remain > 0) {
				if (data->m_len > remain) {
					data->m_data += remain;
					data->m_len -= remain;
					break;
				}
				remain -= data->m_len;
				data = m_free(data);
			}

			/* Trim trailer and clear M_NOTREADY. */
			remain = be16toh(tgr.tls_length);
			for (m = data; remain > m->m_len; m = m->m_next) {
				m->m_flags &= ~M_NOTREADY;
				remain -= m->m_len;
			}
			m->m_len = remain;
			m_freem(m->m_next);
			m->m_next = NULL;
			m->m_flags &= ~M_NOTREADY;

			/* Set EOR on the final mbuf. */
			m->m_flags |= M_EOR;
		}

		sbappendcontrol_locked(sb, data, control, 0);
	}

	sb->sb_flags &= ~SB_TLS_RX_RUNNING;

	if ((sb->sb_state & SBS_CANTRCVMORE) != 0 && sb->sb_tlscc > 0)
		so->so_error = EMSGSIZE;

	sorwakeup_locked(so);

deref:
	SOCKBUF_UNLOCK_ASSERT(sb);

	CURVNET_SET(so->so_vnet);
	sorele(so);
	CURVNET_RESTORE();
}

void
ktls_enqueue_to_free(struct mbuf *m)
{
	struct ktls_wq *wq;
	bool running;

	/* Mark it for freeing. */
	m->m_epg_flags |= EPG_FLAG_2FREE;
	wq = &ktls_wq[m->m_epg_tls->wq_index];
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);
	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
}

static void *
ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
{
	void *buf;
	int domain, running;

	if (m->m_epg_npgs <= 2)
		return (NULL);
	if (ktls_buffer_zone == NULL)
		return (NULL);
	if ((u_int)(ticks - wq->lastallocfail) < hz) {
		/*
		 * Rate-limit allocation attempts after a failure.
		 * ktls_buffer_import() will acquire a per-domain mutex to check
		 * the free page queues and may fail consistently if memory is
		 * fragmented.
		 */
		return (NULL);
	}
	buf = uma_zalloc(ktls_buffer_zone, M_NOWAIT | M_NORECLAIM);
	if (buf == NULL) {
		domain = PCPU_GET(domain);
		wq->lastallocfail = ticks;

		/*
		 * Note that this check is "racy", but the races are
		 * harmless, and are either a spurious wakeup if
		 * multiple threads fail allocations before the alloc
		 * thread wakes, or waiting an extra second in case we
		 * see an old value of running == true.
		 */
		if (!VM_DOMAIN_EMPTY(domain)) {
			running = atomic_load_int(&ktls_domains[domain].alloc_td.running);
			if (!running)
				wakeup(&ktls_domains[domain].alloc_td);
		}
	}
	return (buf);
}
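
/*
 * Illustrative sketch, not part of the original source: the
 * destination iovec built by ktls_encrypt_record() below has one of
 * two shapes.  With a contiguous buffer from ktls_buffer_alloc():
 *
 *	dst_iov[0] = the entire output buffer (all pages)
 *	dst_iov[1] = the trailer (MAC/authentication tag)
 *
 * and otherwise, with individually allocated pages:
 *
 *	dst_iov[0..npgs-1] = one entry per destination page
 *	dst_iov[npgs]      = the trailer (MAC/authentication tag)
 */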

static int
ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
    struct ktls_session *tls, struct ktls_ocf_encrypt_state *state)
{
	vm_page_t pg;
	int error, i, len, off;

	KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY),
	    ("%p not unready & nomap mbuf\n", m));
	KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
	    ("page count %d larger than maximum frame length %d", m->m_epg_npgs,
	    ktls_maxlen));

	/* Anonymous mbufs are encrypted in place. */
	if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
		return (tls->sw_encrypt(state, tls, m, NULL, 0));

	/*
	 * For file-backed mbufs (from sendfile), anonymous wired
	 * pages are allocated and used as the encryption destination.
	 */
	if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
		len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
		    m->m_epg_1st_off;
		state->dst_iov[0].iov_base = (char *)state->cbuf +
		    m->m_epg_1st_off;
		state->dst_iov[0].iov_len = len;
		state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
		i = 1;
	} else {
		off = m->m_epg_1st_off;
		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
			pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
			len = m_epg_pagelen(m, i, off);
			state->parray[i] = VM_PAGE_TO_PHYS(pg);
			state->dst_iov[i].iov_base =
			    (char *)PHYS_TO_DMAP(state->parray[i]) + off;
			state->dst_iov[i].iov_len = len;
		}
	}
	KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small"));
	state->dst_iov[i].iov_base = m->m_epg_trail;
	state->dst_iov[i].iov_len = m->m_epg_trllen;

	error = tls->sw_encrypt(state, tls, m, state->dst_iov, i + 1);

	if (__predict_false(error != 0)) {
		/* Free the anonymous pages. */
		if (state->cbuf != NULL)
			uma_zfree(ktls_buffer_zone, state->cbuf);
		else {
			for (i = 0; i < m->m_epg_npgs; i++) {
				pg = PHYS_TO_VM_PAGE(state->parray[i]);
				(void)vm_page_unwire_noq(pg);
				vm_page_free(pg);
			}
		}
	}
	return (error);
}

/* Number of TLS records in a batch passed to ktls_enqueue(). */
static u_int
ktls_batched_records(struct mbuf *m)
{
	int page_count, records;

	records = 0;
	page_count = m->m_epg_enc_cnt;
	while (page_count > 0) {
		records++;
		page_count -= m->m_epg_nrdy;
		m = m->m_next;
	}
	KASSERT(page_count == 0, ("%s: mismatched page count", __func__));
	return (records);
}
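
/*
 * Worked example of the sequential-record logic in ktls_enqueue()
 * below (illustrative, assuming single-record batches): with
 * tls->next_seqno == 5, records 7 and 8 arriving early are stashed on
 * tls->pending_records in sorted order.  When record 5 arrives it is
 * placed on the work queue and next_seqno becomes 6; 7 and 8 are
 * still held back.  When record 6 arrives, it is queued, and the
 * drain loop then moves 7 and 8 to the work queue as well, leaving
 * next_seqno == 9.
 */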

void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
	struct ktls_session *tls;
	struct ktls_wq *wq;
	int queued;
	bool running;

	KASSERT(((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
	    (M_EXTPG | M_NOTREADY)),
	    ("ktls_enqueue: %p not unready & nomap mbuf\n", m));
	KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));

	KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));

	m->m_epg_enc_cnt = page_count;

	/*
	 * Save a pointer to the socket.  The caller is responsible
	 * for taking an additional reference via soref().
	 */
	m->m_epg_so = so;

	queued = 1;
	tls = m->m_epg_tls;
	wq = &ktls_wq[tls->wq_index];
	mtx_lock(&wq->mtx);
	if (__predict_false(tls->sequential_records)) {
		/*
		 * For TLS 1.0, records must be encrypted
		 * sequentially.  For a given connection, all records
		 * queued to the associated work queue are processed
		 * sequentially.  However, sendfile(2) might complete
		 * I/O requests spanning multiple TLS records out of
		 * order.  Here we ensure TLS records are enqueued to
		 * the work queue in FIFO order.
		 *
		 * tls->next_seqno holds the sequence number of the
		 * next TLS record that should be enqueued to the work
		 * queue.  If this record's sequence number is not
		 * tls->next_seqno, it must be a future record, so
		 * insert it, sorted by TLS sequence number, into
		 * tls->pending_records and return.
		 *
		 * If this TLS record matches tls->next_seqno, place
		 * it in the work queue and then check
		 * tls->pending_records to see if any
		 * previously-queued records are now ready for
		 * encryption.
		 */
		if (m->m_epg_seqno != tls->next_seqno) {
			struct mbuf *n, *p;

			p = NULL;
			STAILQ_FOREACH(n, &tls->pending_records, m_epg_stailq) {
				if (n->m_epg_seqno > m->m_epg_seqno)
					break;
				p = n;
			}
			if (n == NULL)
				STAILQ_INSERT_TAIL(&tls->pending_records, m,
				    m_epg_stailq);
			else if (p == NULL)
				STAILQ_INSERT_HEAD(&tls->pending_records, m,
				    m_epg_stailq);
			else
				STAILQ_INSERT_AFTER(&tls->pending_records, p, m,
				    m_epg_stailq);
			mtx_unlock(&wq->mtx);
			counter_u64_add(ktls_cnt_tx_pending, 1);
			return;
		}

		tls->next_seqno += ktls_batched_records(m);
		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);

		while (!STAILQ_EMPTY(&tls->pending_records)) {
			struct mbuf *n;

			n = STAILQ_FIRST(&tls->pending_records);
			if (n->m_epg_seqno != tls->next_seqno)
				break;

			queued++;
			STAILQ_REMOVE_HEAD(&tls->pending_records, m_epg_stailq);
			tls->next_seqno += ktls_batched_records(n);
			STAILQ_INSERT_TAIL(&wq->m_head, n, m_epg_stailq);
		}
		counter_u64_add(ktls_cnt_tx_pending, -(queued - 1));
	} else
		STAILQ_INSERT_TAIL(&wq->m_head, m, m_epg_stailq);

	running = wq->running;
	mtx_unlock(&wq->mtx);
	if (!running)
		wakeup(wq);
	counter_u64_add(ktls_cnt_tx_queued, queued);
}
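
/*
 * Illustrative note, not part of the original source: the records
 * queued above follow a simple lifecycle.  The mbufs sit in the send
 * socket buffer marked M_NOTREADY, which keeps the TCP output path
 * from transmitting them; a worker thread encrypts them; and
 * pru_ready() (called from ktls_encrypt()/ktls_encrypt_cb() below)
 * finally marks the ciphertext ready for transmission.
 */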

/*
 * Once a file-backed mbuf (from sendfile) has been encrypted, free
 * the pages from the file and replace them with the anonymous pages
 * allocated in ktls_encrypt_record().
 */
static void
ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state)
{
	int i;

	MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0);

	/* Free the old pages. */
	m->m_ext.ext_free(m);

	/* Replace them with the new pages. */
	if (state->cbuf != NULL) {
		for (i = 0; i < m->m_epg_npgs; i++)
			m->m_epg_pa[i] = state->parray[0] + ptoa(i);

		/* Contig pages should go back to the cache. */
		m->m_ext.ext_free = ktls_free_mext_contig;
	} else {
		for (i = 0; i < m->m_epg_npgs; i++)
			m->m_epg_pa[i] = state->parray[i];

		/* Use the basic free routine. */
		m->m_ext.ext_free = mb_free_mext_pgs;
	}

	/* Pages are now writable. */
	m->m_epg_flags |= EPG_FLAG_ANON;
}

static __noinline void
ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_ocf_encrypt_state state;
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	int error, npages, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	/*
	 * Encrypt the TLS records in the chain of mbufs starting with
	 * 'top'.  'total_pages' gives us a total count of pages and is
	 * used to know when we have finished encrypting the TLS
	 * records originally queued with 'top'.
	 *
	 * NB: These mbufs are queued in the socket buffer and
	 * 'm_next' is traversing the mbufs in the socket buffer.  The
	 * socket buffer lock is not held while traversing this chain.
	 * Since the mbufs are all marked M_NOTREADY their 'm_next'
	 * pointers should be stable.  However, the 'm_next' of the
	 * last mbuf encrypted is not necessarily NULL.  It can point
	 * to other mbufs appended while 'top' was on the TLS work
	 * queue.
	 *
	 * Each mbuf holds an entire TLS record.
	 */
	error = 0;
	for (m = top; npages != total_pages; m = m->m_next) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		error = ktls_encrypt_record(wq, m, tls, &state);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			break;
		}

		if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
			ktls_finish_nonanon(m, &state);

		npages += m->m_epg_nrdy;

		/*
		 * Drop a reference to the session now that it is no
		 * longer needed.  Existing code depends on encrypted
		 * records having no associated session vs
		 * yet-to-be-encrypted records having an associated
		 * session.
		 */
		m->m_epg_tls = NULL;
		ktls_free(tls);
	}

	CURVNET_SET(so->so_vnet);
	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, top, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(top, total_pages);
	}

	sorele(so);
	CURVNET_RESTORE();
}
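
/*
 * Illustrative note, not part of the original source: ktls_encrypt()
 * above handles backends that complete synchronously
 * (tls->sync_dispatch).  ktls_encrypt_cb() below is the completion
 * half of the asynchronous path: ktls_encrypt_async() submits each
 * record with a malloc'd state and an extra socket reference, and the
 * OCF backend invokes this callback when the request finishes.  Both
 * paths converge on the same pru_ready()/pru_abort() handling.
 */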

void
ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error)
{
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m;
	int npages;

	m = state->m;

	if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
		ktls_finish_nonanon(m, state);

	so = state->so;
	free(state, M_KTLS);

	/*
	 * Drop a reference to the session now that it is no longer
	 * needed.  Existing code depends on encrypted records having
	 * no associated session vs yet-to-be-encrypted records having
	 * an associated session.
	 */
	tls = m->m_epg_tls;
	m->m_epg_tls = NULL;
	ktls_free(tls);

	if (error != 0)
		counter_u64_add(ktls_offload_failed_crypto, 1);

	CURVNET_SET(so->so_vnet);
	npages = m->m_epg_nrdy;

	if (error == 0) {
		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, m, npages);
	} else {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(m, npages);
	}

	sorele(so);
	CURVNET_RESTORE();
}

/*
 * Similar to ktls_encrypt, but used with asynchronous OCF backends
 * (coprocessors) where encryption does not use host CPU resources and
 * it can be beneficial to queue more requests than CPUs.
 */
static __noinline void
ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top)
{
	struct ktls_ocf_encrypt_state *state;
	struct ktls_session *tls;
	struct socket *so;
	struct mbuf *m, *n;
	int error, mpages, npages, total_pages;

	so = top->m_epg_so;
	tls = top->m_epg_tls;
	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
	top->m_epg_so = NULL;
#endif
	total_pages = top->m_epg_enc_cnt;
	npages = 0;

	error = 0;
	for (m = top; npages != total_pages; m = n) {
		KASSERT(m->m_epg_tls == tls,
		    ("different TLS sessions in a single mbuf chain: %p vs %p",
		    tls, m->m_epg_tls));
		KASSERT(npages + m->m_epg_npgs <= total_pages,
		    ("page count mismatch: top %p, total_pages %d, m %p", top,
		    total_pages, m));

		state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO);
		soref(so);
		state->so = so;
		state->m = m;

		mpages = m->m_epg_nrdy;
		n = m->m_next;

		error = ktls_encrypt_record(wq, m, tls, state);
		if (error) {
			counter_u64_add(ktls_offload_failed_crypto, 1);
			free(state, M_KTLS);
			CURVNET_SET(so->so_vnet);
			sorele(so);
			CURVNET_RESTORE();
			break;
		}

		npages += mpages;
	}

	CURVNET_SET(so->so_vnet);
	if (error != 0) {
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;
		mb_free_notready(m, total_pages - npages);
	}

	sorele(so);
	CURVNET_RESTORE();
}

static int
ktls_bind_domain(int domain)
{
	int error;

	error = cpuset_setthread(curthread->td_tid, &cpuset_domain[domain]);
	if (error != 0)
		return (error);
	curthread->td_domain.dr_policy = DOMAINSET_PREF(domain);
	return (0);
}
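
/*
 * Illustrative note, not part of the original source:
 * ktls_bind_domain() above does two things for NUMA locality: it pins
 * the calling thread to the CPUs of the given domain via
 * cpuset_setthread(), and it sets the thread's memory allocation
 * policy to prefer that domain via DOMAINSET_PREF(), so the buffers
 * pre-allocated by the alloc thread below should come from
 * domain-local memory.
 */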

static void
ktls_alloc_thread(void *ctx)
{
	struct ktls_domain_info *ktls_domain = ctx;
	struct ktls_alloc_thread *sc = &ktls_domain->alloc_td;
	void **buf;
	struct sysctl_oid *oid;
	char name[80];
	int domain, error, i, nbufs;

	domain = ktls_domain - ktls_domains;
	if (bootverbose)
		printf("Starting KTLS alloc thread for domain %d\n", domain);
	error = ktls_bind_domain(domain);
	if (error)
		printf("Unable to bind KTLS alloc thread for domain %d: error %d\n",
		    domain, error);
	snprintf(name, sizeof(name), "domain%d", domain);
	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_kern_ipc_tls), OID_AUTO,
	    name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "allocs",
	    CTLFLAG_RD, &sc->allocs, 0, "buffers allocated");
	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "wakeups",
	    CTLFLAG_RD, &sc->wakeups, 0, "thread wakeups");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, "running",
	    CTLFLAG_RD, &sc->running, 0, "thread running");

	buf = NULL;
	nbufs = 0;
	for (;;) {
		atomic_store_int(&sc->running, 0);
		tsleep(sc, PZERO | PNOLOCK, "-", 0);
		atomic_store_int(&sc->running, 1);
		sc->wakeups++;
		if (nbufs != ktls_max_alloc) {
			free(buf, M_KTLS);
			nbufs = atomic_load_int(&ktls_max_alloc);
			buf = malloc(sizeof(void *) * nbufs, M_KTLS,
			    M_WAITOK | M_ZERO);
		}
		/*
		 * Below we allocate nbufs with different allocation
		 * flags than we use when allocating normally during
		 * encryption in the ktls worker thread.  We specify
		 * M_NORECLAIM in the worker thread.  However, we omit
		 * that flag here and add M_WAITOK so that the VM
		 * system is permitted to perform expensive work to
		 * defragment memory.  We do this here, as it does not
		 * matter if this thread blocks.  If we block a ktls
		 * worker thread, we risk developing backlogs of
		 * buffers to be encrypted, leading to surges of
		 * traffic and potential NIC output drops.
		 */
		for (i = 0; i < nbufs; i++) {
			buf[i] = uma_zalloc(ktls_buffer_zone, M_WAITOK);
			sc->allocs++;
		}
		for (i = 0; i < nbufs; i++) {
			uma_zfree(ktls_buffer_zone, buf[i]);
			buf[i] = NULL;
		}
	}
}
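
/*
 * Illustrative sketch, not part of the original source: the worker
 * loop below minimizes lock hold time by splicing the entire pending
 * queues onto local list heads under wq->mtx (STAILQ_CONCAT() is an
 * O(1) splice for STAILQ) and then doing the expensive crypto work
 * with the mutex dropped:
 *
 *	mtx_lock(&wq->mtx);
 *	STAILQ_INIT(&local_m_head);
 *	STAILQ_CONCAT(&local_m_head, &wq->m_head);
 *	mtx_unlock(&wq->mtx);
 *	... process local_m_head without holding the lock ...
 */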

static void
ktls_work_thread(void *ctx)
{
	struct ktls_wq *wq = ctx;
	struct mbuf *m, *n;
	struct socket *so, *son;
	STAILQ_HEAD(, mbuf) local_m_head;
	STAILQ_HEAD(, socket) local_so_head;
	int cpu;

	cpu = wq - ktls_wq;
	if (bootverbose)
		printf("Starting KTLS worker thread for CPU %d\n", cpu);

	/*
	 * Bind to a core.  If ktls_bind_threads is > 1, then
	 * we bind to the NUMA domain instead.
	 */
	if (ktls_bind_threads) {
		int error;

		if (ktls_bind_threads > 1) {
			struct pcpu *pc = pcpu_find(cpu);

			error = ktls_bind_domain(pc->pc_domain);
		} else {
			cpuset_t mask;

			CPU_SETOF(cpu, &mask);
			error = cpuset_setthread(curthread->td_tid, &mask);
		}
		if (error)
			printf("Unable to bind KTLS worker thread for CPU %d: error %d\n",
			    cpu, error);
	}
#if defined(__aarch64__) || defined(__amd64__) || defined(__i386__)
	fpu_kern_thread(0);
#endif
	for (;;) {
		mtx_lock(&wq->mtx);
		while (STAILQ_EMPTY(&wq->m_head) &&
		    STAILQ_EMPTY(&wq->so_head)) {
			wq->running = false;
			mtx_sleep(wq, &wq->mtx, 0, "-", 0);
			wq->running = true;
		}

		STAILQ_INIT(&local_m_head);
		STAILQ_CONCAT(&local_m_head, &wq->m_head);
		STAILQ_INIT(&local_so_head);
		STAILQ_CONCAT(&local_so_head, &wq->so_head);
		mtx_unlock(&wq->mtx);

		STAILQ_FOREACH_SAFE(m, &local_m_head, m_epg_stailq, n) {
			if (m->m_epg_flags & EPG_FLAG_2FREE) {
				ktls_free(m->m_epg_tls);
				m_free_raw(m);
			} else {
				if (m->m_epg_tls->sync_dispatch)
					ktls_encrypt(wq, m);
				else
					ktls_encrypt_async(wq, m);
				counter_u64_add(ktls_cnt_tx_queued, -1);
			}
		}

		STAILQ_FOREACH_SAFE(so, &local_so_head, so_ktls_rx_list, son) {
			ktls_decrypt(so);
			counter_u64_add(ktls_cnt_rx_queued, -1);
		}
	}
}

#if defined(INET) || defined(INET6)
static void
ktls_disable_ifnet_help(void *context, int pending __unused)
{
	struct ktls_session *tls;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	int err;

	tls = context;
	inp = tls->inp;
	if (inp == NULL)
		return;
	INP_WLOCK(inp);
	so = inp->inp_socket;
	MPASS(so != NULL);
	if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
	    (inp->inp_flags2 & INP_FREED)) {
		goto out;
	}

	if (so->so_snd.sb_tls_info != NULL)
		err = ktls_set_tx_mode(so, TCP_TLS_MODE_SW);
	else
		err = ENXIO;
	if (err == 0) {
		counter_u64_add(ktls_ifnet_disable_ok, 1);
		/* ktls_set_tx_mode() drops the inp wlock, so recheck flags. */
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0 &&
		    (inp->inp_flags2 & INP_FREED) == 0 &&
		    (tp = intotcpcb(inp)) != NULL &&
		    tp->t_fb->tfb_hwtls_change != NULL)
			(*tp->t_fb->tfb_hwtls_change)(tp, 0);
	} else {
		counter_u64_add(ktls_ifnet_disable_fail, 1);
	}

out:
	sorele(so);
	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
	ktls_free(tls);
}

/*
 * Called when re-transmits are becoming a substantial portion of the
 * sends on this connection.  When this happens, we transition the
 * connection to software TLS.  This is needed because most inline TLS
 * NICs keep crypto state only for in-order transmits.  This means
 * that to handle a TCP rexmit (which is out-of-order), the NIC must
 * re-DMA the entire TLS record up to and including the current
 * segment.  Thus, when re-transmitting the last ~1448 byte segment
 * of a 16KB TLS record, we could wind up re-DMA'ing an order of
 * magnitude more data than we are sending.  This can cause the PCIe
 * link to saturate well before the network, which can cause output
 * drops, and a general loss of capacity.
 */
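
/*
 * Illustrative sketch, not part of the original source: the decision
 * to call ktls_disable_ifnet() lives in the TCP stack rather than in
 * this file.  Conceptually, with hypothetical counter names, the
 * trigger is a check along the lines of:
 *
 *	if (rexmit_bytes * 100 > total_bytes * ktls_ifnet_max_rexmit_pct)
 *		ktls_disable_ifnet(tp);
 */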
void
ktls_disable_ifnet(void *arg)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	struct socket *so;
	struct ktls_session *tls;

	tp = arg;
	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);
	so = inp->inp_socket;
	SOCK_LOCK(so);
	tls = so->so_snd.sb_tls_info;
	if (tls->disable_ifnet_pending) {
		SOCK_UNLOCK(so);
		return;
	}

	/*
	 * Note that disable_ifnet_pending is never cleared; ifnet TLS
	 * can only be disabled once per session, so there is never a
	 * need to do it again.
	 */

	(void)ktls_hold(tls);
	in_pcbref(inp);
	soref(so);
	tls->disable_ifnet_pending = true;
	tls->inp = inp;
	SOCK_UNLOCK(so);
	TASK_INIT(&tls->disable_ifnet_task, 0, ktls_disable_ifnet_help, tls);
	(void)taskqueue_enqueue(taskqueue_thread, &tls->disable_ifnet_task);
}
#endif