/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>
#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_hpts.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_id_bucket_zone, tcp_log_id_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box controls");

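/*
 * Trace points (net.inet.tcp.bb.tp, defined below) let a developer arm BB
 * logging on connections that hit a chosen point in the stack: "number"
 * selects which trace point to activate, "bbmode" the BB logging mode to
 * apply, and "count" how many connections that hit the trace point will
 * have logging turned on.
 */
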
SYSCTL_NODE(_net_inet_tcp_bb, OID_AUTO, tp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box Trace Point controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

uint32_t tcp_trace_point_config = 0;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, number, CTLFLAG_RW,
    &tcp_trace_point_config, TCP_LOG_STATE_HEAD_AUTO,
    "What is the trace point number to activate (0=none, 0xffffffff = all)?");

uint32_t tcp_trace_point_bb_mode = TCP_LOG_STATE_CONTINUAL;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, bbmode, CTLFLAG_RW,
    &tcp_trace_point_bb_mode, TCP_LOG_STATE_HEAD_AUTO,
    "What is the BB logging mode that is activated?");

int32_t tcp_trace_point_count = 0;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, count, CTLFLAG_RW,
    &tcp_trace_point_count, TCP_LOG_STATE_HEAD_AUTO,
    "How many connections will have BB logging turned on that hit the tracepoint?");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_id_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_id_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_id_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_id_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, 0,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, 0,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, 0,
    "Auto-select from all sessions (rather than just those with IDs)");

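/*
 * The counters below are diagnostic aids that are compiled in only when
 * the kernel is built with TCPLOG_DEBUG_COUNTERS; the "failN" counters
 * each count a distinct failure path when queueing log entries.
 */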
#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif
/* Number of requests to consider a PCB ID "active". */
#define	ACTIVE_REQUEST_COUNT 10

/* Statistic tracking for "active" PCB IDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

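/*
 * Each log event is stored in a struct tcp_log_mem allocated from
 * tcp_log_zone: tlm_buf is the entry that is eventually copied out to
 * userland, and tlm_v holds the optional verbose (function/line) data.
 */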
struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char		tlb_id[TCP_LOG_ID_LEN];
	char		tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head	tlb_head;
	struct mtx	tlb_mtx;
	volatile u_int	tlb_refcnt;
	volatile u_int	tlb_reqcnt;
	uint32_t	tlb_loglimit;
	int8_t		tlb_logstate;
};

struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t	tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb	*tln_inp;
	struct tcpcb	*tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints tln_ie;
	struct tcp_log_stailq tln_entries;
	int		tln_count;
	volatile int	tln_closed;
	uint8_t		tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_id_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}
	return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the INP lock was reacquired; otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}

#define	RECHECK_INP_CLEAN(cleanup)	do {			\
	if (inp->inp_flags & INP_DROPPED) {			\
		rv = ECONNRESET;				\
		cleanup;					\
		goto done;					\
	}							\
	tp = intotcpcb(inp);					\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef STATS
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

static void
tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
{

	atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
}

int
tcp_log_apply_ratio(struct tcpcb *tp, int ratio)
{
	struct tcp_log_id_bucket *tlb;
	struct inpcb *inp = tptoinpcb(tp);
	uint32_t hash, ratio_hash_thresh;
	int rv, tree_locked;

	rv = 0;
	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	INP_WLOCK_ASSERT(inp);
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}
	ratio_hash_thresh = max(1, UINT32_MAX / ratio);
	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);

	hash = hash32_buf(tlb->tlb_id, strlen(tlb->tlb_id), 0);
	if (hash > ratio_hash_thresh && tp->_t_logstate == TCP_LOG_STATE_OFF &&
	    tlb->tlb_logstate == TCP_LOG_STATE_OFF) {
		/*
		 * Ratio decision not to log this log ID (and this connection
		 * by way of association). We only apply a ratio-based log
		 * disable decision if it would not interfere with a log
		 * enable decision made elsewhere, e.g. tcp_log_selectauto()
		 * or setsockopt().
		 */
		tlb->tlb_logstate = TCP_LOG_STATE_RATIO_OFF;
		INP_WLOCK(inp);
		RECHECK_INP();
		(void)tcp_log_state_change(tp, TCP_LOG_STATE_OFF);
done:
		INP_WUNLOCK(inp);
	}

	INP_UNLOCK_ASSERT(inp);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (rv);
}

/*
 * Associate the specified tag with a particular TCP log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
 */
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);
	strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (0);
}

/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp = tptoinpcb(tp);
	int tree_locked, rv;
	bool bucket_locked, same;

	tlb = NULL;
	tln = NULL;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);
	/* See if the ID is unchanged. */
	same = ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0));
	if (tp->_t_logstate && STAILQ_FIRST(&tp->t_logs) && !same) {
		/*
		 * There are residual logs left; we may be changing IDs,
		 * so dump what we can.
		 */
		switch(tp->_t_logstate) {
		case TCP_LOG_STATE_HEAD_AUTO:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at id switch",
			    M_NOWAIT, false);
			break;
		case TCP_LOG_STATE_TAIL_AUTO:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at id switch",
			    M_NOWAIT, false);
			break;
		case TCP_LOG_STATE_CONTINUAL:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual at id switch",
			    M_NOWAIT, false);
			break;
		case TCP_LOG_VIA_BBPOINTS:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints at id switch",
			    M_NOWAIT, false);
			break;
		}
	}
	if (same) {
		if (tp->t_lib != NULL) {
			tcp_log_increment_reqcnt(tp->t_lib);
			if ((tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) &&
			    (tp->t_log_state_set == 0)) {
				/* Clone in any logging */
				tp->_t_logstate = tp->t_lib->tlb_logstate;
			}
			if ((tp->t_lib->tlb_loglimit) &&
			    (tp->t_log_state_set == 0)) {
				/* We also have a limit set */
				tp->t_loglimit = tp->t_lib->tlb_loglimit;
			}
		}
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was
		 *    freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed here to remove the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_id_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket. Phew!
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_id_node_zone,
			    M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember how we constructed (struct tcp_log_id_bucket):
		 * tlb_id is first, so we can safely cast the id to a bucket
		 * pointer for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_id_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}
			counter_u64_add(tcp_log_pcb_ids_cur, 1);
			counter_u64_add(tcp_log_pcb_ids_tot, 1);

			if ((tcp_log_auto_all == false) &&
			    tcp_log_auto_mode &&
			    tcp_log_selectauto()) {
				/* Save off the log state */
				tlb->tlb_logstate = tcp_log_auto_mode;
			} else
				tlb->tlb_logstate = TCP_LOG_STATE_OFF;
			tlb->tlb_loglimit = 0;
			tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NULL termination.
			 *
			 * TODO: When I'm done writing this, see if we
			 * have correctly validated NULL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			tlb->tlb_reqcnt = 1;
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {					\
	TCPID_BUCKET_LOCK_DESTROY(tlb);				\
	uma_zfree(tcp_log_id_bucket_zone, tlb);			\
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);	\
	counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);	\
	bucket_locked = false;					\
	tlb = NULL;						\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
				INP_WUNLOCK(inp);
			}
#undef FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);

			/* Record the request. */
			tcp_log_increment_reqcnt(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		if (tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) {
			/* Clone in any logging */
			tp->_t_logstate = tp->t_lib->tlb_logstate;
		}
		if (tp->t_lib->tlb_loglimit) {
			/* The loglimit too */
			tp->t_loglimit = tp->t_lib->tlb_loglimit;
		}
		tln = NULL;
	}

	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
	if (tln != NULL)
		uma_zfree(tcp_log_id_node_zone, tln);
	return (rv);
}

/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
	size_t len;

	INP_LOCK_ASSERT(tptoinpcb(tp));
	if (tp->t_lib != NULL) {
		len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		KASSERT(len < TCP_LOG_ID_LEN,
		    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
		    __func__, __LINE__, len));
	} else {
		*buf = '\0';
		len = 0;
	}
	return (len);
}

/*
 * Get the tag associated with the TCPCB's log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_tag(struct tcpcb *tp, char *buf)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	size_t len;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	if (tlb != NULL) {
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);
		TCPID_BUCKET_LOCK(tlb);
		len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
		KASSERT(len < TCP_LOG_TAG_LEN,
		    ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
		    __func__, __LINE__, len));
		if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
			TCPID_BUCKET_UNLOCK(tlb);

		if (tree_locked == TREE_WLOCKED) {
			TCPID_TREE_WLOCK_ASSERT();
			TCPID_TREE_WUNLOCK();
		} else if (tree_locked == TREE_RLOCKED) {
			TCPID_TREE_RLOCK_ASSERT();
			TCPID_TREE_RUNLOCK();
		} else
			TCPID_TREE_UNLOCK_ASSERT();
	} else {
		INP_WUNLOCK(inp);
		*buf = '\0';
		len = 0;
	}

	return (len);
}

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
	if (refcnt != 0)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_add(l)	\
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
	if (refcnt != 1)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_rem(l)	\
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define	tcp_log_entry_refcnt_add(l)
#define	tcp_log_entry_refcnt_rem(l)

#endif

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static inline void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

	uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
	(*count)--;
	KASSERT(*count >= 0,
	    ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
	struct tcp_log_mem *log_entry;

	/* Free the entries. */
	while ((log_entry = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
		tcp_log_free_log_common(log_entry, count);
	}
}

/* Cleanup after removing a log entry. */
static inline void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	uma_zfree(tcp_log_zone, log_entry);
	tp->t_lognum--;
	KASSERT(tp->t_lognum >= 0,
	    ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static inline void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
	    ("%s: attempt to remove non-HEAD log entry", __func__));
	STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
	tcp_log_entry_refcnt_rem(log_entry);
	tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	tlm->tlm_refcnt = 0;
	return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
	return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

	tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
	    tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
	    NULL, NULL, NULL,
#endif
	    NULL, UMA_ALIGN_PTR, 0);
	(void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
	tcp_log_id_bucket_zone = uma_zcreate("tcp_log_id_bucket",
	    sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tcp_log_id_node_zone = uma_zcreate("tcp_log_id_node",
	    sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
	tcp_log_queued = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
	tcp_log_que_read = counter_u64_alloc(M_WAITOK);
	tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif
	tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
	tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);

	rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
	mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
	callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

	/* A new TCPCB should start out zero-initialized. */
	STAILQ_INIT(&tp->t_logs);

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	tp->t_loglimit = tcp_log_session_limit;
	if ((tcp_log_auto_all == true) &&
	    tcp_log_auto_mode &&
	    tcp_log_selectauto()) {
		tp->_t_logstate = tcp_log_auto_mode;
		tp->t_flags2 |= TF2_LOG_AUTO;
	}
}

/* Remove entries */
static void
tcp_log_expire(void *unused __unused)
{
	struct tcp_log_id_bucket *tlb;
	struct tcp_log_id_node *tln;
	sbintime_t expiry_limit;
	int tree_locked;

	TCPLOG_EXPIREQ_LOCK();
	if (callout_pending(&tcp_log_expireq_callout)) {
		/* Callout was reset. */
		TCPLOG_EXPIREQ_UNLOCK();
		return;
	}

	/*
	 * Process entries until we reach one that expires too far in the
	 * future. Look one second in the future.
	 */
	expiry_limit = getsbinuptime() + SBT_1S;
	tree_locked = TREE_UNLOCKED;

	while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
	    tln->tln_expiretime <= expiry_limit) {
		if (!callout_active(&tcp_log_expireq_callout)) {
			/*
			 * Callout was stopped. I guess we should
			 * just quit at this point.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			return;
		}

		/*
		 * Remove the node from the head of the list and unlock
		 * the list. Change the expiry time to SBT_MAX as a signal
		 * to other threads that we now own this.
		 */
		STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
		tln->tln_expiretime = SBT_MAX;
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * Remove the node from the bucket.
		 */
		tlb = tln->tln_bucket;
		TCPID_BUCKET_LOCK(tlb);
		if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
			tcp_log_id_validate_tree_lock(tree_locked);
			if (tree_locked == TREE_WLOCKED)
				TCPID_TREE_WUNLOCK();
			else
				TCPID_TREE_RUNLOCK();
			tree_locked = TREE_UNLOCKED;
		}

		/* Drop the INP reference. */
		INP_WLOCK(tln->tln_inp);
		if (!in_pcbrele_wlocked(tln->tln_inp))
			INP_WUNLOCK(tln->tln_inp);

		/* Free the log records. */
		tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

		/* Free the node. */
		uma_zfree(tcp_log_id_node_zone, tln);

		/* Relock the expiry queue. */
		TCPLOG_EXPIREQ_LOCK();
	}

	/*
	 * We've expired all the entries we can. Do we need to reschedule
	 * ourselves?
	 */
	callout_deactivate(&tcp_log_expireq_callout);
	if (tln != NULL) {
		/*
		 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
		 * set the next callout to that. (This helps ensure we generally
		 * run the callout no more often than desired.)
		 */
		expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
		if (expiry_limit < tln->tln_expiretime)
			expiry_limit = tln->tln_expiretime;
		callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
		    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
	}

	/* We're done. */
	TCPLOG_EXPIREQ_UNLOCK();
	return;
}

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{
	struct inpcb *inp = tptoinpcb(tp);

	INP_WLOCK_ASSERT(inp);

	tln->tln_ie = inp->inp_inc.inc_ie;
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		tln->tln_af = AF_INET6;
	else
		tln->tln_af = AF_INET;
	tln->tln_entries = tp->t_logs;
	tln->tln_count = tp->t_lognum;
	tln->tln_bucket = tp->t_lib;

	/* Clear information from the PCB. */
	STAILQ_INIT(&tp->t_logs);
	tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
	struct tcp_log_id_node *tln, *tln_first;
	struct tcp_log_mem *log_entry;
	sbintime_t callouttime;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef TCP_ACCOUNTING
	if (tp->_t_logstate) {
		struct tcp_log_buffer *lgb;
		union tcp_log_stackspecific log;
		struct timeval tv;
		int i;

		memset(&log, 0, sizeof(log));
		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
			for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
				log.u_raw.u64_flex[i] = tp->tcp_cnt_counters[i];
			}
			lgb = tcp_log_event(tp, NULL,
			    NULL,
			    NULL,
			    TCP_LOG_ACCOUNTING, 0,
			    0, &log, false, NULL, NULL, 0, &tv);
			lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
			lgb->tlb_flex2 = 1;
			for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
				log.u_raw.u64_flex[i] = tp->tcp_proc_time[i];
			}
			lgb = tcp_log_event(tp, NULL,
			    NULL,
			    NULL,
			    TCP_LOG_ACCOUNTING, 0,
			    0, &log, false, NULL, NULL, 0, &tv);
			if (tptoinpcb(tp)->inp_flags2 & INP_MBUF_ACKCMP)
				lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
			else
				lgb->tlb_flex1 = TCP_NUM_PROC_COUNTERS;
			lgb->tlb_flex2 = 2;
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.cur_del_rate = tp->t_end_info;
		TCP_LOG_EVENTP(tp, NULL,
		    NULL,
		    NULL,
		    TCP_LOG_CONNEND, 0,
		    0, &log, false, &tv);
	}
#endif
	/*
	 * If we were gathering packets to be automatically dumped, try to do
	 * it now. If this succeeds, the log information in the TCPCB will be
	 * cleared. Otherwise, we'll handle the log information as we do
	 * for other states.
	 */
	switch(tp->_t_logstate) {
	case TCP_LOG_STATE_HEAD_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_TAIL_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_VIA_BBPOINTS:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_CONTINUAL:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
		break;
	}

	/*
	 * There are two ways we could keep logs: per-socket or per-ID. If
	 * we are tracking logs with an ID, then the logs survive the
	 * destruction of the TCPCB.
	 *
	 * If the TCPCB is associated with an ID node, move the logs from the
	 * TCPCB to the ID node. In theory, this is safe, for reasons which I
	 * will now explain for my own benefit when I next need to figure out
	 * this code. :-)
	 *
	 * We own the INP lock. Therefore, no one else can change the contents
	 * of this node (Rule C). Further, no one can remove this node from
	 * the bucket while we hold the lock (Rule D). Basically, no one can
	 * mess with this node. That leaves two states in which we could be:
	 *
	 * 1. Another thread is currently waiting to acquire the INP lock, with
	 *    plans to do something with this node. When we drop the INP lock,
	 *    they will have a chance to do that. They will recheck the
	 *    tln_closed field (see note to Rule C) and then acquire the
	 *    bucket lock before proceeding further.
	 *
	 * 2. Another thread will try to acquire a lock at some point in the
	 *    future. If they try to acquire a lock before we set the
	 *    tln_closed field, they will follow state #1. If they try to
	 *    acquire a lock after we set the tln_closed field, they will be
	 *    able to make changes to the node, at will, following Rule C.
	 *
	 * Therefore, we currently own this node and can make any changes
	 * we want. But, as soon as we set the tln_closed field to true, we
	 * have effectively dropped our lock on the node. (For this reason, we
	 * also need to make sure our writes are ordered correctly. An atomic
	 * operation with "release" semantics should be sufficient.)
	 */

	if (tp->t_lin != NULL) {
		struct inpcb *inp = tptoinpcb(tp);

		/* Copy the relevant information to the log entry. */
		tln = tp->t_lin;
		KASSERT(tln->tln_inp == inp,
		    ("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)",
		    __func__, tln->tln_inp, inp));
		tcp_log_move_tp_to_node(tp, tln);

		/* Clear information from the PCB. */
		tp->t_lin = NULL;
		tp->t_lib = NULL;

		/*
		 * Take a reference on the INP. This ensures that the INP
		 * remains valid while the node is on the expiry queue. This
		 * ensures the INP is valid for other threads that may be
		 * racing to lock this node when we move it to the expire
		 * queue.
		 */
		in_pcbref(inp);

		/*
		 * Store the entry on the expiry list. The exact behavior
		 * depends on whether we have entries to keep. If so, we
		 * put the entry at the tail of the list and expire in
		 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
		 * the entry at the head of the list. (Handling the cleanup
		 * via the expiry timer lets us avoid locking messy-ness here.)
		 */
		tln->tln_expiretime = getsbinuptime();
		TCPLOG_EXPIREQ_LOCK();
		if (tln->tln_count) {
			tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
			if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
			    !callout_active(&tcp_log_expireq_callout)) {
				/*
				 * We are adding the first entry and a callout
				 * is not currently scheduled; therefore, we
				 * need to schedule one.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    tln->tln_expiretime, SBT_1S, tcp_log_expire,
				    NULL, C_ABSOLUTE);
			}
			STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
			    tln_expireq);
		} else {
			callouttime = tln->tln_expiretime +
			    TCP_LOG_EXPIRE_INTVL;
			tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

			if ((tln_first == NULL ||
			    callouttime < tln_first->tln_expiretime) &&
			    (callout_pending(&tcp_log_expireq_callout) ||
			    !callout_active(&tcp_log_expireq_callout))) {
				/*
				 * The list is empty, or we want to run the
				 * expire code before the first entry's timer
				 * fires. Also, we are in a case where a callout
				 * is not actively running. We want to reset
				 * the callout to occur sooner.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    callouttime, SBT_1S, tcp_log_expire, NULL,
				    C_ABSOLUTE);
			}

			/*
			 * Insert to the head, or just after the head, as
			 * appropriate. (This might result in small
			 * mis-orderings as a bunch of "expire now" entries
			 * gather at the start of the list, but that should
			 * not produce big problems, since the expire timer
			 * will walk through all of them.)
			 */
			if (tln_first == NULL ||
			    tln->tln_expiretime < tln_first->tln_expiretime)
				STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
				    tln_expireq);
			else
				STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
				    tln_first, tln, tln_expireq);
		}
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * We are done messing with the tln. After this point, we
		 * can't touch it. (Note that the "release" semantics should
		 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
		 * Therefore, they should be unnecessary here. However, it
		 * seems like a good idea to include them anyway, since we
		 * really are releasing a lock here.)
		 */
		atomic_store_rel_int(&tln->tln_closed, 1);
	} else {
		/* Remove log entries. */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
	}

	/*
	 * Change the log state to off (just in case anything tries to sneak
	 * in a last-minute log).
	 */
	tp->_t_logstate = TCP_LOG_STATE_OFF;
}

static void
tcp_log_purge_tp_logbuf(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tp->t_lognum == 0)
		return;

	while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
		tcp_log_remove_log_head(tp, log_entry);
	KASSERT(tp->t_lognum == 0,
	    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
	    __func__, tp->t_lognum));
	tp->_t_logstate = TCP_LOG_STATE_OFF;
}

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
 */
struct tcp_log_buffer *
tcp_log_event(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
	struct tcp_log_mem *log_entry;
	struct tcp_log_buffer *log_buf;
	int attempt_count = 0;
	struct tcp_log_verbose *log_verbose;
	uint32_t logsn;

	KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
	    ("%s called with inconsistent func (%p) and line (%d) arguments",
	    __func__, func, line));

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tcp_disable_all_bb_logs) {
		/*
		 * The global BB logging shutdown switch has been
		 * thrown. Call the purge function, which frees the
		 * logs and turns off logging.
		 */
		tcp_log_purge_tp_logbuf(tp);
		return (NULL);
	}
	KASSERT(tp->_t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->_t_logstate == TCP_LOG_STATE_TAIL ||
	    tp->_t_logstate == TCP_LOG_STATE_CONTINUAL ||
	    tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
	    tp->_t_logstate == TCP_LOG_VIA_BBPOINTS ||
	    tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO,
	    ("%s called with unexpected tp->_t_logstate (%d)", __func__,
	    tp->_t_logstate));

	/*
	 * Get the serial number. We do this early so it will
	 * increment even if we end up skipping the log entry for some
	 * reason.
	 */
	logsn = tp->t_logsn++;

	/*
	 * Can we get a new log entry? If so, increment the lognum counter
	 * here.
	 */
retry:
	if (tp->t_lognum < tp->t_loglimit) {
		if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
			tp->t_lognum++;
	} else
		log_entry = NULL;

	/* Do we need to try to reuse? */
	if (log_entry == NULL) {
		/*
		 * Sacrifice auto-logged sessions without a log ID if
		 * tcp_log_auto_all is false. (If they don't have a log
		 * ID by now, it is probable that either they won't get one
		 * or we are resource-constrained.)
		 */
		if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
		    !tcp_log_auto_all) {
			if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
				panic("%s:%d: tcp_log_state_change() failed "
				    "to set tp %p to TCP_LOG_STATE_CLEAR",
				    __func__, __LINE__, tp);
#endif
				tp->_t_logstate = TCP_LOG_STATE_OFF;
			}
			return (NULL);
		}
		/*
		 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
		 * the buffers. If successful, deactivate tracing. Otherwise,
		 * leave it active so we will retry.
		 */
		if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false)) {
			tp->_t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		} else if ((tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return (NULL);
		} else if ((tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return (NULL);
		} else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO)
			return (NULL);

		/* If in HEAD state, just deactivate the tracing and return. */
		if (tp->_t_logstate == TCP_LOG_STATE_HEAD) {
			tp->_t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		}
		/*
		 * Get a buffer to reuse. If that fails, just give up.
		 * (We can't log anything without a buffer in which to
		 * put it.)
		 *
		 * Note that we don't change the t_lognum counter
		 * here. Because we are re-using the buffer, the total
		 * number won't change.
		 */
		if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
			return (NULL);
		STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
	}

	KASSERT(log_entry != NULL,
	    ("%s: log_entry unexpectedly NULL", __func__));

	/* Extract the log buffer and verbose buffer pointers. */
	log_buf = &log_entry->tlm_buf;
	log_verbose = &log_entry->tlm_v;

*/ 1807 if (itv == NULL) 1808 microuptime(&log_buf->tlb_tv); 1809 else 1810 memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval)); 1811 log_buf->tlb_ticks = ticks; 1812 log_buf->tlb_sn = logsn; 1813 log_buf->tlb_stackid = tp->t_fb->tfb_id; 1814 log_buf->tlb_eventid = eventid; 1815 log_buf->tlb_eventflags = 0; 1816 log_buf->tlb_errno = errornum; 1817 1818 /* Socket buffers */ 1819 if (rxbuf != NULL) { 1820 log_buf->tlb_eventflags |= TLB_FLAG_RXBUF; 1821 log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc; 1822 log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc; 1823 log_buf->tlb_rxbuf.tls_sb_spare = 0; 1824 } else { 1825 log_buf->tlb_rxbuf.tls_sb_acc = 0; 1826 log_buf->tlb_rxbuf.tls_sb_ccc = 0; 1827 } 1828 if (txbuf != NULL) { 1829 log_buf->tlb_eventflags |= TLB_FLAG_TXBUF; 1830 log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc; 1831 log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc; 1832 log_buf->tlb_txbuf.tls_sb_spare = 0; 1833 } else { 1834 log_buf->tlb_txbuf.tls_sb_acc = 0; 1835 log_buf->tlb_txbuf.tls_sb_ccc = 0; 1836 } 1837 /* Copy values from tp to the log entry. */ 1838 #define COPY_STAT(f) log_buf->tlb_ ## f = tp->f 1839 #define COPY_STAT_T(f) log_buf->tlb_ ## f = tp->t_ ## f 1840 COPY_STAT_T(state); 1841 COPY_STAT_T(starttime); 1842 COPY_STAT(iss); 1843 COPY_STAT_T(flags); 1844 COPY_STAT(snd_una); 1845 COPY_STAT(snd_max); 1846 COPY_STAT(snd_cwnd); 1847 COPY_STAT(snd_nxt); 1848 COPY_STAT(snd_recover); 1849 COPY_STAT(snd_wnd); 1850 COPY_STAT(snd_ssthresh); 1851 COPY_STAT_T(srtt); 1852 COPY_STAT_T(rttvar); 1853 COPY_STAT(rcv_up); 1854 COPY_STAT(rcv_adv); 1855 COPY_STAT(rcv_nxt); 1856 COPY_STAT(rcv_wnd); 1857 COPY_STAT_T(dupacks); 1858 COPY_STAT_T(segqlen); 1859 COPY_STAT(snd_numholes); 1860 COPY_STAT(snd_scale); 1861 COPY_STAT(rcv_scale); 1862 COPY_STAT_T(flags2); 1863 COPY_STAT_T(fbyte_in); 1864 COPY_STAT_T(fbyte_out); 1865 #undef COPY_STAT 1866 #undef COPY_STAT_T 1867 /* Copy stack-specific info. */ 1868 if (stackinfo != NULL) { 1869 memcpy(&log_buf->tlb_stackinfo, stackinfo, 1870 sizeof(log_buf->tlb_stackinfo)); 1871 log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO; 1872 } 1873 1874 /* The packet */ 1875 log_buf->tlb_len = len; 1876 if (th) { 1877 int optlen; 1878 1879 log_buf->tlb_eventflags |= TLB_FLAG_HDR; 1880 log_buf->tlb_th = *th; 1881 if (th_hostorder) 1882 tcp_fields_to_net(&log_buf->tlb_th); 1883 optlen = (th->th_off << 2) - sizeof (struct tcphdr); 1884 if (optlen > 0) 1885 memcpy(log_buf->tlb_opts, th + 1, optlen); 1886 } else { 1887 memset(&log_buf->tlb_th, 0, sizeof(*th)); 1888 } 1889 1890 /* Verbose information */ 1891 if (func != NULL) { 1892 log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE; 1893 if (output_caller != NULL) 1894 strlcpy(log_verbose->tlv_snd_frm, output_caller, 1895 TCP_FUNC_LEN); 1896 else 1897 *log_verbose->tlv_snd_frm = 0; 1898 strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN); 1899 log_verbose->tlv_trace_line = line; 1900 } 1901 1902 /* Insert the new log at the tail. */ 1903 STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue); 1904 tcp_log_entry_refcnt_add(log_entry); 1905 return (log_buf); 1906 } 1907 1908 /* 1909 * Change the logging state for a TCPCB. Returns 0 on success or an 1910 * error code on failure. 
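 *
 * A minimal caller sketch (illustrative; callers must hold the INP
 * write lock, which this function asserts):
 *
 *	INP_WLOCK(inp);
 *	error = tcp_log_state_change(tp, TCP_LOG_STATE_TAIL);
 *	INP_WUNLOCK(inp);
 *
 * A return of 0 means the state took effect. ECANCELED means the
 * bucket's ratio logic opted this log ID out, EBUSY means the global
 * disable switch is set, and EINVAL means the state was not
 * recognized.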
1911 */ 1912 int 1913 tcp_log_state_change(struct tcpcb *tp, int state) 1914 { 1915 struct tcp_log_mem *log_entry; 1916 int rv; 1917 1918 INP_WLOCK_ASSERT(tptoinpcb(tp)); 1919 rv = 0; 1920 switch (state) { 1921 case TCP_LOG_STATE_CLEAR: 1922 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 1923 tcp_log_remove_log_head(tp, log_entry); 1924 /* Fall through */ 1925 1926 case TCP_LOG_STATE_OFF: 1927 tp->_t_logstate = TCP_LOG_STATE_OFF; 1928 break; 1929 1930 case TCP_LOG_STATE_TAIL: 1931 case TCP_LOG_STATE_HEAD: 1932 case TCP_LOG_STATE_CONTINUAL: 1933 case TCP_LOG_VIA_BBPOINTS: 1934 case TCP_LOG_STATE_HEAD_AUTO: 1935 case TCP_LOG_STATE_TAIL_AUTO: 1936 /* 1937 * When the RATIO_OFF state is set for the bucket, the log ID 1938 * this tp is associated with has been probabilistically opted 1939 * out of logging per tcp_log_apply_ratio(). 1940 */ 1941 if (tp->t_lib == NULL || 1942 tp->t_lib->tlb_logstate != TCP_LOG_STATE_RATIO_OFF) { 1943 tp->_t_logstate = state; 1944 } else { 1945 rv = ECANCELED; 1946 tp->_t_logstate = TCP_LOG_STATE_OFF; 1947 } 1948 break; 1949 1950 default: 1951 return (EINVAL); 1952 } 1953 if (tcp_disable_all_bb_logs) { 1954 /* We are prohibited from doing any logs */ 1955 tp->_t_logstate = TCP_LOG_STATE_OFF; 1956 rv = EBUSY; 1957 } 1958 tp->t_flags2 &= ~(TF2_LOG_AUTO); 1959 1960 return (rv); 1961 } 1962 1963 /* If tcp_drain() is called, flush half the log entries. */ 1964 void 1965 tcp_log_drain(struct tcpcb *tp) 1966 { 1967 struct tcp_log_mem *log_entry, *next; 1968 int target, skip; 1969 1970 INP_WLOCK_ASSERT(tptoinpcb(tp)); 1971 if ((target = tp->t_lognum / 2) == 0) 1972 return; 1973 1974 /* 1975 * XXXRRS: I don't think it is wise that we do 1976 * this here. All a drain call means is that we 1977 * are hitting one of the system mbuf limits. BB 1978 * logging, or freeing BB logs, will not create any 1979 * more mbufs and really has nothing to do with 1980 * the system running out of mbufs. For now I 1981 * am changing this to free any "AUTO" sessions by 1982 * dumping them out. But this should either be changed 1983 * so that it gets called when we hit the BB limit, 1984 * or it should just not get called (one of the two), 1985 * since I don't think the mbuf <-> BB log cleanup 1986 * is the right thing to do here. 1987 */ 1988 /* 1989 * If we are logging the "head" packets, we want to discard 1990 * from the tail of the queue. Otherwise, we want to discard 1991 * from the head.
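 *
 * For example (illustrative numbers): with t_lognum == 8 and a target
 * of 4, HEAD mode keeps the 4 oldest entries and frees the 4 newest,
 * while the default case frees the 4 oldest and keeps the 4 newest.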
1992 */ 1993 if (tp->_t_logstate == TCP_LOG_STATE_HEAD) { 1994 skip = tp->t_lognum - target; 1995 STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue) 1996 if (!--skip) 1997 break; 1998 KASSERT(log_entry != NULL, 1999 ("%s: skipped through all entries!", __func__)); 2000 if (log_entry == NULL) 2001 return; 2002 while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) { 2003 STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue); 2004 tcp_log_entry_refcnt_rem(next); 2005 tcp_log_remove_log_cleanup(tp, next); 2006 #ifdef INVARIANTS 2007 target--; 2008 #endif 2009 } 2010 KASSERT(target == 0, 2011 ("%s: After removing from tail, target was %d", __func__, 2012 target)); 2013 } else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO) { 2014 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at drain", 2015 M_NOWAIT, false); 2016 } else if (tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO) { 2017 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at drain", 2018 M_NOWAIT, false); 2019 } else if (tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) { 2020 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints", 2021 M_NOWAIT, false); 2022 } else if (tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) { 2023 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual", 2024 M_NOWAIT, false); 2025 } else { 2026 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL && 2027 target--) 2028 tcp_log_remove_log_head(tp, log_entry); 2029 KASSERT(target <= 0, 2030 ("%s: After removing from head, target was %d", __func__, 2031 target)); 2032 KASSERT(tp->t_lognum > 0, 2033 ("%s: After removing from head, tp->t_lognum was %d", 2034 __func__, tp->t_lognum)); 2035 KASSERT(log_entry != NULL, 2036 ("%s: After removing from head, the tailq was empty", 2037 __func__)); 2038 } 2039 } 2040 2041 static inline int 2042 tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len) 2043 { 2044 2045 if (sopt->sopt_td != NULL) 2046 return (copyout(src, dst, len)); 2047 bcopy(src, dst, len); 2048 return (0); 2049 } 2050 2051 static int 2052 tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp, 2053 struct tcp_log_buffer **end, int count) 2054 { 2055 struct tcp_log_buffer *out_entry; 2056 struct tcp_log_mem *log_entry; 2057 size_t entrysize; 2058 int error; 2059 #ifdef INVARIANTS 2060 int orig_count = count; 2061 #endif 2062 2063 /* Copy the data out. */ 2064 error = 0; 2065 out_entry = (struct tcp_log_buffer *) sopt->sopt_val; 2066 STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) { 2067 count--; 2068 KASSERT(count >= 0, 2069 ("%s:%d: Exceeded expected count (%d) processing list %p", 2070 __func__, __LINE__, orig_count, log_tailqp)); 2071 2072 #ifdef TCPLOG_DEBUG_COUNTERS 2073 counter_u64_add(tcp_log_que_copyout, 1); 2074 #endif 2075 2076 /* 2077 * Skip copying out the header if it isn't present. 2078 * Instead, copy out zeros (to ensure we don't leak info). 2079 * TODO: Make sure we truly do zero everything we don't 2080 * explicitly set. 2081 */ 2082 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR) 2083 entrysize = sizeof(struct tcp_log_buffer); 2084 else 2085 entrysize = offsetof(struct tcp_log_buffer, tlb_th); 2086 error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry, 2087 entrysize); 2088 if (error) 2089 break; 2090 if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) { 2091 error = tcp_log_copyout(sopt, zerobuf, 2092 ((uint8_t *)out_entry) + entrysize, 2093 sizeof(struct tcp_log_buffer) - entrysize); 2094 } 2095 if (error) break; 2096 /* 2097 * Copy out the verbose bit, if needed.
Either way, 2098 * increment the output pointer the correct amount. 2099 */ 2100 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) { 2101 error = tcp_log_copyout(sopt, &log_entry->tlm_v, 2102 out_entry->tlb_verbose, 2103 sizeof(struct tcp_log_verbose)); 2104 if (error) 2105 break; 2106 out_entry = (struct tcp_log_buffer *) 2107 (((uint8_t *) (out_entry + 1)) + 2108 sizeof(struct tcp_log_verbose)); 2109 } else 2110 out_entry++; 2111 } 2112 *end = out_entry; 2113 KASSERT(error || count == 0, 2114 ("%s:%d: Less than expected count (%d) processing list %p" 2115 " (%d remain)", __func__, __LINE__, orig_count, 2116 log_tailqp, count)); 2117 2118 return (error); 2119 } 2120 2121 /* 2122 * Copy out the buffer. Note that we do incremental copying, so 2123 * sooptcopyout() won't work. However, the goal is to produce the same 2124 * end result as if we copied in the entire user buffer, updated it, 2125 * and then used sooptcopyout() to copy it out. 2126 * 2127 * NOTE: This should be called with a write lock on the PCB; however, 2128 * the function will drop it after it extracts the data from the TCPCB. 2129 */ 2130 int 2131 tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp) 2132 { 2133 struct tcp_log_stailq log_tailq; 2134 struct tcp_log_mem *log_entry, *log_next; 2135 struct tcp_log_buffer *out_entry; 2136 struct inpcb *inp = tptoinpcb(tp); 2137 size_t outsize, entrysize; 2138 int error, outnum; 2139 2140 INP_WLOCK_ASSERT(inp); 2141 2142 /* 2143 * Determine which log entries will fit in the buffer. As an 2144 * optimization, skip this if all the entries will clearly fit 2145 * in the buffer. (However, get an exact size if we are using 2146 * INVARIANTS.) 2147 */ 2148 #ifndef INVARIANTS 2149 if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) + 2150 sizeof(struct tcp_log_verbose)) >= tp->t_lognum) { 2151 log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue); 2152 log_next = NULL; 2153 outsize = 0; 2154 outnum = tp->t_lognum; 2155 } else { 2156 #endif 2157 outsize = outnum = 0; 2158 log_entry = NULL; 2159 STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) { 2160 entrysize = sizeof(struct tcp_log_buffer); 2161 if (log_next->tlm_buf.tlb_eventflags & 2162 TLB_FLAG_VERBOSE) 2163 entrysize += sizeof(struct tcp_log_verbose); 2164 if ((sopt->sopt_valsize - outsize) < entrysize) 2165 break; 2166 outsize += entrysize; 2167 outnum++; 2168 log_entry = log_next; 2169 } 2170 KASSERT(outsize <= sopt->sopt_valsize, 2171 ("%s: calculated output size (%zu) greater than available " 2172 "space (%zu)", __func__, outsize, sopt->sopt_valsize)); 2173 #ifndef INVARIANTS 2174 } 2175 #endif 2176 2177 /* 2178 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val 2179 * is NULL, silently skip the copy. However, in this case, we 2180 * will leave the list alone and return. Functionally, this 2181 * gives userspace a way to poll for an approximate buffer 2182 * size they will need to get the log entries. 2183 */ 2184 if (sopt->sopt_val == NULL) { 2185 INP_WUNLOCK(inp); 2186 if (outsize == 0) { 2187 outsize = outnum * (sizeof(struct tcp_log_buffer) + 2188 sizeof(struct tcp_log_verbose)); 2189 } 2190 if (sopt->sopt_valsize > outsize) 2191 sopt->sopt_valsize = outsize; 2192 return (0); 2193 } 2194 2195 /* 2196 * Break apart the list. We'll save the ones we want to copy 2197 * out locally and remove them from the TCPCB list. We can 2198 * then drop the INPCB lock while we do the copyout. 2199 * 2200 * There are roughly three cases: 2201 * 1. There was nothing to copy out.
That's easy: drop the 2202 * lock and return. 2203 * 2. We are copying out the entire list. Again, that's easy: 2204 * move the whole list. 2205 * 3. We are copying out a partial list. That's harder. We 2206 * need to update the list book-keeping entries. 2207 */ 2208 if (log_entry != NULL && log_next == NULL) { 2209 /* Move entire list. */ 2210 KASSERT(outnum == tp->t_lognum, 2211 ("%s:%d: outnum (%d) should match tp->t_lognum (%d)", 2212 __func__, __LINE__, outnum, tp->t_lognum)); 2213 log_tailq = tp->t_logs; 2214 tp->t_lognum = 0; 2215 STAILQ_INIT(&tp->t_logs); 2216 } else if (log_entry != NULL) { 2217 /* Move partial list. */ 2218 KASSERT(outnum < tp->t_lognum, 2219 ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)", 2220 __func__, __LINE__, outnum, tp->t_lognum)); 2221 STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs); 2222 STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue); 2223 KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL, 2224 ("%s:%d: tp->t_logs is unexpectedly short " 2225 "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)", 2226 __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum)); 2227 STAILQ_NEXT(log_entry, tlm_queue) = NULL; 2228 log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue); 2229 tp->t_lognum -= outnum; 2230 } else 2231 STAILQ_INIT(&log_tailq); 2232 2233 /* Drop the PCB lock. */ 2234 INP_WUNLOCK(inp); 2235 2236 /* Copy the data out. */ 2237 error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum); 2238 2239 if (error) { 2240 /* Restore list */ 2241 INP_WLOCK(inp); 2242 if ((inp->inp_flags & INP_DROPPED) == 0) { 2243 tp = intotcpcb(inp); 2244 2245 /* Merge the two lists. */ 2246 STAILQ_CONCAT(&log_tailq, &tp->t_logs); 2247 tp->t_logs = log_tailq; 2248 tp->t_lognum += outnum; 2249 } 2250 INP_WUNLOCK(inp); 2251 } else { 2252 /* Sanity check entries */ 2253 KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) == 2254 outsize, ("%s: Actual output size (%zu) != " 2255 "calculated output size (%zu)", __func__, 2256 (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val), 2257 outsize)); 2258 2259 /* Free the entries we just copied out. */ 2260 STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) { 2261 tcp_log_entry_refcnt_rem(log_entry); 2262 uma_zfree(tcp_log_zone, log_entry); 2263 } 2264 } 2265 2266 sopt->sopt_valsize = (size_t)((caddr_t)out_entry - 2267 (caddr_t)sopt->sopt_val); 2268 return (error); 2269 } 2270 2271 static void 2272 tcp_log_free_queue(struct tcp_log_dev_queue *param) 2273 { 2274 struct tcp_log_dev_log_queue *entry; 2275 2276 KASSERT(param != NULL, ("%s: called with NULL param", __func__)); 2277 if (param == NULL) 2278 return; 2279 2280 entry = (struct tcp_log_dev_log_queue *)param; 2281 2282 /* Free the entries. */ 2283 tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count); 2284 2285 /* Free the buffer, if it is allocated. */ 2286 if (entry->tldl_common.tldq_buf != NULL) 2287 free(entry->tldl_common.tldq_buf, M_TCPLOGDEV); 2288 2289 /* Free the queue entry. */ 2290 free(entry, M_TCPLOGDEV); 2291 } 2292 2293 static struct tcp_log_common_header * 2294 tcp_log_expandlogbuf(struct tcp_log_dev_queue *param) 2295 { 2296 struct tcp_log_dev_log_queue *entry; 2297 struct tcp_log_header *hdr; 2298 uint8_t *end; 2299 struct sockopt sopt; 2300 int error; 2301 2302 entry = (struct tcp_log_dev_log_queue *)param; 2303 2304 /* Take a worst-case guess at space needs.
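 * Worst case, every entry carries a verbose block:
 *
 *	sizeof(struct tcp_log_header) + count *
 *	    (sizeof(struct tcp_log_buffer) + sizeof(struct tcp_log_verbose))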
*/ 2305 sopt.sopt_valsize = sizeof(struct tcp_log_header) + 2306 entry->tldl_count * (sizeof(struct tcp_log_buffer) + 2307 sizeof(struct tcp_log_verbose)); 2308 hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT); 2309 if (hdr == NULL) { 2310 #ifdef TCPLOG_DEBUG_COUNTERS 2311 counter_u64_add(tcp_log_que_fail5, entry->tldl_count); 2312 #endif 2313 return (NULL); 2314 } 2315 sopt.sopt_val = hdr + 1; 2316 sopt.sopt_valsize -= sizeof(struct tcp_log_header); 2317 sopt.sopt_td = NULL; 2318 2319 error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries, 2320 (struct tcp_log_buffer **)&end, entry->tldl_count); 2321 if (error) { 2322 free(hdr, M_TCPLOGDEV); 2323 return (NULL); 2324 } 2325 2326 /* Free the entries. */ 2327 tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count); 2328 entry->tldl_count = 0; 2329 2330 memset(hdr, 0, sizeof(struct tcp_log_header)); 2331 hdr->tlh_version = TCP_LOG_BUF_VER; 2332 hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR; 2333 hdr->tlh_length = end - (uint8_t *)hdr; 2334 hdr->tlh_ie = entry->tldl_ie; 2335 hdr->tlh_af = entry->tldl_af; 2336 getboottime(&hdr->tlh_offset); 2337 strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN); 2338 strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN); 2339 strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN); 2340 return ((struct tcp_log_common_header *)hdr); 2341 } 2342 2343 /* 2344 * Queue the tcpcb's log buffer for transmission via the log buffer facility. 2345 * 2346 * NOTE: This should be called with a write lock on the PCB. 2347 * 2348 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop 2349 * and reacquire the INP lock if it needs to do so. 2350 * 2351 * If force is false, this will only dump auto-logged sessions if 2352 * tcp_log_auto_all is true or if there is a log ID defined for the session. 2353 */ 2354 int 2355 tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force) 2356 { 2357 struct tcp_log_dev_log_queue *entry; 2358 struct inpcb *inp = tptoinpcb(tp); 2359 #ifdef TCPLOG_DEBUG_COUNTERS 2360 int num_entries; 2361 #endif 2362 2363 INP_WLOCK_ASSERT(inp); 2364 2365 /* If there are no log entries, there is nothing to do. */ 2366 if (tp->t_lognum == 0) 2367 return (0); 2368 2369 /* Check for a log ID. */ 2370 if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) && 2371 !tcp_log_auto_all && !force) { 2372 struct tcp_log_mem *log_entry; 2373 2374 /* 2375 * We needed a log ID and none was found. Free the log entries 2376 * and return success. Also, cancel further logging. If the 2377 * session doesn't have a log ID by now, we'll assume it isn't 2378 * going to get one. 2379 */ 2380 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 2381 tcp_log_remove_log_head(tp, log_entry); 2382 KASSERT(tp->t_lognum == 0, 2383 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)", 2384 __func__, tp->t_lognum)); 2385 tp->_t_logstate = TCP_LOG_STATE_OFF; 2386 return (0); 2387 } 2388 2389 /* 2390 * Allocate memory. If we must wait, we'll need to drop the locks 2391 * and reacquire them (and do all the related business that goes 2392 * along with that). 
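 *
 * The shape of what follows (an outline of the code below): try
 * M_NOWAIT first; if that fails and the caller allows sleeping, drop
 * the INP lock, malloc(M_WAITOK), relock, and revalidate that the
 * connection and its log entries still exist before continuing.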
2393 */ 2394 entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV, 2395 M_NOWAIT); 2396 if (entry == NULL && (how & M_NOWAIT)) { 2397 #ifdef TCPLOG_DEBUG_COUNTERS 2398 counter_u64_add(tcp_log_que_fail3, 1); 2399 #endif 2400 return (ENOBUFS); 2401 } 2402 if (entry == NULL) { 2403 INP_WUNLOCK(inp); 2404 entry = malloc(sizeof(struct tcp_log_dev_log_queue), 2405 M_TCPLOGDEV, M_WAITOK); 2406 INP_WLOCK(inp); 2407 /* 2408 * Note that this check is slightly overly-restrictive in 2409 * that the TCB can survive this event. 2410 * However, there is currently not a good way to ensure 2411 * that is the case. So, if we hit this M_WAITOK path, we 2412 * may end up dropping some entries. That seems like a 2413 * small price to pay for safety. 2414 */ 2415 if (inp->inp_flags & INP_DROPPED) { 2416 free(entry, M_TCPLOGDEV); 2417 #ifdef TCPLOG_DEBUG_COUNTERS 2418 counter_u64_add(tcp_log_que_fail2, 1); 2419 #endif 2420 return (ECONNRESET); 2421 } 2422 tp = intotcpcb(inp); 2423 if (tp->t_lognum == 0) { 2424 free(entry, M_TCPLOGDEV); 2425 return (0); 2426 } 2427 } 2428 2429 /* Fill in the unique parts of the queue entry. */ 2430 if (tp->t_lib != NULL) { 2431 strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN); 2432 strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN); 2433 } else { 2434 strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN); 2435 strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN); 2436 } 2437 if (reason != NULL) 2438 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN); 2439 else 2440 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN); 2441 entry->tldl_ie = inp->inp_inc.inc_ie; 2442 if (inp->inp_inc.inc_flags & INC_ISIPV6) 2443 entry->tldl_af = AF_INET6; 2444 else 2445 entry->tldl_af = AF_INET; 2446 entry->tldl_entries = tp->t_logs; 2447 entry->tldl_count = tp->t_lognum; 2448 2449 /* Fill in the common parts of the queue entry. */ 2450 entry->tldl_common.tldq_buf = NULL; 2451 entry->tldl_common.tldq_xform = tcp_log_expandlogbuf; 2452 entry->tldl_common.tldq_dtor = tcp_log_free_queue; 2453 2454 /* Clear the log data from the TCPCB. */ 2455 #ifdef TCPLOG_DEBUG_COUNTERS 2456 num_entries = tp->t_lognum; 2457 #endif 2458 tp->t_lognum = 0; 2459 STAILQ_INIT(&tp->t_logs); 2460 2461 /* Add the entry. If no one is listening, free the entry. */ 2462 if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) { 2463 tcp_log_free_queue((struct tcp_log_dev_queue *)entry); 2464 #ifdef TCPLOG_DEBUG_COUNTERS 2465 counter_u64_add(tcp_log_que_fail1, num_entries); 2466 } else { 2467 counter_u64_add(tcp_log_queued, num_entries); 2468 #endif 2469 } 2470 return (0); 2471 } 2472 2473 /* 2474 * Queue the log_id_node's log buffers for transmission via the log buffer 2475 * facility. 2476 * 2477 * NOTE: This should be called with the bucket locked and referenced. 2478 * 2479 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop 2480 * and reacquire the bucket lock if it needs to do so. (The caller must 2481 * ensure that the tln is no longer on any lists so no one else will mess 2482 * with this while the lock is dropped!)
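 *
 * (tcp_log_dumpbucketlogs() below follows this contract: it unlinks
 * the node from the bucket and the expire queue before calling here.)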
2483 */ 2484 static int 2485 tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how) 2486 { 2487 struct tcp_log_dev_log_queue *entry; 2488 struct tcp_log_id_bucket *tlb; 2489 2490 tlb = tln->tln_bucket; 2491 TCPID_BUCKET_LOCK_ASSERT(tlb); 2492 KASSERT(tlb->tlb_refcnt > 0, 2493 ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)", 2494 __func__, __LINE__, tln, tlb)); 2495 KASSERT(tln->tln_closed, 2496 ("%s:%d: Called for node with tln_closed==false (tln=%p)", 2497 __func__, __LINE__, tln)); 2498 2499 /* If there are no log entries, there is nothing to do. */ 2500 if (tln->tln_count == 0) 2501 return (0); 2502 2503 /* 2504 * Allocate memory. If we must wait, we'll need to drop the locks 2505 * and reacquire them (and do all the related business that goes 2506 * along with that). 2507 */ 2508 entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV, 2509 M_NOWAIT); 2510 if (entry == NULL && (how & M_NOWAIT)) 2511 return (ENOBUFS); 2512 if (entry == NULL) { 2513 TCPID_BUCKET_UNLOCK(tlb); 2514 entry = malloc(sizeof(struct tcp_log_dev_log_queue), 2515 M_TCPLOGDEV, M_WAITOK); 2516 TCPID_BUCKET_LOCK(tlb); 2517 } 2518 2519 /* Fill in the common parts of the queue entry. */ 2520 entry->tldl_common.tldq_buf = NULL; 2521 entry->tldl_common.tldq_xform = tcp_log_expandlogbuf; 2522 entry->tldl_common.tldq_dtor = tcp_log_free_queue; 2523 2524 /* Fill in the unique parts of the queue entry. */ 2525 strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN); 2526 strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN); 2527 if (reason != NULL) 2528 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN); 2529 else 2530 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN); 2531 entry->tldl_ie = tln->tln_ie; 2532 entry->tldl_entries = tln->tln_entries; 2533 entry->tldl_count = tln->tln_count; 2534 entry->tldl_af = tln->tln_af; 2535 2536 /* Add the entry. If no one is listening, free the entry. */ 2537 if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) 2538 tcp_log_free_queue((struct tcp_log_dev_queue *)entry); 2539 2540 return (0); 2541 } 2542 2543 /* 2544 * Queue the log buffers for all sessions in a bucket for transmission via 2545 * the log buffer facility. 2546 * 2547 * NOTE: This should be called with a locked bucket; however, the function 2548 * will drop the lock. 2549 */ 2550 #define LOCAL_SAVE 10 2551 static void 2552 tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason) 2553 { 2554 struct tcp_log_id_node local_entries[LOCAL_SAVE]; 2555 struct inpcb *inp; 2556 struct tcpcb *tp; 2557 struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln; 2558 int i, num_local_entries, tree_locked; 2559 bool expireq_locked; 2560 2561 TCPID_BUCKET_LOCK_ASSERT(tlb); 2562 2563 /* 2564 * Take a reference on the bucket to keep it from disappearing until 2565 * we are done. 2566 */ 2567 TCPID_BUCKET_REF(tlb); 2568 2569 /* 2570 * We'll try to create these without dropping locks. However, we 2571 * might very well need to drop locks to get memory. If that's the 2572 * case, we'll save up to 10 entries on the stack, and sacrifice the 2573 * rest. (Otherwise, we need to worry about finding our place again 2574 * in a potentially changed list. It just doesn't seem worth the 2575 * trouble to do that.) 2576 */ 2577 expireq_locked = false; 2578 num_local_entries = 0; 2579 prev_tln = NULL; 2580 tree_locked = TREE_UNLOCKED; 2581 SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) { 2582 /* 2583 * If this isn't associated with a TCPCB, we can pull it off 2584 * the list now.
We need to be careful that the expire timer 2585 * hasn't already taken ownership (tln_expiretime == SBT_MAX). 2586 * If so, we let the expire timer code free the data. 2587 */ 2588 if (cur_tln->tln_closed) { 2589 no_inp: 2590 /* 2591 * Get the expireq lock so we can get a consistent 2592 * read of tln_expiretime and so we can remove this 2593 * from the expireq. 2594 */ 2595 if (!expireq_locked) { 2596 TCPLOG_EXPIREQ_LOCK(); 2597 expireq_locked = true; 2598 } 2599 2600 /* 2601 * We ignore entries with tln_expiretime == SBT_MAX. 2602 * The expire timer code already owns those. 2603 */ 2604 KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0, 2605 ("%s:%d: node on the expire queue without positive " 2606 "expire time", __func__, __LINE__)); 2607 if (cur_tln->tln_expiretime == SBT_MAX) { 2608 prev_tln = cur_tln; 2609 continue; 2610 } 2611 2612 /* Remove the entry from the expireq. */ 2613 STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln, 2614 tcp_log_id_node, tln_expireq); 2615 2616 /* Remove the entry from the bucket. */ 2617 if (prev_tln != NULL) 2618 SLIST_REMOVE_AFTER(prev_tln, tln_list); 2619 else 2620 SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list); 2621 2622 /* 2623 * Drop the INP and bucket reference counts. Due to 2624 * lock-ordering rules, we need to drop the expire 2625 * queue lock. 2626 */ 2627 TCPLOG_EXPIREQ_UNLOCK(); 2628 expireq_locked = false; 2629 2630 /* Drop the INP reference. */ 2631 INP_WLOCK(cur_tln->tln_inp); 2632 if (!in_pcbrele_wlocked(cur_tln->tln_inp)) 2633 INP_WUNLOCK(cur_tln->tln_inp); 2634 2635 if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) { 2636 #ifdef INVARIANTS 2637 panic("%s: Bucket refcount unexpectedly 0.", 2638 __func__); 2639 #endif 2640 /* 2641 * Recover as best we can: free the entry we 2642 * own. 2643 */ 2644 tcp_log_free_entries(&cur_tln->tln_entries, 2645 &cur_tln->tln_count); 2646 uma_zfree(tcp_log_id_node_zone, cur_tln); 2647 goto done; 2648 } 2649 2650 if (tcp_log_dump_node_logbuf(cur_tln, reason, 2651 M_NOWAIT)) { 2652 /* 2653 * If we have space, save the entries locally. 2654 * Otherwise, free them. 2655 */ 2656 if (num_local_entries < LOCAL_SAVE) { 2657 local_entries[num_local_entries] = 2658 *cur_tln; 2659 num_local_entries++; 2660 } else { 2661 tcp_log_free_entries( 2662 &cur_tln->tln_entries, 2663 &cur_tln->tln_count); 2664 } 2665 } 2666 2667 /* No matter what, we are done with the node now. */ 2668 uma_zfree(tcp_log_id_node_zone, cur_tln); 2669 2670 /* 2671 * Because we removed this entry from the list, prev_tln 2672 * (which tracks the previous entry still on the tlb 2673 * list) remains unchanged. 2674 */ 2675 continue; 2676 } 2677 2678 /* 2679 * If we get to this point, the session data is still held in 2680 * the TCPCB. So, we need to pull the data out of that. 2681 * 2682 * We will need to drop the expireq lock so we can lock the INP. 2683 * We can then try to extract the data the "easy" way. If that 2684 * fails, we'll save the log entries for later. 2685 */ 2686 if (expireq_locked) { 2687 TCPLOG_EXPIREQ_UNLOCK(); 2688 expireq_locked = false; 2689 } 2690 2691 /* Lock the INP and then re-check the state. */ 2692 inp = cur_tln->tln_inp; 2693 INP_WLOCK(inp); 2694 /* 2695 * If we caught this while it was transitioning, the data 2696 * might have moved from the TCPCB to the tln (signified by 2697 * setting tln_closed to true). If so, treat this like an 2698 * inactive connection. 2699 */ 2700 if (cur_tln->tln_closed) { 2701 /* 2702 * It looks like we may have caught this connection 2703 * while it was transitioning from active to inactive.
2704 * Treat this like an inactive connection. 2705 */ 2706 INP_WUNLOCK(inp); 2707 goto no_inp; 2708 } 2709 2710 /* 2711 * Try to dump the data from the tp without dropping the lock. 2712 * If this fails, try to save off the data locally. 2713 */ 2714 tp = cur_tln->tln_tp; 2715 if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) && 2716 num_local_entries < LOCAL_SAVE) { 2717 tcp_log_move_tp_to_node(tp, 2718 &local_entries[num_local_entries]); 2719 local_entries[num_local_entries].tln_closed = 1; 2720 KASSERT(local_entries[num_local_entries].tln_bucket == 2721 tlb, ("%s: %d: bucket mismatch for node %p", 2722 __func__, __LINE__, cur_tln)); 2723 num_local_entries++; 2724 } 2725 2726 INP_WUNLOCK(inp); 2727 2728 /* 2729 * We are going to leave the current tln on the list. It will 2730 * become the previous tln. 2731 */ 2732 prev_tln = cur_tln; 2733 } 2734 2735 /* Drop our locks, if any. */ 2736 KASSERT(tree_locked == TREE_UNLOCKED, 2737 ("%s: %d: tree unexpectedly locked", __func__, __LINE__)); 2738 switch (tree_locked) { 2739 case TREE_WLOCKED: 2740 TCPID_TREE_WUNLOCK(); 2741 tree_locked = TREE_UNLOCKED; 2742 break; 2743 case TREE_RLOCKED: 2744 TCPID_TREE_RUNLOCK(); 2745 tree_locked = TREE_UNLOCKED; 2746 break; 2747 } 2748 if (expireq_locked) { 2749 TCPLOG_EXPIREQ_UNLOCK(); 2750 expireq_locked = false; 2751 } 2752 2753 /* 2754 * Try again for any saved entries. tcp_log_dump_node_logbuf() is 2755 * guaranteed to free the log entries within the node. And, since 2756 * the node itself is on our stack, we don't need to free it. 2757 */ 2758 for (i = 0; i < num_local_entries; i++) 2759 tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK); 2760 2761 /* Drop our reference. */ 2762 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL)) 2763 TCPID_BUCKET_UNLOCK(tlb); 2764 2765 done: 2766 /* Drop our locks, if any. */ 2767 switch (tree_locked) { 2768 case TREE_WLOCKED: 2769 TCPID_TREE_WUNLOCK(); 2770 break; 2771 case TREE_RLOCKED: 2772 TCPID_TREE_RUNLOCK(); 2773 break; 2774 } 2775 if (expireq_locked) 2776 TCPLOG_EXPIREQ_UNLOCK(); 2777 } 2778 #undef LOCAL_SAVE 2779 2780 /* 2781 * Queue the log buffers for all sessions in a bucket for transmission via 2782 * the log buffer facility. 2783 * 2784 * NOTE: This should be called with a locked INP; however, the function 2785 * will drop the lock. 2786 */ 2787 void 2788 tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason) 2789 { 2790 struct inpcb *inp = tptoinpcb(tp); 2791 struct tcp_log_id_bucket *tlb; 2792 int tree_locked; 2793 2794 /* Figure out our bucket and lock it. */ 2795 INP_WLOCK_ASSERT(inp); 2796 tlb = tp->t_lib; 2797 if (tlb == NULL) { 2798 /* 2799 * No bucket; treat this like a request to dump a single 2800 * session's traces. 2801 */ 2802 (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true); 2803 INP_WUNLOCK(inp); 2804 return; 2805 } 2806 TCPID_BUCKET_REF(tlb); 2807 INP_WUNLOCK(inp); 2808 TCPID_BUCKET_LOCK(tlb); 2809 2810 /* If we are the last reference, we have nothing more to do here. */ 2811 tree_locked = TREE_UNLOCKED; 2812 if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) { 2813 switch (tree_locked) { 2814 case TREE_WLOCKED: 2815 TCPID_TREE_WUNLOCK(); 2816 break; 2817 case TREE_RLOCKED: 2818 TCPID_TREE_RUNLOCK(); 2819 break; 2820 } 2821 return; 2822 } 2823 2824 /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */ 2825 tcp_log_dumpbucketlogs(tlb, reason); 2826 } 2827 2828 /* 2829 * Mark the end of a flow with the current stack.
A stack can add 2830 * stack-specific info to this trace event by overriding this 2831 * function (see bbr_log_flowend() for example). 2832 */ 2833 void 2834 tcp_log_flowend(struct tcpcb *tp) 2835 { 2836 if (tp->_t_logstate != TCP_LOG_STATE_OFF) { 2837 struct socket *so = tptosocket(tp); 2838 TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, 2839 TCP_LOG_FLOWEND, 0, 0, NULL, false); 2840 } 2841 } 2842 2843 void 2844 tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags) 2845 { 2846 struct inpcb *inp; 2847 struct tcpcb *tp; 2848 #ifdef TCP_REQUEST_TRK 2849 struct http_sendfile_track *ent; 2850 int i, fnd; 2851 #endif 2852 2853 inp = sotoinpcb(so); 2854 KASSERT(inp != NULL, ("tcp_log_sendfile: inp == NULL")); 2855 2856 /* quick check to see if logging is enabled for this connection */ 2857 tp = intotcpcb(inp); 2858 if ((inp->inp_flags & INP_DROPPED) || 2859 (tp->_t_logstate == TCP_LOG_STATE_OFF)) { 2860 return; 2861 } 2862 2863 INP_WLOCK(inp); 2864 /* double check log state now that we have the lock */ 2865 if (inp->inp_flags & INP_DROPPED) 2866 goto done; 2867 if (tp->_t_logstate != TCP_LOG_STATE_OFF) { 2868 struct timeval tv; 2869 tcp_log_eventspecific_t log; 2870 2871 microuptime(&tv); 2872 log.u_sf.offset = offset; 2873 log.u_sf.length = nbytes; 2874 log.u_sf.flags = flags; 2875 2876 TCP_LOG_EVENTP(tp, NULL, 2877 &tptosocket(tp)->so_rcv, 2878 &tptosocket(tp)->so_snd, 2879 TCP_LOG_SENDFILE, 0, 0, &log, false, &tv); 2880 } 2881 #ifdef TCP_REQUEST_TRK 2882 if (tp->t_http_req == 0) { 2883 /* No http requests to track */ 2884 goto done; 2885 } 2886 fnd = 0; 2887 if (tp->t_http_closed == 0) { 2888 /* No closed end req to track */ 2889 goto skip_closed_req; 2890 } 2891 for (i = 0; i < MAX_TCP_HTTP_REQ; i++) { 2892 /* Let's see if this one can be found */ 2893 ent = &tp->t_http_info[i]; 2894 if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) { 2895 /* Not used */ 2896 continue; 2897 } 2898 if (ent->flags & TCP_HTTP_TRACK_FLG_OPEN) { 2899 /* This pass does not consider open requests */ 2900 continue; 2901 } 2902 if (ent->flags & TCP_HTTP_TRACK_FLG_COMP) { 2903 /* Don't look at what we have completed */ 2904 continue; 2905 } 2906 /* If we reach here, it's an allocated closed-end request */ 2907 if ((ent->start == offset) || 2908 ((offset > ent->start) && (offset < ent->end))) { 2909 /* Is it within this request? */ 2910 fnd = 1; 2911 } 2912 if (fnd) { 2913 /* 2914 * It is at or past the end, so it's complete. 2915 */ 2916 ent->flags |= TCP_HTTP_TRACK_FLG_SEQV; 2917 /* 2918 * When an entry completes we can take (snd_una + sb_ccc) and know where 2919 * the end of the range really is. Note that this works since two 2920 * requests must be sequential and sendfile is now complete for *this* request. 2921 * We must use sb_ccc since the data may still be in-flight in TLS. 2922 * 2923 * We always cautiously move the end_seq only if our calculations 2924 * show it happened (just in case sendfile calls here at the wrong 2925 * place). When we go COMP we will stop coming here and hopefully be 2926 * left with the correct end_seq.
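 *
 * Concretely (illustrative numbers): with snd_una = 1000 and
 * sb_ccc = 500, the candidate end of the range is sequence 1500, and
 * end_seq only advances if it is currently below 1500.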
2927 */ 2928 if (SEQ_GT((tp->snd_una + so->so_snd.sb_ccc), ent->end_seq)) 2929 ent->end_seq = tp->snd_una + so->so_snd.sb_ccc; 2930 if ((offset + nbytes) >= ent->end) { 2931 ent->flags |= TCP_HTTP_TRACK_FLG_COMP; 2932 tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_COMPLETE, offset, nbytes); 2933 } else { 2934 tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_MOREYET, offset, nbytes); 2935 } 2936 /* We assume that sendfile never sends overlapping requests */ 2937 goto done; 2938 } 2939 } 2940 skip_closed_req: 2941 if (!fnd) { 2942 /* OK, now let's look for open requests */ 2943 for (i = 0; i < MAX_TCP_HTTP_REQ; i++) { 2944 ent = &tp->t_http_info[i]; 2945 if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) { 2946 /* Not used */ 2947 continue; 2948 } 2949 if ((ent->flags & TCP_HTTP_TRACK_FLG_OPEN) == 0) 2950 continue; 2951 /* If we reach here, it's an allocated open request */ 2952 if (ent->start == offset) { 2953 /* It begins this request */ 2954 ent->start_seq = tp->snd_una + 2955 tptosocket(tp)->so_snd.sb_ccc; 2956 ent->flags |= TCP_HTTP_TRACK_FLG_SEQV; 2957 break; 2958 } else if (offset > ent->start) { 2959 ent->flags |= TCP_HTTP_TRACK_FLG_SEQV; 2960 break; 2961 } 2962 } 2963 } 2964 #endif 2965 done: 2966 INP_WUNLOCK(inp); 2967 } 2968
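/*
 * Illustrative sketch (not part of this file's logic): a stack that
 * wants to add stack-specific detail to the flow-end trace overrides
 * tcp_log_flowend() as the comment above that function describes; see
 * bbr_log_flowend() for the real example. In outline, such an override
 * fills in a union tcp_log_stackspecific and logs it itself:
 *
 *	union tcp_log_stackspecific log;
 *	struct socket *so = tptosocket(tp);
 *
 *	memset(&log, 0, sizeof(log));
 *	(fill in stack-specific fields here)
 *	TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
 *	    TCP_LOG_FLOWEND, 0, 0, &log, false);
 */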