/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>
#include <machine/atomic.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct ipiq_stats {
    int64_t ipiq_count;		/* total calls to lwkt_send_ipiq*() */
    int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
    int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
    int64_t ipiq_passive;	/* passive IPI messages */
    int64_t ipiq_cscount;	/* number of cpu synchronizations */
} __cachealign;

static struct ipiq_stats ipiq_stats_percpu[MAXCPU];
#define ipiq_stat(gd)	ipiq_stats_percpu[(gd)->gd_cpuid]

static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif

SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARGS	void *func, void *arg1, int arg2, int scpu, int dcpu

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_quick, 9, "cpumask=%08lx", unsigned long mask);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

#define IPIQ_SYSCTL(name)				\
static int						\
sysctl_##name(SYSCTL_HANDLER_ARGS)			\
{							\
    int64_t val = 0;					\
    int cpu, error;					\
							\
    for (cpu = 0; cpu < ncpus; ++cpu)			\
	val += ipiq_stats_percpu[cpu].name;		\
							\
    error = sysctl_handle_quad(oidp, &val, 0, req);	\
    if (error || req->newptr == NULL)			\
	return error;					\
							\
    for (cpu = 0; cpu < ncpus; ++cpu)			\
	ipiq_stats_percpu[cpu].name = val;		\
							\
    return 0;						\
}
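
/*
 * Instantiate one aggregation handler per counter below.  For example,
 * IPIQ_SYSCTL(ipiq_count) expands to sysctl_ipiq_count(), which reports
 * the sum of ipiq_stats_percpu[cpu].ipiq_count over all cpus and, on a
 * write, stores the written value into every per-cpu slot (so writing 0
 * clears the statistic).
 */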

IPIQ_SYSCTL(ipiq_count);
IPIQ_SYSCTL(ipiq_fifofull);
IPIQ_SYSCTL(ipiq_avoided);
IPIQ_SYSCTL(ipiq_passive);
IPIQ_SYSCTL(ipiq_cscount);

SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_count, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_count, "Q", "Number of IPI's sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_fifofull, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_fifofull, "Q",
    "Number of fifo full conditions detected");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_avoided, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_avoided, "Q",
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_passive, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_passive, "Q",
    "Number of passive IPI messages sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_cscount, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_cscount, "Q",
    "Number of cpu synchronizations");

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu, so the sending cpu can append to the FIFO without
 * any interlock.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
#ifdef _KERNEL_VIRTUAL
    int repeating = 0;
#endif
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     *
     * The target ipiq may have gotten filled up due to passive IPIs and thus
     * not be aware that its queue is too full, so be sure to issue an
     * ipiq interrupt to the target cpu.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__x86_64__)
	unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif

	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    if (atomic_poll_acquire_int(&target->gd_npoll)) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
#if defined(__x86_64__)
	write_rflags(rflags);
#else
#error "no write_*flags"
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * Signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&target->gd_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}
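
/*
 * A note on the thresholds above: the send path begins draining once its
 * FIFO to the target is more than half full (MAXCPUFIFO / 2) and keeps
 * processing incoming IPIs until the FIFO has drained to a quarter or
 * less (MAXCPUFIFO / 4).  The gap between the two thresholds provides the
 * hysteresis mentioned in the function comment, letting messages pipeline
 * instead of ping-ponging around a single cutoff.
 */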

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
#ifdef _KERNEL_VIRTUAL
    int repeating = 0;
#endif
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    ++gd->gd_intr_nesting_level;
    logipiq(send_pasv, func, arg1, arg2, gd, target);
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_stat(gd).ipiq_count;
    ++ipiq_stat(gd).ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__x86_64__)
	unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif

	cpu_enable_intr();
	++ipiq_stat(gd).ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    if (atomic_poll_acquire_int(&target->gd_npoll)) {
		logipiq(cpu_send, func, arg1, arg2, gd, target);
		cpu_send_ipiq(target->gd_cpuid);
	    }
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    if (repeating++ > 10)
		pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
#if defined(__x86_64__)
	write_rflags(rflags);
#else
#error "no write_*flags"
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu; it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();
    logipiq(send_end, func, arg1, arg2, gd, target);

    return(ip->ip_windex);
}

/*
 * Send an IPI request without blocking; returns 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
    ++ipiq_stat(gd).ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	--gd->gd_intr_nesting_level;
	crit_exit();
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_info[windex].func = func;
    ip->ip_info[windex].arg1 = arg1;
    ip->ip_info[windex].arg2 = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    ATOMIC_CPUMASK_ORBIT(target->gd_ipimask, gd->gd_cpuid);

    /*
     * This isn't a passive IPI; we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&target->gd_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	++ipiq_stat(gd).ipiq_avoided;
    }
    --gd->gd_intr_nesting_level;
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}

/*
 * Deprecated; used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
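
/*
 * Illustrative sketch (not compiled): a typical use of the send API above.
 * my_remote_func() is a hypothetical callback; lwkt_send_ipiq() is the
 * one-argument wrapper used elsewhere in this file, and lwkt_wait_ipiq()
 * below can be used to wait for the remote execution to complete.
 */
#if 0
static void
my_remote_func(void *arg)		/* hypothetical callback */
{
    /* runs on the target cpu, inside a critical section */
}

static void
my_example(globaldata_t target)
{
    int seq;

    seq = lwkt_send_ipiq(target, my_remote_func, NULL);
    crit_enter();
    lwkt_wait_ipiq(target, seq);	/* optional: wait for completion */
    crit_exit();
}
#endif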

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    CPUMASK_NANDMASK(mask, stopped_cpus);
    while (CPUMASK_TESTNZERO(mask)) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	CPUMASK_NANDBIT(mask, cpuid);
	++count;
    }
    return(count);
}

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#else
#error "no read_*flags"
#endif
	    int64_t time_tgt = tsc_get_target(1000000000LL);
	    int time_loops = 10;
	    int benice = 0;
#ifdef _KERNEL_VIRTUAL
	    int repeating = 0;
#endif

	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
#ifdef _KERNEL_VIRTUAL
		if (repeating++ > 10)
		    pthread_yield();
#endif

		/*
		 * IPIQs must be handled within 10 seconds and this code
		 * will warn after one second.
		 */
		if ((benice & 255) == 0 && tsc_test_target(time_tgt) > 0) {
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		    if (--time_loops == 0)
			panic("LWKT_WAIT_IPIQ");
		    time_tgt = tsc_get_target(1000000000LL);
		}
		++benice;

		/*
		 * xindex may be modified by another cpu; use a load fence
		 * to ensure that the loop does not use a stale speculative
		 * value (which may improve performance).
		 */
		cpu_pause();
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__x86_64__)
	    write_rflags(rflags);
#else
#error "no write_*flags"
#endif
	}
    }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions: one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

    ++gd->gd_processing_ipiq;
again:
    cpu_lfence();
    mask = gd->gd_ipimask;
    ATOMIC_CPUMASK_NANDMASK(gd->gd_ipimask, mask);
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }

    /*
     * Process pending cpusyncs.  If the current thread has an active
     * cpusync we only run the list once and do not re-flag it, as the
     * thread itself is processing its interlock.
     */
    if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	if (gd->gd_curthread->td_cscount == 0)
	    goto again;
	/* need_ipiq(); do not reflag */
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
    atomic_poll_release_int(&gd->gd_npoll);
    cpu_mfence();
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
    --gd->gd_processing_ipiq;
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    cpumask_t mask;
    int n;

again:
    cpu_lfence();
    mask = gd->gd_ipimask;
    ATOMIC_CPUMASK_NANDMASK(gd->gd_ipimask, mask);
    while (CPUMASK_TESTNZERO(mask)) {
	n = BSFCPUMASK(mask);
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
	CPUMASK_NANDBIT(mask, n);
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	    /* need_ipiq(); do not reflag */
	}
    }

    /*
     * Interlock to allow more IPI interrupts.  Recheck ipimask after
     * releasing gd_npoll.
     */
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
    atomic_poll_release_int(&gd->gd_npoll);
    cpu_mfence();
    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	goto again;
}

#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif
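
/*
 * A note on the FIFO indices used below: ip_windex (writer), ip_rindex
 * (reader), and ip_xindex (execution complete) are free-running counters.
 * The number of queued entries is (ip_windex - ip_rindex) and the slot for
 * a given index is (index & MAXCPUFIFO_MASK).  ip_xindex only advances
 * after the queued function has finished executing, which is what
 * lwkt_wait_ipiq() polls on.
 */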

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

#if 0
    if (iqticks[mygd->gd_cpuid] != ticks) {
	iqticks[mygd->gd_cpuid] = ticks;
	iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks);
	iqcount[mygd->gd_cpuid] = 0;
#if 0
	if (++iqterm[mygd->gd_cpuid] > 10)
	    panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
	int i;
	for (i = 0; i < ncpus; ++i) {
	    if (globaldata_find(i)->gd_infomsg)
		kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }
#endif

    /*
     * Clear the originating core from our ipimask; we will process all
     * incoming messages.
     *
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     *
     * NOTE: Single pass only.  Returns non-zero if the queue is not empty
     *	     on return.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_info[ri].func;
	copy_arg1 = ip->ip_info[ri].arg1;
	copy_arg2 = ip->ip_info[ri].arg2;
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__x86_64__)
		    (frame ? (void *)frame->if_rip : NULL));
#else
		    NULL);
#endif
	}
#endif
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there is still more in the queue.
     */
    cpu_lfence();
    return (ip->ip_rindex != ip->ip_windex);
}

static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    ATOMIC_CPUMASK_NANDBIT(*cpumask, mycpu->gd_cpuid);
    if (CPUMASK_TESTZERO(*cpumask))
	wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = smp_active_mask;
    CPUMASK_ANDMASK(other_cpumask, mycpu->gd_other_cpus);
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (CPUMASK_TESTNZERO(other_cpumask)) {
	tsleep_interlock(&other_cpumask, 0);
	if (CPUMASK_TESTNZERO(other_cpumask))
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}

void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();
    }
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cpu_ccfence();
    CPUMASK_ASSZERO(cs->cs_mack);
    cpu_ccfence();
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}
	DEBUG_POP_INFO();

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, (long)CPUMASK_LOWMASK(mask));
    }
    crit_exit_id("cpusync");
}
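
/*
 * Illustrative sketch (not compiled): the usual interlock/deinterlock
 * pattern, equivalent to lwkt_cpusync_simple() above but with work done
 * while the other cpus are quiesced.  my_invalidate() and my_update() are
 * hypothetical.
 */
#if 0
static void
my_invalidate(void *arg)		/* hypothetical cpusync_func_t */
{
    /* runs on every cpu in the mask during stage 2 */
}

static void
my_update(void)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, smp_active_mask, my_invalidate, NULL);
    lwkt_cpusync_interlock(&cs);	/* targets spin quiesced in remote2 */
    /* ... modify shared data structures here ... */
    lwkt_cpusync_deinterlock(&cs);	/* targets run my_invalidate(), resume */
}
#endif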

/*
 * The quick version does not quiesce the target cpu(s) but instead executes
 * the function on the target cpu(s) and waits for all to acknowledge.  This
 * avoids spinning on the target cpus.
 *
 * This function is typically only used for kernel_pmap updates.  User pmaps
 * have to be quiesced.
 */
void
lwkt_cpusync_quick(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * stage-2 cs_mack only.
     */
    mask = cs->cs_mask;
    CPUMASK_ANDMASK(mask, gd->gd_other_cpus);
    CPUMASK_ANDMASK(mask, smp_active_mask);
    CPUMASK_ASSZERO(cs->cs_mack);

    crit_enter_id("cpusync");
    if (CPUMASK_TESTNZERO(mask)) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_stat(gd).ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote2, cs);
	logipiq2(sync_quick, (long)CPUMASK_LOWMASK(mask));
	while (CPUMASK_CMPMASKNEQ(cs->cs_mack, mask)) {
	    lwkt_process_ipiq();
	    cpu_pause();
#ifdef _KERNEL_VIRTUAL
	    pthread_yield();
#endif
	}

	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	DEBUG_POP_INFO();
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
    }
    if (cs->cs_func && CPUMASK_TESTBIT(cs->cs_mask, gd->gd_cpuid))
	cs->cs_func(cs->cs_data);
    crit_exit_id("cpusync");
}

/*
 * helper IPI remote messaging function.
 *
 * Called on the remote cpu when a new cpu synchronization request has been
 * sent to us.  Set our bit in cs_mack to acknowledge stage 1, then fall
 * into the stage-2 poll via lwkt_cpusync_remote2().
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
    lwkt_cpusync_remote2(cs);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if (CPUMASK_TESTMASK(cs->cs_mack, gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	ATOMIC_CPUMASK_ORBIT(cs->cs_mack, gd->gd_cpuid);
	/* cs can be ripped out at this point */
    } else {
	lwkt_ipiq_t ip;
	int wi;

	cpu_pause();
#ifdef _KERNEL_VIRTUAL
	pthread_yield();
#endif
	cpu_lfence();

	/*
	 * Requeue our IPI to avoid a deep stack recursion.  If no other
	 * IPIs are pending we can just loop up, which should help VMs
	 * better detect spin loops.
	 */
	ip = &gd->gd_cpusyncq;
#if 0
	if (ip->ip_rindex == ip->ip_windex) {
	    __asm __volatile("cli");
	    if (ip->ip_rindex == ip->ip_windex) {
		__asm __volatile("sti; hlt");
	    } else {
		__asm __volatile("sti");
	    }
	}
#endif

	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_info[wi].arg1 = cs;
	ip->ip_info[wi].arg2 = 0;
	cpu_sfence();
	KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);
	++ip->ip_windex;
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mask),
		    (intmax_t)CPUMASK_LOWMASK(cs->cs_mack),
		    cs->cs_func);
	}
    }
}

#define LWKT_IPIQ_NLATENCY	8
#define LWKT_IPIQ_NLATENCY_MASK	(LWKT_IPIQ_NLATENCY - 1)

struct lwkt_ipiq_latency_log {
    int		idx;	/* unmasked index */
    int		pad;
    uint64_t	latency[LWKT_IPIQ_NLATENCY];
};

static struct lwkt_ipiq_latency_log	lwkt_ipiq_latency_logs[MAXCPU];
static uint64_t save_tsc;

/*
 * IPI callback (already in a critical section)
 */
static void
lwkt_ipiq_latency_testfunc(void *arg __unused)
{
    uint64_t delta_tsc;
    struct globaldata *gd;
    struct lwkt_ipiq_latency_log *lat;

    /*
     * Get delta TSC (assume TSCs are synchronized) as quickly as
     * possible and then convert to nanoseconds.
     */
    delta_tsc = rdtsc_ordered() - save_tsc;
    delta_tsc = delta_tsc * 1000000000LU / tsc_frequency;

    /*
     * Record in our save array.
     */
    gd = mycpu;
    lat = &lwkt_ipiq_latency_logs[gd->gd_cpuid];
    lat->latency[lat->idx & LWKT_IPIQ_NLATENCY_MASK] = delta_tsc;
    ++lat->idx;
}
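
/*
 * Illustrative usage note (assumes a normal DragonFly userland): writing a
 * target cpuid to the debug.ipiq.latency_test sysctl registered below, e.g.
 *
 *	sysctl debug.ipiq.latency_test=1
 *
 * sends one IPI from cpu0 to cpu1 using the machinery above.  The per-cpu
 * debug.ipiq.latencyN nodes registered at boot then expose the last
 * LWKT_IPIQ_NLATENCY measurements, converted to nanoseconds.
 */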

/*
 * Send an IPI from cpu0 to the specified remote cpu.
 *
 * NOTE: The machine must be idle for the test to run dependably, and it is
 *	 also probably a good idea not to be running powerd.
 *
 * NOTE: Caller should use 'usched :1 <command>' to lock itself to cpu 0.
 *	 See the 'ipitest' script in /usr/src/test/sysperf/ipitest
 */
static int
lwkt_ipiq_latency_test(SYSCTL_HANDLER_ARGS)
{
    struct globaldata *gd;
    int cpu = 0, orig_cpu, error;

    error = sysctl_handle_int(oidp, &cpu, arg2, req);
    if (error || req->newptr == NULL)
	return error;

    if (cpu == 0)
	return 0;
    else if (cpu >= ncpus || cpu < 0)
	return EINVAL;

    orig_cpu = mycpuid;
    lwkt_migratecpu(0);

    gd = globaldata_find(cpu);

    save_tsc = rdtsc_ordered();
    lwkt_send_ipiq(gd, lwkt_ipiq_latency_testfunc, NULL);

    lwkt_migratecpu(orig_cpu);
    return 0;
}

SYSCTL_NODE(_debug, OID_AUTO, ipiq, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_debug_ipiq, OID_AUTO, latency_test, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, lwkt_ipiq_latency_test, "I",
    "ipi latency test, arg: remote cpuid");

static int
lwkt_ipiq_latency(SYSCTL_HANDLER_ARGS)
{
    struct lwkt_ipiq_latency_log *latency = arg1;
    uint64_t lat[LWKT_IPIQ_NLATENCY];
    int i;

    for (i = 0; i < LWKT_IPIQ_NLATENCY; ++i)
	lat[i] = latency->latency[i];

    return sysctl_handle_opaque(oidp, lat, sizeof(lat), req);
}

static void
lwkt_ipiq_latency_init(void *dummy __unused)
{
    int cpu;

    for (cpu = 0; cpu < ncpus; ++cpu) {
	char name[32];

	ksnprintf(name, sizeof(name), "latency%d", cpu);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_debug_ipiq),
	    OID_AUTO, name, CTLTYPE_OPAQUE | CTLFLAG_RD,
	    &lwkt_ipiq_latency_logs[cpu], 0, lwkt_ipiq_latency,
	    "LU", "8 latest ipi latency measurement results");
    }
}
SYSINIT(lwkt_ipiq_latency, SI_SUB_CONFIGURE, SI_ORDER_ANY,
    lwkt_ipiq_latency_init, NULL);