/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */
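
/*
 * Rough data flow, summarizing the routines below: each cpu owns one
 * lwkt_ipiq FIFO per possible target cpu (the cpu<->cpu ipiq matrix).
 * A sender queues a function/argument triple into its FIFO for the
 * target and raises a hardware IPI only when the target is not already
 * processing its queues (the gd_npoll interlock).  The target drains
 * the FIFOs in lwkt_process_ipiq*(), advancing ip_rindex as messages
 * are consumed and ip_xindex as their functions complete.
 */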

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>

struct ipiq_stats {
        __int64_t ipiq_count;           /* total calls to lwkt_send_ipiq*() */
        __int64_t ipiq_fifofull;        /* number of fifo full conditions detected */
        __int64_t ipiq_avoided;         /* interlock with target avoids cpu ipi */
        __int64_t ipiq_passive;         /* passive IPI messages */
        __int64_t ipiq_cscount;         /* number of cpu synchronizations */
} __cachealign;

static struct ipiq_stats ipiq_stats_percpu[MAXCPU];
#define ipiq_stat(gd)   ipiq_stats_percpu[(gd)->gd_cpuid]

static int ipiq_debug;          /* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif

SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING     "func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARGS       void *func, void *arg1, int arg2, int scpu, int dcpu

#if !defined(KTR_IPIQ)
#define KTR_IPIQ        KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08lx", unsigned long mask);
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARGS);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARGS);

#define logipiq(name, func, arg1, arg2, sgd, dgd)       \
        KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)                             \
        KTR_LOG(ipiq_ ## name, arg)

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
                                  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

#define IPIQ_SYSCTL(name)                                       \
static int                                                      \
sysctl_##name(SYSCTL_HANDLER_ARGS)                              \
{                                                               \
        __int64_t val = 0;                                      \
        int cpu, error;                                         \
                                                                \
        for (cpu = 0; cpu < ncpus; ++cpu)                       \
                val += ipiq_stats_percpu[cpu].name;             \
                                                                \
        error = sysctl_handle_quad(oidp, &val, 0, req);         \
        if (error || req->newptr == NULL)                       \
                return error;                                   \
                                                                \
        for (cpu = 0; cpu < ncpus; ++cpu)                       \
                ipiq_stats_percpu[cpu].name = val;              \
                                                                \
        return 0;                                               \
}

IPIQ_SYSCTL(ipiq_count);
IPIQ_SYSCTL(ipiq_fifofull);
IPIQ_SYSCTL(ipiq_avoided);
IPIQ_SYSCTL(ipiq_passive);
IPIQ_SYSCTL(ipiq_cscount);
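
/*
 * Note on the handlers generated above: a read returns the sum of the
 * per-cpu counters; a write stores the written value into every cpu's
 * counter, so writing 0 is the sensible way to reset a statistic.
 */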

SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_count, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_count, "Q", "Number of IPI's sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_fifofull, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_fifofull, "Q",
    "Number of fifo full conditions detected");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_avoided, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_avoided, "Q",
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_passive, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_passive, "Q",
    "Number of passive IPI messages sent");
SYSCTL_PROC(_lwkt, OID_AUTO, ipiq_cscount, (CTLTYPE_QUAD | CTLFLAG_RW),
    0, 0, sysctl_ipiq_cscount, "Q",
    "Number of cpu synchronizations");
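
/*
 * Illustrative use of the send API below (hypothetical callback, not from
 * this file).  The callback runs on the target cpu with the ipifunc3_t
 * signature; frame is NULL unless the message is drained from an actual
 * interrupt.
 *
 *      static void
 *      my_remote_func(void *arg1, int arg2, struct intrframe *frame)
 *      {
 *              ...
 *      }
 *
 *      lwkt_send_ipiq3(globaldata_find(cpuid), my_remote_func, arg, 0);
 */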

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu, so the sender can write to its FIFO without locking.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
        lwkt_ipiq_t ip;
        int windex;
        struct globaldata *gd = mycpu;

        logipiq(send_norm, func, arg1, arg2, gd, target);

        if (target == gd) {
                func(arg1, arg2, NULL);
                logipiq(send_end, func, arg1, arg2, gd, target);
                return(0);
        }
        crit_enter();
        ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
        if (gd->gd_intr_nesting_level > 20)
                panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
        KKASSERT(curthread->td_critcount);
        ++ipiq_stat(gd).ipiq_count;
        ip = &gd->gd_ipiq[target->gd_cpuid];

        /*
         * Do not allow the FIFO to become full.  Interrupts must be
         * physically enabled while we liveloop to avoid deadlocking the
         * APIC.
         *
         * The target ipiq may have gotten filled up due to passive IPIs
         * and thus not be aware that its queue is too full, so be sure to
         * issue an ipiq interrupt to the target cpu.
         */
        if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
                unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
                unsigned long rflags = read_rflags();
#endif

                cpu_enable_intr();
                ++ipiq_stat(gd).ipiq_fifofull;
                DEBUG_PUSH_INFO("send_ipiq3");
                while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
                        if (atomic_poll_acquire_int(&target->gd_npoll)) {
                                logipiq(cpu_send, func, arg1, arg2, gd, target);
                                cpu_send_ipiq(target->gd_cpuid);
                        }
                        KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
                        lwkt_process_ipiq();
                        cpu_pause();
                }
                DEBUG_POP_INFO();
#if defined(__i386__)
                write_eflags(eflags);
#elif defined(__x86_64__)
                write_rflags(rflags);
#endif
        }

        /*
         * Queue the new message
         */
        windex = ip->ip_windex & MAXCPUFIFO_MASK;
        ip->ip_info[windex].func = func;
        ip->ip_info[windex].arg1 = arg1;
        ip->ip_info[windex].arg2 = arg2;
        cpu_sfence();
        ++ip->ip_windex;
        atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);

        /*
         * Signal the target cpu that there is work pending.
         */
        if (atomic_poll_acquire_int(&target->gd_npoll)) {
                logipiq(cpu_send, func, arg1, arg2, gd, target);
                cpu_send_ipiq(target->gd_cpuid);
        } else {
                ++ipiq_stat(gd).ipiq_avoided;
        }
        --gd->gd_intr_nesting_level;
        crit_exit();
        logipiq(send_end, func, arg1, arg2, gd, target);

        return(ip->ip_windex);
}
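
/*
 * A note on the FIFO protocol used above (summarized from the code): the
 * sender fills ip_info[windex] and only then increments ip_windex, with
 * cpu_sfence() ordering the stores so the consumer never observes the new
 * windex before the message contents.  Because each FIFO has exactly one
 * producer (the owning cpu) and one consumer (the target), no lock is
 * required.
 */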

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
                        void *arg1, int arg2)
{
        lwkt_ipiq_t ip;
        int windex;
        struct globaldata *gd = mycpu;

        KKASSERT(target != gd);
        crit_enter();
        ++gd->gd_intr_nesting_level;
        logipiq(send_pasv, func, arg1, arg2, gd, target);
#ifdef INVARIANTS
        if (gd->gd_intr_nesting_level > 20)
                panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
        KKASSERT(curthread->td_critcount);
        ++ipiq_stat(gd).ipiq_count;
        ++ipiq_stat(gd).ipiq_passive;
        ip = &gd->gd_ipiq[target->gd_cpuid];

        /*
         * Do not allow the FIFO to become full.  Interrupts must be
         * physically enabled while we liveloop to avoid deadlocking the
         * APIC.
         */
        if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
                unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
                unsigned long rflags = read_rflags();
#endif

                cpu_enable_intr();
                ++ipiq_stat(gd).ipiq_fifofull;
                DEBUG_PUSH_INFO("send_ipiq3_passive");
                while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
                        if (atomic_poll_acquire_int(&target->gd_npoll)) {
                                logipiq(cpu_send, func, arg1, arg2, gd, target);
                                cpu_send_ipiq(target->gd_cpuid);
                        }
                        KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
                        lwkt_process_ipiq();
                        cpu_pause();
                }
                DEBUG_POP_INFO();
#if defined(__i386__)
                write_eflags(eflags);
#elif defined(__x86_64__)
                write_rflags(rflags);
#endif
        }

        /*
         * Queue the new message
         */
        windex = ip->ip_windex & MAXCPUFIFO_MASK;
        ip->ip_info[windex].func = func;
        ip->ip_info[windex].arg1 = arg1;
        ip->ip_info[windex].arg2 = arg2;
        cpu_sfence();
        ++ip->ip_windex;
        atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);
        --gd->gd_intr_nesting_level;

        /*
         * Do not signal the target cpu, it will pick up the IPI when it
         * next polls (typically on the next tick).
         */
        crit_exit();
        logipiq(send_end, func, arg1, arg2, gd, target);

        return(ip->ip_windex);
}

/*
 * Send an IPI request without blocking; returns 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
                       void *arg1, int arg2)
{
        lwkt_ipiq_t ip;
        int windex;
        struct globaldata *gd = mycpu;

        logipiq(send_nbio, func, arg1, arg2, gd, target);
        KKASSERT(curthread->td_critcount);
        if (target == gd) {
                func(arg1, arg2, NULL);
                logipiq(send_end, func, arg1, arg2, gd, target);
                return(0);
        }
        crit_enter();
        ++gd->gd_intr_nesting_level;
        ++ipiq_stat(gd).ipiq_count;
        ip = &gd->gd_ipiq[target->gd_cpuid];

        if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
                logipiq(send_fail, func, arg1, arg2, gd, target);
                --gd->gd_intr_nesting_level;
                crit_exit();
                return(ENOENT);
        }
        windex = ip->ip_windex & MAXCPUFIFO_MASK;
        ip->ip_info[windex].func = func;
        ip->ip_info[windex].arg1 = arg1;
        ip->ip_info[windex].arg2 = arg2;
        cpu_sfence();
        ++ip->ip_windex;
        atomic_set_cpumask(&target->gd_ipimask, gd->gd_cpumask);

        /*
         * This isn't a passive IPI, we still have to signal the target cpu.
         */
        if (atomic_poll_acquire_int(&target->gd_npoll)) {
                logipiq(cpu_send, func, arg1, arg2, gd, target);
                cpu_send_ipiq(target->gd_cpuid);
        } else {
                ++ipiq_stat(gd).ipiq_avoided;
        }
        --gd->gd_intr_nesting_level;
        crit_exit();

        logipiq(send_end, func, arg1, arg2, gd, target);
        return(0);
}

/*
 * Deprecated; used only by fast interrupt forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
        return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
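
/*
 * Illustrative caller pattern for the nowait variant (hypothetical, not
 * from this file): try the non-blocking send and fall back to the
 * blocking path when the target FIFO is too full.
 *
 *      if (lwkt_send_ipiq3_nowait(target, my_remote_func, arg, 0) == ENOENT)
 *              lwkt_send_ipiq3(target, my_remote_func, arg, 0);
 */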

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
        int cpuid;
        int count = 0;

        mask &= ~stopped_cpus;
        while (mask) {
                cpuid = BSFCPUMASK(mask);
                lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
                mask &= ~CPUMASK(cpuid);
                ++count;
        }
        return(count);
}

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
        lwkt_ipiq_t ip;
        int maxc = 100000000;

        if (target != mycpu) {
                ip = &mycpu->gd_ipiq[target->gd_cpuid];
                if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
                        unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
                        unsigned long rflags = read_rflags();
#endif
                        cpu_enable_intr();
                        DEBUG_PUSH_INFO("wait_ipiq");
                        while ((int)(ip->ip_xindex - seq) < 0) {
                                crit_enter();
                                lwkt_process_ipiq();
                                crit_exit();
                                if (--maxc == 0)
                                        kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
                                                mycpu->gd_cpuid,
                                                target->gd_cpuid,
                                                ip->ip_xindex - seq);
                                if (maxc < -1000000)
                                        panic("LWKT_WAIT_IPIQ");
                                /*
                                 * xindex may be modified by another cpu,
                                 * use a load fence to ensure that the loop
                                 * does not use a speculative value (which
                                 * may improve performance).
                                 */
                                cpu_lfence();
                        }
                        DEBUG_POP_INFO();
#if defined(__i386__)
                        write_eflags(eflags);
#elif defined(__x86_64__)
                        write_rflags(rflags);
#endif
                }
        }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
        lwkt_ipiq_t ip;

        ip = &mycpu->gd_ipiq[target->gd_cpuid];
        return(ip->ip_windex);
}
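
/*
 * Illustrative pairing of the send sequence number with lwkt_wait_ipiq()
 * (hypothetical, not from this file).  lwkt_send_ipiq3() returns the
 * post-increment ip_windex; lwkt_wait_ipiq() spins until the target's
 * ip_xindex catches up to it, i.e. until the function has finished
 * executing remotely.
 *
 *      crit_enter();
 *      seq = lwkt_send_ipiq3(target, my_remote_func, arg, 0);
 *      lwkt_wait_ipiq(target, seq);
 *      crit_exit();
 */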

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_info[].func we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
        globaldata_t gd = mycpu;
        globaldata_t sgd;
        lwkt_ipiq_t ip;
        cpumask_t mask;
        int n;

        ++gd->gd_processing_ipiq;
again:
        cpu_lfence();
        mask = gd->gd_ipimask;
        atomic_clear_cpumask(&gd->gd_ipimask, mask);
        while (mask) {
                n = BSFCPUMASK(mask);
                if (n != gd->gd_cpuid) {
                        sgd = globaldata_find(n);
                        ip = sgd->gd_ipiq;
                        if (ip != NULL) {
                                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
                                        ;
                        }
                }
                mask &= ~CPUMASK(n);
        }

        /*
         * Process pending cpusyncs.  If the current thread has an active
         * cpusync we only run the list once and do not re-flag, as the
         * thread itself is processing its interlock.
         */
        if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
                if (gd->gd_curthread->td_cscount == 0)
                        goto again;
                /* need_ipiq(); do not reflag */
        }

        /*
         * Interlock to allow more IPI interrupts.  Recheck ipimask after
         * releasing gd_npoll.
         */
        if (gd->gd_ipimask)
                goto again;
        atomic_poll_release_int(&gd->gd_npoll);
        cpu_mfence();
        if (gd->gd_ipimask)
                goto again;
        --gd->gd_processing_ipiq;
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
        globaldata_t gd = mycpu;
        globaldata_t sgd;
        lwkt_ipiq_t ip;
        cpumask_t mask;
        int n;

again:
        cpu_lfence();
        mask = gd->gd_ipimask;
        atomic_clear_cpumask(&gd->gd_ipimask, mask);
        while (mask) {
                n = BSFCPUMASK(mask);
                if (n != gd->gd_cpuid) {
                        sgd = globaldata_find(n);
                        ip = sgd->gd_ipiq;
                        if (ip != NULL) {
                                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
                                        ;
                        }
                }
                mask &= ~CPUMASK(n);
        }
        if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
                if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
                        if (gd->gd_curthread->td_cscount == 0)
                                goto again;
                        /* need_ipiq(); do not reflag */
                }
        }

        /*
         * Interlock to allow more IPI interrupts.  Recheck ipimask after
         * releasing gd_npoll.
         */
        if (gd->gd_ipimask)
                goto again;
        atomic_poll_release_int(&gd->gd_npoll);
        cpu_mfence();
        if (gd->gd_ipimask)
                goto again;
}
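
/*
 * A note on the gd_npoll interlock used above (summarized from the code):
 * senders acquire the target's gd_npoll before raising a hardware IPI, so
 * at most one physical IPI is outstanding while the target is draining its
 * queues.  The drain loops above release gd_npoll only when gd_ipimask has
 * been observed clear, and recheck it after the release (with cpu_mfence())
 * so a message queued during the release window cannot be stranded.
 */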

#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
                       struct intrframe *frame)
{
        globaldata_t mygd = mycpu;
        int ri;
        int wi;
        ipifunc3_t copy_func;
        void *copy_arg1;
        int copy_arg2;

#if 0
        if (iqticks[mygd->gd_cpuid] != ticks) {
                iqticks[mygd->gd_cpuid] = ticks;
                iqcount[mygd->gd_cpuid] = 0;
        }
        if (++iqcount[mygd->gd_cpuid] > 3000000) {
                kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
                        mygd->gd_cpuid,
                        mygd->gd_curthread->td_cscount,
                        mygd->gd_spinlocks);
                iqcount[mygd->gd_cpuid] = 0;
#if 0
                if (++iqterm[mygd->gd_cpuid] > 10)
                        panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
                int i;
                for (i = 0; i < ncpus; ++i) {
                        if (globaldata_find(i)->gd_infomsg)
                                kprintf(" %s", globaldata_find(i)->gd_infomsg);
                }
                kprintf("\n");
        }
#endif

        /*
         * Clear the originating core from our ipimask; we will process all
         * incoming messages.
         *
         * Obtain the current write index, which is modified by a remote cpu.
         * Issue a load fence to prevent speculative reads of e.g. data written
         * by the other cpu prior to it updating the index.
         */
        KKASSERT(curthread->td_critcount);
        wi = ip->ip_windex;
        cpu_lfence();
        ++mygd->gd_intr_nesting_level;

        /*
         * NOTE: xindex is only updated after we are sure the function has
         *       finished execution.  Beware lwkt_process_ipiq() reentrancy!
         *       The function may send an IPI which may block/drain.
         *
         * NOTE: Due to additional IPI operations that the callback function
         *       may make, it is possible for both rindex and windex to advance
         *       and thus for rindex to advance past our cached windex.
         *
         * NOTE: A load fence is required to prevent speculative loads prior
         *       to the loading of ip_rindex.  Even though stores might be
         *       ordered, loads are probably not.  A memory fence is required
         *       to prevent reordering of the loads after the ip_rindex update.
         *
         * NOTE: Single pass only.  Returns non-zero if the queue is not empty
         *       on return.
         */
        while (wi - (ri = ip->ip_rindex) > 0) {
                ri &= MAXCPUFIFO_MASK;
                cpu_lfence();
                copy_func = ip->ip_info[ri].func;
                copy_arg1 = ip->ip_info[ri].arg1;
                copy_arg2 = ip->ip_info[ri].arg2;
                cpu_mfence();
                ++ip->ip_rindex;
                KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
                         ((ri + 1) & MAXCPUFIFO_MASK));
                logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
                if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
                        kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
                                mycpu->gd_cpuid,
                                copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
                                (frame ? (void *)frame->if_eip : NULL));
#elif defined(__x86_64__)
                                (frame ? (void *)frame->if_rip : NULL));
#else
                                NULL);
#endif
                }
#endif
                copy_func(copy_arg1, copy_arg2, frame);
                cpu_sfence();
                ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
                /*
                 * Simulate panics during the processing of an IPI
                 */
                if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
                        if (--panic_ipiq_count == 0) {
#ifdef DDB
                                Debugger("PANIC_DEBUG");
#else
                                panic("PANIC_DEBUG");
#endif
                        }
                }
#endif
        }
        --mygd->gd_intr_nesting_level;

        /*
         * Return non-zero if there is still more in the queue.
         */
        cpu_lfence();
        return (ip->ip_rindex != ip->ip_windex);
}

static void
lwkt_sync_ipiq(void *arg)
{
        volatile cpumask_t *cpumask = arg;

        atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
        if (*cpumask == 0)
                wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
        volatile cpumask_t other_cpumask;

        other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
        lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
                            __DEVOLATILE(void *, &other_cpumask));

        while (other_cpumask != 0) {
                tsleep_interlock(&other_cpumask, 0);
                if (other_cpumask != 0)
                        tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
        }
}
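
/*
 * Illustrative use of the cpusync API below (hypothetical callback, not
 * from this file): run a function on a set of cpus while they are held
 * in a quiescent state.
 *
 *      lwkt_cpusync_simple(smp_active_mask, my_sync_func, arg);
 *
 * The handshake, as implemented below: interlock IPIs remote1 to the
 * target cpus, which acknowledge by setting their bit in cs_mack and
 * then poll; deinterlock clears cs_mack, each target runs cs_func and
 * sets its bit again, and the originator waits for the mask to refill.
 */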

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()     - Place specified cpus in a quiescent state.
 *                                The current cpu is placed in a hard critical
 *                                section.
 *
 * lwkt_cpusync_deinterlock()   - Execute cs_func on specified cpus, including
 *                                current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
        struct lwkt_cpusync cs;

        lwkt_cpusync_init(&cs, mask, func, arg);
        lwkt_cpusync_interlock(&cs);
        lwkt_cpusync_deinterlock(&cs);
}

void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
#if 0
        const char *smsg = "SMPSYNL";
#endif
        globaldata_t gd = mycpu;
        cpumask_t mask;

        /*
         * mask acknowledge (cs_mack): 0->mask for stage 1
         *
         * mack does not include the current cpu.
         */
        mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
        cs->cs_mack = 0;
        crit_enter_id("cpusync");
        if (mask) {
                DEBUG_PUSH_INFO("cpusync_interlock");
                ++ipiq_stat(gd).ipiq_cscount;
                ++gd->gd_curthread->td_cscount;
                lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
                logipiq2(sync_start, (long)mask);
#if 0
                if (gd->gd_curthread->td_wmesg == NULL)
                        gd->gd_curthread->td_wmesg = smsg;
#endif
                while (cs->cs_mack != mask) {
                        lwkt_process_ipiq();
                        cpu_pause();
                }
#if 0
                if (gd->gd_curthread->td_wmesg == smsg)
                        gd->gd_curthread->td_wmesg = NULL;
#endif
                DEBUG_POP_INFO();
        }
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
        globaldata_t gd = mycpu;
#if 0
        const char *smsg = "SMPSYNU";
#endif
        cpumask_t mask;

        /*
         * mask acknowledge (cs_mack): mack->0->mack for stage 2
         *
         * Clearing cpu bits for polling cpus in cs_mack will cause them to
         * execute stage 2, which executes the cs_func(cs_data) and then sets
         * their bit in cs_mack again.
         *
         * mack does not include the current cpu.
         */
        mask = cs->cs_mack;
        cpu_ccfence();
        cs->cs_mack = 0;
        cpu_ccfence();
        if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
                cs->cs_func(cs->cs_data);
        if (mask) {
                DEBUG_PUSH_INFO("cpusync_deinterlock");
#if 0
                if (gd->gd_curthread->td_wmesg == NULL)
                        gd->gd_curthread->td_wmesg = smsg;
#endif
                while (cs->cs_mack != mask) {
                        lwkt_process_ipiq();
                        cpu_pause();
                }
#if 0
                if (gd->gd_curthread->td_wmesg == smsg)
                        gd->gd_curthread->td_wmesg = NULL;
#endif
                DEBUG_POP_INFO();
                /*
                 * cpusyncq ipis may be left queued without the RQF flag set
                 * due to a non-zero td_cscount, so be sure to process any
                 * laggards after decrementing td_cscount.
                 */
                --gd->gd_curthread->td_cscount;
                lwkt_process_ipiq();
                logipiq2(sync_end, (long)mask);
        }
        crit_exit_id("cpusync");
}

/*
 * Helper IPI remote messaging function.
 *
 * Called on a remote cpu when a new cpu synchronization request has been
 * sent to us.  Acknowledge the request by setting our bit in cs_mack, then
 * enter the stage-2 polling loop via lwkt_cpusync_remote2().
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
        globaldata_t gd = mycpu;

        atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
        lwkt_cpusync_remote2(cs);
}
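
/*
 * Design note, as suggested by the surrounding comments: remote2 does not
 * busy-wait inside the IPI handler.  While the originator has not yet
 * cleared our bit, the request is requeued on the local cpusyncq so that
 * doreti/splz and lwkt_process_ipiq() keep re-polling it, leaving the cpu
 * responsive while it waits for the deinterlock.
 */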

/*
 * Helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
        globaldata_t gd = mycpu;

        if ((cs->cs_mack & gd->gd_cpumask) == 0) {
                if (cs->cs_func)
                        cs->cs_func(cs->cs_data);
                atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
                /* cs can be ripped out at this point */
        } else {
                lwkt_ipiq_t ip;
                int wi;

                ip = &gd->gd_cpusyncq;
                wi = ip->ip_windex & MAXCPUFIFO_MASK;
                ip->ip_info[wi].func = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
                ip->ip_info[wi].arg1 = cs;
                ip->ip_info[wi].arg2 = 0;
                cpu_sfence();
                KKASSERT(ip->ip_windex - ip->ip_rindex < MAXCPUFIFO);
                ++ip->ip_windex;
                if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
                        kprintf("cpu %d cm=%016jx %016jx f=%p\n",
                                gd->gd_cpuid,
                                (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
                                cs->cs_func);
                }
        }
}