/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.27 2008/05/18 20:57:56 nth Exp $
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#ifdef SMP
static __int64_t ipiq_count;	/* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull;	/* number of fifo full conditions detected */
static __int64_t ipiq_avoided;	/* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;	/* passive IPI messages */
static __int64_t ipiq_cscount;	/* number of cpu synchronizations */
static int ipiq_optimized = 1;	/* XXX temporary sysctl */
static int ipiq_debug;		/* set to 1 for debug */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif
#endif

#ifdef SMP
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0,
    "Number of IPI's sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0,
    "Number of fifo full conditions detected");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0,
    "Number of IPI's avoided by interlock with target cpu");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0,
    "Number of passive IPI messages sent");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0,
    "Number of cpu synchronizations");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0,
    "");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_debug, CTLFLAG_RW, &ipiq_debug, 0,
    "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING	"func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE	(sizeof(void *) * 2 + sizeof(int) * 3)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ	KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, sync_start, 5, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, sync_end, 6, "cpumask=%08x", sizeof(cpumask_t));
KTR_INFO(KTR_IPIQ, ipiq, cpu_send, 7, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_end, 8, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)	\
	KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)
#define logipiq2(name, arg)	\
	KTR_LOG(ipiq_ ## name, arg)

#endif	/* SMP */

#ifdef SMP

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
				  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t cs);
static void lwkt_cpusync_remote2(lwkt_cpusync_t cs);

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu; only the owning (sending) cpu ever writes to a given
 * FIFO, so the send path requires no locking.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
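
/*
 * Illustrative sketch (never compiled, hence #if 0): a minimal caller of
 * lwkt_send_ipiq3().  The callback runs on the target cpu, in a critical
 * section, with the ipifunc3_t signature used throughout this module.
 * The names example_bump, example_send and example_counters are
 * hypothetical and exist only for this sketch.
 */
#if 0
static int example_counters[SMP_MAXCPU];

static void
example_bump(void *arg1, int arg2, struct intrframe *frame)
{
    /* Executes on the target cpu; arg1/arg2 were queued by the sender. */
    example_counters[mycpu->gd_cpuid] += arg2;
}

static void
example_send(int dcpu)
{
    /* Queues the message and returns; the callback runs remotely. */
    lwkt_send_ipiq3(globaldata_find(dcpu), example_bump, NULL, 1);
}
#endif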

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
			void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
	panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_critcount);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
#if defined(__i386__)
	unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	unsigned long rflags = read_rflags();
#endif

	if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	}
	cpu_enable_intr();
	++ipiq_fifofull;
	DEBUG_PUSH_INFO("send_ipiq3_passive");
	while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
	    KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
	    lwkt_process_ipiq();
	}
	DEBUG_POP_INFO();
#if defined(__i386__)
	write_eflags(eflags);
#elif defined(__x86_64__)
	write_rflags(rflags);
#endif
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(ip->ip_windex);
}
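
/*
 * Illustrative sketch (hypothetical, never compiled): the passive variant
 * fits non-critical deferred work such as returning memory to the cpu that
 * owns it.  No hardware IPI is generated; the target drains its queue on
 * the next clock interrupt, so the callback may run up to a tick later.
 * example_remote_free/example_free_on are invented names; kfree/M_TEMP are
 * the stock kernel allocator interfaces.
 */
#if 0
static void
example_remote_free(void *arg1, int arg2, struct intrframe *frame)
{
    kfree(arg1, M_TEMP);	/* runs on the cpu that owns the memory */
}

static void
example_free_on(int dcpu, void *ptr)
{
    lwkt_send_ipiq3_passive(globaldata_find(dcpu), example_remote_free,
			    ptr, 0);
}
#endif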

/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
		       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_critcount);
    if (target == gd) {
	func(arg1, arg2, NULL);
	logipiq(send_end, func, arg1, arg2, gd, target);
	return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
	logipiq(send_fail, func, arg1, arg2, gd, target);
	return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI; we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
	logipiq(cpu_send, func, arg1, arg2, gd, target);
	cpu_send_ipiq(target->gd_cpuid);
    } else {
	if (ipiq_optimized == 0) {
	    logipiq(cpu_send, func, arg1, arg2, gd, target);
	    cpu_send_ipiq(target->gd_cpuid);
	} else {
	    ++ipiq_avoided;
	}
    }

    logipiq(send_end, func, arg1, arg2, gd, target);
    return(0);
}
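
/*
 * Illustrative sketch (hypothetical): using the nowait variant from a
 * context that cannot afford to liveloop.  On ENOENT (queue 2/3 full) the
 * caller chooses a fallback; here it degrades to the blocking variant.
 * Note the caller must already be in a critical section, per the
 * KKASSERT above.  example_send_urgent is an invented name.
 */
#if 0
static void
example_send_urgent(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    if (lwkt_send_ipiq3_nowait(target, func, arg1, arg2) == ENOENT) {
	/* FIFO too full; accept the potential spin in the slow path. */
	lwkt_send_ipiq3(target, func, arg1, arg2);
    }
}
#endif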

/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(cpumask_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
	cpuid = BSFCPUMASK(mask);
	lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
	mask &= ~CPUMASK(cpuid);
	++count;
    }
    return(count);
}

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
	ip = &mycpu->gd_ipiq[target->gd_cpuid];
	if ((int)(ip->ip_xindex - seq) < 0) {
#if defined(__i386__)
	    unsigned int eflags = read_eflags();
#elif defined(__x86_64__)
	    unsigned long rflags = read_rflags();
#endif
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("wait_ipiq");
	    while ((int)(ip->ip_xindex - seq) < 0) {
		crit_enter();
		lwkt_process_ipiq();
		crit_exit();
		if (--maxc == 0) {
		    kprintf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
			    mycpu->gd_cpuid, target->gd_cpuid,
			    ip->ip_xindex - seq);
		}
		if (maxc < -1000000)
		    panic("LWKT_WAIT_IPIQ");
		/*
		 * xindex may be modified by another cpu, use a load fence
		 * to ensure that the loop does not use a speculative value
		 * (which may improve performance).
		 */
		cpu_lfence();
	    }
	    DEBUG_POP_INFO();
#if defined(__i386__)
	    write_eflags(eflags);
#elif defined(__x86_64__)
	    write_rflags(rflags);
#endif
	}
    }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions: one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 *
 * When the current cpu is mastering a cpusync we do NOT internally loop
 * on the cpusyncq poll.  We also do not re-flag a pending ipi due to
 * the cpusyncq poll because this can cause doreti/splz to loop internally.
 * The cpusync master's own loop must be allowed to run to avoid a deadlock.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}

void
lwkt_process_ipiq_frame(struct intrframe *frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
	if (n != gd->gd_cpuid) {
	    sgd = globaldata_find(n);
	    ip = sgd->gd_ipiq;
	    if (ip != NULL) {
		while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], frame))
		    ;
	    }
	}
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
	if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, frame)) {
	    if (gd->gd_curthread->td_cscount == 0)
		goto again;
	}
    }
}
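
/*
 * Minimal standalone sketch (hypothetical, never compiled) of the
 * single-producer/single-consumer index protocol used by the senders above
 * and by lwkt_process_ipiq_core() below.  windex is only ever advanced by
 * the sending cpu and rindex only by the receiving cpu, so neither side
 * needs a lock.  The indices run free and are masked with MAXCPUFIFO_MASK
 * only when addressing the arrays, which makes (windex - rindex) a valid
 * depth even across wraparound.  example_ring and its functions are
 * invented names.
 */
#if 0
struct example_ring {
    volatile int windex;		/* advanced by the producer only */
    volatile int rindex;		/* advanced by the consumer only */
    void *slot[MAXCPUFIFO];
};

static void
example_produce(struct example_ring *r, void *msg)
{
    r->slot[r->windex & MAXCPUFIFO_MASK] = msg;
    cpu_sfence();		/* data must be visible before the index */
    ++r->windex;
}

static void *
example_consume(struct example_ring *r)
{
    void *msg;

    if (r->windex - r->rindex <= 0)
	return (NULL);		/* empty */
    cpu_lfence();		/* read the index before the data */
    msg = r->slot[r->rindex & MAXCPUFIFO_MASK];
    ++r->rindex;
    return (msg);
}
#endif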

#if 0
static int iqticks[SMP_MAXCPU];
static int iqcount[SMP_MAXCPU];
#endif
#if 0
static int iqterm[SMP_MAXCPU];
#endif

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
		       struct intrframe *frame)
{
    globaldata_t mygd = mycpu;
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

#if 0
    if (iqticks[mygd->gd_cpuid] != ticks) {
	iqticks[mygd->gd_cpuid] = ticks;
	iqcount[mygd->gd_cpuid] = 0;
    }
    if (++iqcount[mygd->gd_cpuid] > 3000000) {
	kprintf("cpu %d ipiq maxed cscount %d spin %d\n",
		mygd->gd_cpuid,
		mygd->gd_curthread->td_cscount,
		mygd->gd_spinlocks_wr);
	iqcount[mygd->gd_cpuid] = 0;
#if 0
	if (++iqterm[mygd->gd_cpuid] > 10)
	    panic("cpu %d ipiq maxed", mygd->gd_cpuid);
#endif
	int i;
	for (i = 0; i < ncpus; ++i) {
	    if (globaldata_find(i)->gd_infomsg)
		kprintf(" %s", globaldata_find(i)->gd_infomsg);
	}
	kprintf("\n");
    }
#endif

    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_critcount);
    wi = ip->ip_windex;
    cpu_lfence();
    ++mygd->gd_intr_nesting_level;

    /*
     * NOTE: xindex is only updated after we are sure the function has
     *	     finished execution.  Beware lwkt_process_ipiq() reentrancy!
     *	     The function may send an IPI which may block/drain.
     *
     * NOTE: Due to additional IPI operations that the callback function
     *	     may make, it is possible for both rindex and windex to advance
     *	     and thus for rindex to advance past our cached windex.
     *
     * NOTE: A load fence is required to prevent speculative loads prior
     *	     to the loading of ip_rindex.  Even though stores might be
     *	     ordered, loads are probably not.  A memory fence is required
     *	     to prevent reordering of the loads after the ip_rindex update.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
	ri &= MAXCPUFIFO_MASK;
	cpu_lfence();
	copy_func = ip->ip_func[ri];
	copy_arg1 = ip->ip_arg1[ri];
	copy_arg2 = ip->ip_arg2[ri];
	cpu_mfence();
	++ip->ip_rindex;
	KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) ==
		 ((ri + 1) & MAXCPUFIFO_MASK));
	logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
#ifdef INVARIANTS
	if (ipiq_debug && (ip->ip_rindex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d ipifunc %p %p %d (frame %p)\n",
		    mycpu->gd_cpuid,
		    copy_func, copy_arg1, copy_arg2,
#if defined(__i386__)
		    (frame ? (void *)frame->if_eip : NULL));
#elif defined(__x86_64__)
		    (frame ? (void *)frame->if_rip : NULL));
#else
		    NULL);
#endif
	}
#endif
	copy_func(copy_arg1, copy_arg2, frame);
	cpu_sfence();
	ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
	/*
	 * Simulate panics during the processing of an IPI
	 */
	if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
	    if (--panic_ipiq_count == 0) {
#ifdef DDB
		Debugger("PANIC_DEBUG");
#else
		panic("PANIC_DEBUG");
#endif
	    }
	}
#endif
    }
    --mygd->gd_intr_nesting_level;

    /*
     * Return non-zero if there are more IPI messages pending on this
     * ipiq.  ip_npoll is left set as long as possible to reduce the
     * number of IPIs queued by the originating cpu, but must be cleared
     * *BEFORE* checking windex.
     */
    atomic_poll_release_int(&ip->ip_npoll);
    return(wi != ip->ip_windex);
}

static void
lwkt_sync_ipiq(void *arg)
{
    volatile cpumask_t *cpumask = arg;

    atomic_clear_cpumask(cpumask, mycpu->gd_cpumask);
    if (*cpumask == 0)
	wakeup(cpumask);
}

void
lwkt_synchronize_ipiqs(const char *wmesg)
{
    volatile cpumask_t other_cpumask;

    other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
    lwkt_send_ipiq_mask(other_cpumask, lwkt_sync_ipiq,
			__DEVOLATILE(void *, &other_cpumask));

    while (other_cpumask != 0) {
	tsleep_interlock(&other_cpumask, 0);
	if (other_cpumask != 0)
	    tsleep(&other_cpumask, PINTERLOCKED, wmesg, 0);
    }
}

#endif
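
/*
 * Illustrative sketch (hypothetical, SMP kernels only):
 * lwkt_synchronize_ipiqs() is usable as a drain barrier.  Since each
 * per-cpu FIFO is processed in order, once every other cpu has
 * acknowledged the sync message it has also processed all IPIs queued to
 * it beforehand, so state those IPIs referenced can be torn down.
 * example_teardown, the "xteardn" wmesg, and the use of kfree/M_TEMP are
 * invented for this sketch.
 */
#if 0
static void
example_teardown(void *obj)
{
    /* Stop queueing new references to obj, then drain in-flight IPIs. */
    lwkt_synchronize_ipiqs("xteardn");
    kfree(obj, M_TEMP);
}
#endif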

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_interlock()	- Place specified cpus in a quiescent state.
 *				  The current cpu is placed in a hard critical
 *				  section.
 *
 * lwkt_cpusync_deinterlock()	- Execute cs_func on specified cpus, including
 *				  the current cpu if specified, then return.
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *arg)
{
    struct lwkt_cpusync cs;

    lwkt_cpusync_init(&cs, mask, func, arg);
    lwkt_cpusync_interlock(&cs);
    lwkt_cpusync_deinterlock(&cs);
}

void
lwkt_cpusync_interlock(lwkt_cpusync_t cs)
{
#ifdef SMP
    globaldata_t gd = mycpu;
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): 0->mask for stage 1
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mask & gd->gd_other_cpus & smp_active_mask;
    cs->cs_mack = 0;
    crit_enter_id("cpusync");
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_interlock");
	++ipiq_cscount;
	++gd->gd_curthread->td_cscount;
	lwkt_send_ipiq_mask(mask, (ipifunc1_t)lwkt_cpusync_remote1, cs);
	logipiq2(sync_start, mask);
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
    }
#else
    cs->cs_mack = 0;
#endif
}

/*
 * Interlocked cpus have executed remote1 and are polling in remote2.
 * To deinterlock we clear cs_mack and wait for the cpus to execute
 * the func and set their bit in cs_mack again.
 */
void
lwkt_cpusync_deinterlock(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;
#ifdef SMP
    cpumask_t mask;

    /*
     * mask acknowledge (cs_mack): mack->0->mack for stage 2
     *
     * Clearing cpu bits for polling cpus in cs_mack will cause them to
     * execute stage 2, which executes the cs_func(cs_data) and then sets
     * their bit in cs_mack again.
     *
     * mack does not include the current cpu.
     */
    mask = cs->cs_mack;
    cpu_ccfence();
    cs->cs_mack = 0;
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	cs->cs_func(cs->cs_data);
    if (mask) {
	DEBUG_PUSH_INFO("cpusync_deinterlock");
	while (cs->cs_mack != mask) {
	    lwkt_process_ipiq();
	    cpu_pause();
	}
	DEBUG_POP_INFO();
	/*
	 * cpusyncq ipis may be left queued without the RQF flag set due to
	 * a non-zero td_cscount, so be sure to process any laggards after
	 * decrementing td_cscount.
	 */
	--gd->gd_curthread->td_cscount;
	lwkt_process_ipiq();
	logipiq2(sync_end, mask);
    }
    crit_exit_id("cpusync");
#else
    if (cs->cs_func && (cs->cs_mask & gd->gd_cpumask))
	cs->cs_func(cs->cs_data);
#endif
}

#ifdef SMP

/*
 * helper IPI remote messaging function.
 *
 * Called on the remote cpu when a new cpu synchronization request has been
 * sent to us.  Set our acknowledge bit in cs_mack, then fall through to the
 * stage 2 poll in lwkt_cpusync_remote2().
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    lwkt_cpusync_remote2(cs);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't yet, requeue
 * our request so we spin on it.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t cs)
{
    globaldata_t gd = mycpu;

    if ((cs->cs_mack & gd->gd_cpumask) == 0) {
	if (cs->cs_func)
	    cs->cs_func(cs->cs_data);
	atomic_set_cpumask(&cs->cs_mack, gd->gd_cpumask);
    } else {
	lwkt_ipiq_t ip;
	int wi;

	ip = &gd->gd_cpusyncq;
	wi = ip->ip_windex & MAXCPUFIFO_MASK;
	ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
	ip->ip_arg1[wi] = cs;
	ip->ip_arg2[wi] = 0;
	cpu_sfence();
	++ip->ip_windex;
	if (ipiq_debug && (ip->ip_windex & 0xFFFFFF) == 0) {
	    kprintf("cpu %d cm=%016jx %016jx f=%p\n",
		    gd->gd_cpuid,
		    (intmax_t)cs->cs_mask, (intmax_t)cs->cs_mack,
		    cs->cs_func);
	}
    }
}

#endif
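
/*
 * Illustrative sketch (hypothetical, never compiled): a typical
 * lwkt_cpusync_simple() caller.  All cpus in the mask are brought to a
 * quiescent point, each runs the callback during stage 2 (including the
 * calling cpu when it is in the mask), and the call returns only after
 * every cpu has finished, so the old state can no longer be in use.
 * example_invalidate/example_invalidate_all are invented names;
 * cpu_invltlb() stands in for whatever per-cpu action is needed.
 */
#if 0
static void
example_invalidate(void *arg)
{
    /* Runs on every cpu in the mask while the group is quiescent. */
    cpu_invltlb();
}

static void
example_invalidate_all(void)
{
    lwkt_cpusync_simple(smp_active_mask, example_invalidate, NULL);
}
#endif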