/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_ipiq.c,v 1.17 2005/10/26 10:46:45 sephe Exp $
 */

/*
 * This module implements IPI message queueing and the MI portion of IPI
 * message processing.
 */

#ifdef _KERNEL

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>
#include <machine/atomic.h>

#define THREAD_STACK    (UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>
#include <machine/cpu.h>
#include <machine/atomic.h>

#endif

#ifdef SMP
static __int64_t ipiq_count;    /* total calls to lwkt_send_ipiq*() */
static __int64_t ipiq_fifofull; /* number of fifo full conditions detected */
static __int64_t ipiq_avoided;  /* interlock with target avoids cpu ipi */
static __int64_t ipiq_passive;  /* passive IPI messages */
static __int64_t ipiq_cscount;  /* number of cpu synchronizations */
static int ipiq_optimized = 1;  /* XXX temporary sysctl */
#ifdef PANIC_DEBUG
static int panic_ipiq_cpu = -1;
static int panic_ipiq_count = 100;
#endif
#endif

#ifdef _KERNEL

#ifdef SMP
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_count, CTLFLAG_RW, &ipiq_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_fifofull, CTLFLAG_RW, &ipiq_fifofull, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_avoided, CTLFLAG_RW, &ipiq_avoided, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_passive, CTLFLAG_RW, &ipiq_passive, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, ipiq_cscount, CTLFLAG_RW, &ipiq_cscount, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, ipiq_optimized, CTLFLAG_RW, &ipiq_optimized, 0, "");
#ifdef PANIC_DEBUG
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_cpu, CTLFLAG_RW, &panic_ipiq_cpu, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, panic_ipiq_count, CTLFLAG_RW, &panic_ipiq_count, 0, "");
#endif

#define IPIQ_STRING     "func=%p arg1=%p arg2=%d scpu=%d dcpu=%d"
#define IPIQ_ARG_SIZE   (sizeof(void *) * 2 + sizeof(int) * 2)

#if !defined(KTR_IPIQ)
#define KTR_IPIQ        KTR_ALL
#endif
KTR_INFO_MASTER(ipiq);
KTR_INFO(KTR_IPIQ, ipiq, send_norm, 0, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_pasv, 1, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_nbio, 2, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, send_fail, 3, IPIQ_STRING, IPIQ_ARG_SIZE);
KTR_INFO(KTR_IPIQ, ipiq, receive, 4, IPIQ_STRING, IPIQ_ARG_SIZE);

#define logipiq(name, func, arg1, arg2, sgd, dgd)       \
        KTR_LOG(ipiq_ ## name, func, arg1, arg2, sgd->gd_cpuid, dgd->gd_cpuid)

#endif  /* SMP */
#endif  /* KERNEL */

#ifdef SMP

static int lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
                                  struct intrframe *frame);
static void lwkt_cpusync_remote1(lwkt_cpusync_t poll);
static void lwkt_cpusync_remote2(lwkt_cpusync_t poll);

/*
 * Send a function execution request to another cpu.  The request is queued
 * on the cpu<->cpu ipiq matrix.  Each cpu owns a unique ipiq FIFO for every
 * possible target cpu.  Only the owning (source) cpu writes to a given
 * FIFO; the target cpu drains it.
 *
 * If the FIFO fills up we have to enable interrupts to avoid an APIC
 * deadlock and process pending IPIQs while waiting for it to empty.
 * Otherwise we may soft-deadlock with another cpu whose FIFO is also full.
 *
 * We can safely bump gd_intr_nesting_level because our crit_exit() at the
 * end will take care of any pending interrupts.
 *
 * The actual hardware IPI is avoided if the target cpu is already processing
 * the queue from a prior IPI.  It is possible to pipeline IPI messages
 * very quickly between cpus due to the FIFO hysteresis.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3(globaldata_t target, ipifunc3_t func, void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_norm, func, arg1, arg2, gd, target);

    if (target == gd) {
        func(arg1, arg2, NULL);
        return(0);
    }
    crit_enter();
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
        panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
        unsigned int eflags = read_eflags();

        if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0)
            cpu_send_ipiq(target->gd_cpuid);
        cpu_enable_intr();
        ++ipiq_fifofull;
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
        }
        write_eflags(eflags);
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * signal the target cpu that there is work pending.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
        cpu_send_ipiq(target->gd_cpuid);
    } else {
        if (ipiq_optimized == 0)
            cpu_send_ipiq(target->gd_cpuid);
        ++ipiq_avoided;
    }
    crit_exit();
    return(ip->ip_windex);
}
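
/*
 * Example (hypothetical usage sketch): queue a function for execution on
 * another cpu.  The callback matches the ipifunc3_t shape used by this
 * file and runs on the target cpu from its IPI processing path.  The
 * names "remote_bump", "remote_counter" and "target_gd" are made up for
 * illustration only.
 *
 *    static int remote_counter;
 *
 *    static void
 *    remote_bump(void *arg1, int arg2, struct intrframe *frame)
 *    {
 *        remote_counter += arg2;
 *    }
 *
 *    ...
 *    lwkt_send_ipiq3(target_gd, remote_bump, NULL, 1);
 */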

/*
 * Similar to lwkt_send_ipiq() but this function does not actually initiate
 * the IPI to the target cpu unless the FIFO has become too full, so it is
 * very fast.
 *
 * This function is used for non-critical IPI messages, such as memory
 * deallocations.  The queue will typically be flushed by the target cpu at
 * the next clock interrupt.
 *
 * Need not be called from a critical section.
 */
int
lwkt_send_ipiq3_passive(globaldata_t target, ipifunc3_t func,
                        void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    KKASSERT(target != gd);
    crit_enter();
    logipiq(send_pasv, func, arg1, arg2, gd, target);
    ++gd->gd_intr_nesting_level;
#ifdef INVARIANTS
    if (gd->gd_intr_nesting_level > 20)
        panic("lwkt_send_ipiq: TOO HEAVILY NESTED!");
#endif
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    ++ipiq_count;
    ++ipiq_passive;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    /*
     * Do not allow the FIFO to become full.  Interrupts must be physically
     * enabled while we liveloop to avoid deadlocking the APIC.
     */
    if (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 2) {
        unsigned int eflags = read_eflags();

        if (atomic_poll_acquire_int(&ip->ip_npoll) || ipiq_optimized == 0)
            cpu_send_ipiq(target->gd_cpuid);
        cpu_enable_intr();
        ++ipiq_fifofull;
        while (ip->ip_windex - ip->ip_rindex > MAXCPUFIFO / 4) {
            KKASSERT(ip->ip_windex - ip->ip_rindex != MAXCPUFIFO - 1);
            lwkt_process_ipiq();
        }
        write_eflags(eflags);
    }

    /*
     * Queue the new message
     */
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;
    --gd->gd_intr_nesting_level;

    /*
     * Do not signal the target cpu, it will pick up the IPI when it next
     * polls (typically on the next tick).
     */
    crit_exit();
    return(ip->ip_windex);
}

/*
 * Send an IPI request without blocking, return 0 on success, ENOENT on
 * failure.  The actual queueing of the hardware IPI may still force us
 * to spin and process incoming IPIs but that will eventually go away
 * when we've gotten rid of the other general IPIs.
 */
int
lwkt_send_ipiq3_nowait(globaldata_t target, ipifunc3_t func,
                       void *arg1, int arg2)
{
    lwkt_ipiq_t ip;
    int windex;
    struct globaldata *gd = mycpu;

    logipiq(send_nbio, func, arg1, arg2, gd, target);
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    if (target == gd) {
        func(arg1, arg2, NULL);
        return(0);
    }
    ++ipiq_count;
    ip = &gd->gd_ipiq[target->gd_cpuid];

    if (ip->ip_windex - ip->ip_rindex >= MAXCPUFIFO * 2 / 3) {
        logipiq(send_fail, func, arg1, arg2, gd, target);
        return(ENOENT);
    }
    windex = ip->ip_windex & MAXCPUFIFO_MASK;
    ip->ip_func[windex] = func;
    ip->ip_arg1[windex] = arg1;
    ip->ip_arg2[windex] = arg2;
    cpu_sfence();
    ++ip->ip_windex;

    /*
     * This isn't a passive IPI, we still have to signal the target cpu.
     */
    if (atomic_poll_acquire_int(&ip->ip_npoll)) {
        cpu_send_ipiq(target->gd_cpuid);
    } else {
        if (ipiq_optimized == 0)
            cpu_send_ipiq(target->gd_cpuid);
        else
            ++ipiq_avoided;
    }
    return(0);
}

/*
 * deprecated, used only by fast int forwarding.
 */
int
lwkt_send_ipiq3_bycpu(int dcpu, ipifunc3_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(globaldata_find(dcpu), func, arg1, arg2));
}
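
/*
 * Example (hypothetical usage sketch): queue a non-critical request, such
 * as handing an object back to the cpu that owns it, without forcing a
 * hardware IPI.  The target cpu drains its queue the next time it polls,
 * typically on the next clock interrupt.  "remote_release", "owner_gd"
 * and "obj" are made-up names for illustration.
 *
 *    lwkt_send_ipiq3_passive(owner_gd, remote_release, obj, 0);
 *
 * A caller that must not spin when the FIFO is nearly full can use
 * lwkt_send_ipiq3_nowait() instead and retry or fall back when it
 * returns ENOENT.
 */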

/*
 * Send a message to several target cpus.  Typically used for scheduling.
 * The message will not be sent to stopped cpus.
 */
int
lwkt_send_ipiq3_mask(u_int32_t mask, ipifunc3_t func, void *arg1, int arg2)
{
    int cpuid;
    int count = 0;

    mask &= ~stopped_cpus;
    while (mask) {
        cpuid = bsfl(mask);
        lwkt_send_ipiq3(globaldata_find(cpuid), func, arg1, arg2);
        mask &= ~(1 << cpuid);
        ++count;
    }
    return(count);
}

/*
 * Wait for the remote cpu to finish processing a function.
 *
 * YYY we have to enable interrupts and process the IPIQ while waiting
 * for it to empty or we may deadlock with another cpu.  Create a CPU_*()
 * function to do this!  YYY we really should 'block' here.
 *
 * MUST be called from a critical section.  This routine may be called
 * from an interrupt (for example, if an interrupt wakes a foreign thread
 * up).
 */
void
lwkt_wait_ipiq(globaldata_t target, int seq)
{
    lwkt_ipiq_t ip;
    int maxc = 100000000;

    if (target != mycpu) {
        ip = &mycpu->gd_ipiq[target->gd_cpuid];
        if ((int)(ip->ip_xindex - seq) < 0) {
            unsigned int eflags = read_eflags();
            cpu_enable_intr();
            while ((int)(ip->ip_xindex - seq) < 0) {
                crit_enter();
                lwkt_process_ipiq();
                crit_exit();
                if (--maxc == 0)
                    printf("LWKT_WAIT_IPIQ WARNING! %d wait %d (%d)\n",
                        mycpu->gd_cpuid, target->gd_cpuid,
                        ip->ip_xindex - seq);
                if (maxc < -1000000)
                    panic("LWKT_WAIT_IPIQ");
                /*
                 * xindex may be modified by another cpu, use a load fence
                 * to ensure that the loop does not use a speculative value
                 * (which may improve performance).
                 */
                cpu_lfence();
            }
            write_eflags(eflags);
        }
    }
}

int
lwkt_seq_ipiq(globaldata_t target)
{
    lwkt_ipiq_t ip;

    ip = &mycpu->gd_ipiq[target->gd_cpuid];
    return(ip->ip_windex);
}
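
/*
 * Example (hypothetical usage sketch): send a request and spin until the
 * target cpu has finished executing it.  The sequence number returned by
 * lwkt_send_ipiq3() is the ip_windex value just past our entry, which is
 * exactly what lwkt_wait_ipiq() compares against ip_xindex.
 * "remote_sync_op", "target_gd" and "arg" are made-up names for
 * illustration.
 *
 *    int seq;
 *
 *    crit_enter();
 *    seq = lwkt_send_ipiq3(target_gd, remote_sync_op, arg, 0);
 *    lwkt_wait_ipiq(target_gd, seq);
 *    crit_exit();
 */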

/*
 * Called from IPI interrupt (like a fast interrupt), which has placed
 * us in a critical section.  The MP lock may or may not be held.
 * May also be called from doreti or splz, or be reentrantly called
 * indirectly through the ip_func[] we run.
 *
 * There are two versions, one where no interrupt frame is available (when
 * called from the send code and from splz), and one where an interrupt
 * frame is available.
 */
void
lwkt_process_ipiq(void)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
        if (n != gd->gd_cpuid) {
            sgd = globaldata_find(n);
            ip = sgd->gd_ipiq;
            if (ip != NULL) {
                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], NULL))
                    ;
            }
        }
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
        if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, NULL)) {
            if (gd->gd_curthread->td_cscount == 0)
                goto again;
            need_ipiq();
        }
    }
}

#ifdef _KERNEL
void
lwkt_process_ipiq_frame(struct intrframe frame)
{
    globaldata_t gd = mycpu;
    globaldata_t sgd;
    lwkt_ipiq_t ip;
    int n;

again:
    for (n = 0; n < ncpus; ++n) {
        if (n != gd->gd_cpuid) {
            sgd = globaldata_find(n);
            ip = sgd->gd_ipiq;
            if (ip != NULL) {
                while (lwkt_process_ipiq_core(sgd, &ip[gd->gd_cpuid], &frame))
                    ;
            }
        }
    }
    if (gd->gd_cpusyncq.ip_rindex != gd->gd_cpusyncq.ip_windex) {
        if (lwkt_process_ipiq_core(gd, &gd->gd_cpusyncq, &frame)) {
            if (gd->gd_curthread->td_cscount == 0)
                goto again;
            need_ipiq();
        }
    }
}
#endif

static int
lwkt_process_ipiq_core(globaldata_t sgd, lwkt_ipiq_t ip,
                       struct intrframe *frame)
{
    int ri;
    int wi;
    ipifunc3_t copy_func;
    void *copy_arg1;
    int copy_arg2;

    /*
     * Obtain the current write index, which is modified by a remote cpu.
     * Issue a load fence to prevent speculative reads of e.g. data written
     * by the other cpu prior to it updating the index.
     */
    KKASSERT(curthread->td_pri >= TDPRI_CRIT);
    wi = ip->ip_windex;
    cpu_lfence();

    /*
     * Note: xindex is only updated after we are sure the function has
     * finished execution.  Beware lwkt_process_ipiq() reentrancy!  The
     * function may send an IPI which may block/drain.
     *
     * Note: due to additional IPI operations that the callback function
     * may make, it is possible for both rindex and windex to advance and
     * thus for rindex to advance past our cached windex.
     */
    while (wi - (ri = ip->ip_rindex) > 0) {
        ri &= MAXCPUFIFO_MASK;
        copy_func = ip->ip_func[ri];
        copy_arg1 = ip->ip_arg1[ri];
        copy_arg2 = ip->ip_arg2[ri];
        cpu_mfence();
        ++ip->ip_rindex;
        KKASSERT((ip->ip_rindex & MAXCPUFIFO_MASK) == ((ri + 1) & MAXCPUFIFO_MASK));
        logipiq(receive, copy_func, copy_arg1, copy_arg2, sgd, mycpu);
        copy_func(copy_arg1, copy_arg2, frame);
        cpu_sfence();
        ip->ip_xindex = ip->ip_rindex;

#ifdef PANIC_DEBUG
        /*
         * Simulate panics during the processing of an IPI
         */
        if (mycpu->gd_cpuid == panic_ipiq_cpu && panic_ipiq_count) {
            if (--panic_ipiq_count == 0) {
#ifdef DDB
                Debugger("PANIC_DEBUG");
#else
                panic("PANIC_DEBUG");
#endif
            }
        }
#endif
    }

    /*
     * Return non-zero if there are more IPI messages pending on this
     * ipiq.  ip_npoll is left set as long as possible to reduce the
     * number of IPIs queued by the originating cpu, but must be cleared
     * *BEFORE* checking windex.
     */
    atomic_poll_release_int(&ip->ip_npoll);
    return(wi != ip->ip_windex);
}
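
/*
 * Note on the index arithmetic used above (illustrative): ip_windex,
 * ip_rindex and ip_xindex are free-running counters and only the array
 * accesses are masked with MAXCPUFIFO_MASK.  Because comparisons are done
 * on the unmasked differences, the count of pending entries stays correct
 * even after a counter wraps.  For example, with 32 bit counters:
 *
 *    windex = 0x00000002, rindex = 0xfffffffe
 *    windex - rindex == 4 entries pending
 */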

#endif

/*
 * CPU Synchronization Support
 *
 * lwkt_cpusync_simple()
 *
 *    The function is executed synchronously before return on remote
 *    cpus.  A lwkt_cpusync_t pointer is passed as an argument.  The data
 *    can be accessed via arg->cs_data.
 *
 *    XXX should I just pass the data as an argument to be consistent?
 */
void
lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = func;
    cmd.cs_fin2_func = NULL;
    cmd.cs_data = data;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
        func(&cmd);
    lwkt_cpusync_finish(&cmd);
}

/*
 * lwkt_cpusync_fastdata()
 *
 *    The function is executed in tandem with return on remote cpus.
 *    The data is directly passed as an argument.  Do not pass pointers to
 *    temporary storage as the storage might have gone poof by the time
 *    the target cpu executes the function.
 *
 *    At the moment lwkt_cpusync is declared on the stack and we must wait
 *    for all remote cpus to ack in lwkt_cpusync_finish(), but as a future
 *    optimization we should be able to put a counter in the globaldata
 *    structure (if it is not otherwise being used) and just poke it and
 *    return without waiting.  XXX
 */
void
lwkt_cpusync_fastdata(cpumask_t mask, cpusync_func2_t func, void *data)
{
    struct lwkt_cpusync cmd;

    cmd.cs_run_func = NULL;
    cmd.cs_fin1_func = NULL;
    cmd.cs_fin2_func = func;
    cmd.cs_data = NULL;
    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
    if (mask & (1 << mycpu->gd_cpuid))
        func(data);
    lwkt_cpusync_finish(&cmd);
}

/*
 * lwkt_cpusync_start()
 *
 *    Start synchronization with a set of target cpus, return once they are
 *    known to be in a synchronization loop.  The target cpus will execute
 *    poll->cs_run_func() IN TANDEM WITH THE RETURN.
 *
 *    XXX future: add lwkt_cpusync_start_quick() and require a call to
 *    lwkt_cpusync_add() or lwkt_cpusync_wait(), allowing the caller to
 *    potentially absorb the IPI latency doing something useful.
 */
void
lwkt_cpusync_start(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = 0;
    poll->cs_mask = mask;
#ifdef SMP
    poll->cs_maxcount = lwkt_send_ipiq_mask(
                mask & gd->gd_other_cpus & smp_active_mask,
                (ipifunc1_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & gd->gd_cpumask) {
        if (poll->cs_run_func)
            poll->cs_run_func(poll);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
        ++ipiq_cscount;
        ++gd->gd_curthread->td_cscount;
        while (poll->cs_count != poll->cs_maxcount) {
            crit_enter();
            lwkt_process_ipiq();
            crit_exit();
        }
    }
#endif
}

void
lwkt_cpusync_add(cpumask_t mask, lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;
#ifdef SMP
    int count;
#endif

    mask &= ~poll->cs_mask;
    poll->cs_mask |= mask;
#ifdef SMP
    count = lwkt_send_ipiq_mask(
                mask & gd->gd_other_cpus & smp_active_mask,
                (ipifunc1_t)lwkt_cpusync_remote1, poll);
#endif
    if (mask & gd->gd_cpumask) {
        if (poll->cs_run_func)
            poll->cs_run_func(poll);
    }
#ifdef SMP
    poll->cs_maxcount += count;
    if (poll->cs_maxcount) {
        if (poll->cs_maxcount == count)
            ++gd->gd_curthread->td_cscount;
        while (poll->cs_count != poll->cs_maxcount) {
            crit_enter();
            lwkt_process_ipiq();
            crit_exit();
        }
    }
#endif
}
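
/*
 * Example (hypothetical usage sketch): run a function on a set of cpus and
 * do not return until every targeted cpu has executed it.  The callback
 * receives a lwkt_cpusync_t pointer and can reach the caller's data via
 * cs_data.  "sync_action" and "my_data" are made-up names for
 * illustration.
 *
 *    static void
 *    sync_action(lwkt_cpusync_t info)
 *    {
 *        void *data = info->cs_data;
 *
 *        ...        (per-cpu work on data)
 *    }
 *
 *    lwkt_cpusync_simple(smp_active_mask, sync_action, my_data);
 */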

/*
 * Finish synchronization with a set of target cpus.  The target cpus will
 * execute cs_fin1_func(poll) prior to this function returning, and will
 * execute cs_fin2_func(data) IN TANDEM WITH THIS FUNCTION'S RETURN.
 *
 * If cs_maxcount is non-zero then we are mastering a cpusync with one or
 * more remote cpus and must account for it in our thread structure.
 */
void
lwkt_cpusync_finish(lwkt_cpusync_t poll)
{
    globaldata_t gd = mycpu;

    poll->cs_count = -1;
    if (poll->cs_mask & gd->gd_cpumask) {
        if (poll->cs_fin1_func)
            poll->cs_fin1_func(poll);
        if (poll->cs_fin2_func)
            poll->cs_fin2_func(poll->cs_data);
    }
#ifdef SMP
    if (poll->cs_maxcount) {
        while (poll->cs_count != -(poll->cs_maxcount + 1)) {
            crit_enter();
            lwkt_process_ipiq();
            crit_exit();
        }
        --gd->gd_curthread->td_cscount;
    }
#endif
}

#ifdef SMP

/*
 * helper IPI remote messaging function.
 *
 * Called on remote cpu when a new cpu synchronization request has been
 * sent to us.  Execute the run function and adjust cs_count, then requeue
 * the request so we spin on it.
 */
static void
lwkt_cpusync_remote1(lwkt_cpusync_t poll)
{
    atomic_add_int(&poll->cs_count, 1);
    if (poll->cs_run_func)
        poll->cs_run_func(poll);
    lwkt_cpusync_remote2(poll);
}

/*
 * helper IPI remote messaging function.
 *
 * Poll for the originator telling us to finish.  If it hasn't, requeue
 * our request so we spin on it.  When the originator requests that we
 * finish we execute cs_fin1_func(poll) synchronously and cs_fin2_func(data)
 * in tandem with the release.
 */
static void
lwkt_cpusync_remote2(lwkt_cpusync_t poll)
{
    if (poll->cs_count < 0) {
        cpusync_func2_t savef;
        void *saved;

        if (poll->cs_fin1_func)
            poll->cs_fin1_func(poll);
        if (poll->cs_fin2_func) {
            savef = poll->cs_fin2_func;
            saved = poll->cs_data;
            atomic_add_int(&poll->cs_count, -1);
            savef(saved);
        } else {
            atomic_add_int(&poll->cs_count, -1);
        }
    } else {
        globaldata_t gd = mycpu;
        lwkt_ipiq_t ip;
        int wi;

        ip = &gd->gd_cpusyncq;
        wi = ip->ip_windex & MAXCPUFIFO_MASK;
        ip->ip_func[wi] = (ipifunc3_t)(ipifunc1_t)lwkt_cpusync_remote2;
        ip->ip_arg1[wi] = poll;
        ip->ip_arg2[wi] = 0;
        cpu_sfence();
        ++ip->ip_windex;
    }
}

#endif