1 /* 2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.20 2005/06/20 20:38:01 dillon Exp $ 35 */ 36 37 #ifdef _KERNEL 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/kernel.h> 42 #include <sys/proc.h> 43 #include <sys/rtprio.h> 44 #include <sys/queue.h> 45 #include <sys/thread2.h> 46 #include <sys/sysctl.h> 47 #include <sys/ktr.h> 48 #include <sys/kthread.h> 49 #include <machine/cpu.h> 50 #include <sys/lock.h> 51 #include <sys/caps.h> 52 53 #include <vm/vm.h> 54 #include <vm/vm_param.h> 55 #include <vm/vm_kern.h> 56 #include <vm/vm_object.h> 57 #include <vm/vm_page.h> 58 #include <vm/vm_map.h> 59 #include <vm/vm_pager.h> 60 #include <vm/vm_extern.h> 61 #include <vm/vm_zone.h> 62 63 #include <machine/stdarg.h> 64 #include <machine/ipl.h> 65 #include <machine/smp.h> 66 67 #define THREAD_STACK (UPAGES * PAGE_SIZE) 68 69 #else 70 71 #include <sys/stdint.h> 72 #include <libcaps/thread.h> 73 #include <sys/thread.h> 74 #include <sys/msgport.h> 75 #include <sys/errno.h> 76 #include <libcaps/globaldata.h> 77 #include <machine/cpufunc.h> 78 #include <sys/thread2.h> 79 #include <sys/msgport2.h> 80 #include <stdio.h> 81 #include <stdlib.h> 82 #include <string.h> 83 #include <machine/lock.h> 84 #include <machine/cpu.h> 85 86 #endif 87 88 #define MAKE_TOKENS_SPIN 89 /* #define MAKE_TOKENS_YIELD */ 90 91 #ifndef LWKT_NUM_POOL_TOKENS 92 #define LWKT_NUM_POOL_TOKENS 1024 /* power of 2 */ 93 #endif 94 #define LWKT_MASK_POOL_TOKENS (LWKT_NUM_POOL_TOKENS - 1) 95 96 #ifdef INVARIANTS 97 static int token_debug = 0; 98 #endif 99 100 #ifdef SMP 101 static void lwkt_reqtoken_remote(void *data); 102 #endif 103 104 static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS]; 105 106 #define TOKEN_STRING "REF=%p TOK=%p TD=%p" 107 #if !defined(KTR_TOKENS) 108 #define KTR_TOKENS KTR_ALL 109 #endif 110 111 KTR_INFO_MASTER(tokens); 112 KTR_INFO(KTR_TOKENS, tokens, try, 0, TOKEN_STRING, sizeof(void *) * 3); 113 KTR_INFO(KTR_TOKENS, tokens, get, 1, TOKEN_STRING, sizeof(void *) * 3); 
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
#ifdef SMP
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
#endif

/*
 * Log a token event for the current thread.  'name' selects one of the
 * KTR_INFO entries above (try/get/release/remote/...).
 */
#define logtoken(name, ref) \
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

#ifdef _KERNEL

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

#endif

#ifdef SMP

/*
 * Determine if we own all the tokens in the token reference list.
 * Return 1 on success, 0 on failure.
 *
 * As a side effect, queue requests for tokens we want which are owned
 * by other cpus.  The magic number is used to communicate when the
 * target cpu has processed the request.  Note, however, that the
 * target cpu may not be able to assign the token to us which is why
 * the scheduler must spin.
 *
 * Must be called in a critical section (asserted via td_pri below).
 */
int
lwkt_chktokens(thread_t td)
{
    globaldata_t gd = td->td_gd;	/* mycpu */
    lwkt_tokref_t refs;
    globaldata_t dgd;
    lwkt_token_t tok;
    __uint32_t magic;
    int r = 1;

    KKASSERT(gd->gd_curthread->td_pri >= TDPRI_CRIT);
    for (refs = td->td_toks; refs; refs = refs->tr_next) {
	tok = refs->tr_tok;
	if ((dgd = tok->t_cpu) != gd) {
	    cpu_ccfence();	/* don't let the compiler reload tok->t_cpu */
	    r = 0;

	    /*
	     * Queue a request to the target cpu, exit the loop early if
	     * we are unable to queue the IPI message.  The magic number
	     * flags whether we have a pending ipi request queued or not.
	     * It can be set from MAGIC2 to MAGIC1 by a remote cpu but can
	     * only be set from MAGIC1 to MAGIC2 by our cpu.
	     */
	    magic = refs->tr_magic;
	    cpu_ccfence();
	    if (magic == LWKT_TOKREF_MAGIC1) {
		refs->tr_magic = LWKT_TOKREF_MAGIC2;	/* MP synched slowreq*/
		refs->tr_reqgd = gd;
		tok->t_reqcpu = gd;	/* MP unsynchronized 'fast' req */

		logtoken(reqremote, refs);

		if (lwkt_send_ipiq_nowait(dgd, lwkt_reqtoken_remote, refs)) {
		    /* failed - revert to no-request-pending state */
		    refs->tr_magic = LWKT_TOKREF_MAGIC1;

		    logtoken(reqfail, refs);
		    break;
		}
	    } else if (magic != LWKT_TOKREF_MAGIC2) {
		panic("lwkt_chktoken(): token ref %p tok %p bad magic %08x\n",
		      refs, refs->tr_tok, magic);
	    }
	}
    }
    return(r);
}

#endif

/*
 * Check if we already own the token.  Return 1 on success, 0 on failure.
 *
 * Only scans the current thread's own reference list; preempted threads
 * are not considered here.
 */
int
lwkt_havetoken(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
	if (ref->tr_tok == tok)
	    return(1);
    }
    return(0);
}

/*
 * Check whether the specific token reference structure is linked into
 * the current thread's token list.  Return 1 if present, 0 if not.
 */
int
lwkt_havetokref(lwkt_tokref_t xref)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
	if (ref == xref)
	    return(1);
    }
    return(0);
}

#ifdef SMP

/*
 * Returns 1 if it is ok to give a token away, 0 if it is not.
 *
 * The token may not be given away while any thread in our preemption
 * chain (the current thread and any threads it has preempted) holds a
 * reference to it.
 */
static int
lwkt_oktogiveaway_token(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;
    thread_t td;

    for (td = gd->gd_curthread; td; td = td->td_preempted) {
	for (ref = td->td_toks; ref; ref = ref->tr_next) {
	    if (ref->tr_tok == tok)
		return(0);
	}
    }
    return(1);
}

#endif

/*
 * Acquire a serializing token.  Common backend for lwkt_gettoken() and
 * lwkt_gettokref(); spins (or yields) until our cpu owns the token.
 */

static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref)
{
    lwkt_tokref_t scan;
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    KKASSERT(gd->gd_intr_nesting_level == 0);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus, prevents the token from being
     * given away if our cpu already owns it, and interlocks against 
     * preempting threads which may want the token.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_ccfence();	/* prevent compiler reordering */
    td->td_toks = ref;
    tok = ref->tr_tok;

    /*
     * If we are preempting another thread which owns the token we have to
     * yield to get out from the preemption because we cannot obtain a token
     * owned by the thread we are preempting.
     */
    if (td->td_preempted) {
	while ((td = td->td_preempted) != NULL) {
	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    lwkt_yield();
		    KKASSERT(tok->t_cpu == gd);
		    goto breakout;
		}
	    }
	}
breakout: ;
	td = gd->gd_curthread;	/* our thread, again */
    }

    /*
     * If our cpu does not own the token then (currently) spin while we
     * await it.  XXX we should yield here but some testing is required
     * before we do so, there could be some interlock issues with e.g.
     * softupdates before we can yield.  ZZZ
     */
#ifdef SMP
    if (tok->t_cpu != gd) {
#if defined(MAKE_TOKENS_SPIN)
	/*
	 * Spin, processing incoming IPIs and draining queued token
	 * requests so the remote owner can hand the token off.  The
	 * counters only bound how long we spin before complaining
	 * (and, in-kernel, eventually panicing on a livelock).
	 */
	int x = 40000000;
	int y = 10;
	crit_enter();
	while (lwkt_chktokens(td) == 0) {
	    lwkt_process_ipiq();
	    lwkt_drain_token_requests();
	    if (--x == 0) {
		x = 40000000;
		printf("CHKTOKEN looping on cpu %d\n", gd->gd_cpuid);
#ifdef _KERNEL
		if (--y == 0)
		    panic("CHKTOKEN looping on cpu %d", gd->gd_cpuid);
#endif
	    }
	    splz();
	}
	crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
	lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
	KKASSERT(tok->t_cpu == gd);
    }
#endif
}


/*
 * Attempt to acquire a serializing token without blocking.  Common
 * backend for lwkt_trytoken() and lwkt_trytokref().  Returns 1 on
 * success (reference legitimized), 0 on failure (reference unlinked).
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    KKASSERT(gd->gd_intr_nesting_level == 0);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     *
     * Force a panic to occur if chktokens is called while the reference
     * is linked to td_toks but before we have resolved whether we can
     * keep it.  chktokens should never be called on our ref list
     * preemptively.
     */
    ref->tr_magic = LWKT_TOKREF_MAGIC3;
    ref->tr_next = td->td_toks;
    cpu_ccfence();	/* prevent compiler reordering */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then stop now.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, stop.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
	td->td_toks = ref->tr_next;	/* remove ref */
	ref->tr_magic = LWKT_TOKREF_MAGIC1;
	return(0);
    }
#endif
    if (td->td_preempted) {
	while ((td = td->td_preempted) != NULL) {
	    lwkt_tokref_t scan;
	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    td = gd->gd_curthread;	/* our thread */
		    td->td_toks = ref->tr_next;	/* remove ref */
		    ref->tr_magic = LWKT_TOKREF_MAGIC1;
		    return(0);
		}
	    }
	}
    }

    /* 
     * We own the token, legitimize the reference.
     */
    ref->tr_magic = LWKT_TOKREF_MAGIC1;
    /* 'td' variable no longer valid */
    return(1);
}

/*
 * Blocking token acquisition, caller-supplied tokref is initialized here.
 */
void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    logtoken(get, ref);
    _lwkt_gettokref(ref);
}

/*
 * Blocking token acquisition using a pre-initialized tokref.
 */
void
lwkt_gettokref(lwkt_tokref_t ref)
{
    logtoken(get, ref);
    _lwkt_gettokref(ref);
}

/*
 * Non-blocking token acquisition, caller-supplied tokref is initialized
 * here.  Returns 1 on success, 0 on failure.
 */
int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    logtoken(try, ref);
    return(_lwkt_trytokref(ref));
}

/*
 * Non-blocking token acquisition using a pre-initialized tokref.
 * Returns 1 on success, 0 on failure.
 */
int
lwkt_trytokref(lwkt_tokref_t ref)
{
    logtoken(try, ref);
    return(_lwkt_trytokref(ref));
}

/*
 * Release a serializing token.  Unlinks the reference from the current
 * thread's list and, if this was the last reference to the token held
 * by our preemption chain, allows ownership to migrate to t_reqcpu.
 */
void
lwkt_reltoken(lwkt_tokref *_ref)
{
    lwkt_tokref_t scan;
    lwkt_tokref *ref;
    lwkt_tokref **pref;
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;
    int giveaway;

    logtoken(release, _ref);
    /*
     * Guard check and stack check (if in the same stack page).  We must
     * also wait for any action pending on remote cpus which we do by
     * checking the magic number and yielding in a loop.
     */
    ref = _ref;
#ifdef INVARIANTS
    if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
	KKASSERT((char *)ref > (char *)&_ref);
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
	     ref->tr_magic == LWKT_TOKREF_MAGIC2);
#endif

    tok = ref->tr_tok;
    gd = mycpu;
    td = gd->gd_curthread;

    KKASSERT(tok->t_cpu == gd);
    KKASSERT(gd->gd_intr_nesting_level == 0);

    /*
     * We can only give away the token if we aren't holding it recursively.
     * Also use the opportunity to locate the link field for the token.
     *
     * We do not have to scan preempted threads since by definition we cannot
     * be holding any token held by a thread we are preempting.
     */
    giveaway = 1;
    for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
	KKASSERT(ref != NULL);
	if (ref->tr_tok == tok)
	    giveaway = 0;
    }
    for (scan = ref->tr_next; scan; scan = scan->tr_next) {
	if (scan->tr_tok == tok)
	    giveaway = 0;
    }

    /*
     * Give the token away (if we can) before removing the interlock.  Once
     * the interlock is removed, the token can be given away by an IPI.
     */
    if (giveaway)
	tok->t_cpu = tok->t_reqcpu;
    KKASSERT(*pref == ref);
    *pref = ref->tr_next;

    /*
     * If we had gotten the token opportunistically and it still happens to
     * be queued to a target cpu, we have to wait for the target cpu
     * to finish processing it.  This does not happen very often and does
     * not need to be optimal.
     */
    while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
#if defined(MAKE_TOKENS_SPIN)
	crit_enter();
#ifdef SMP
	lwkt_process_ipiq();
#endif
	splz();
	crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
	lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
    }
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
    int i;

    for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
	lwkt_token_init(&pool_tokens[i]);
}

/*
 * Hash an arbitrary address to one of the pool tokens.  The same address
 * always maps to the same token; distinct addresses may share a token.
 */
lwkt_token_t
lwkt_token_pool_get(void *ptraddr)
{
    int i;

    /*
     * Mix low and mid address bits; the masked result indexes the
     * power-of-2 pool array.
     */
    i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
    return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

#ifdef SMP

/*
 * This is the receiving side of a remote IPI requesting a token.  If we
 * cannot immediately hand the token off to another cpu we queue it.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
static void
lwkt_reqtoken_remote(void *data)
{
    lwkt_tokref_t ref = data;
    globaldata_t gd = mycpu;
    lwkt_token_t tok = ref->tr_tok;

    logtoken(remote, ref);
    /*
     * We do not have to queue the token if we can give it away
     * immediately.  Otherwise we queue it to our globaldata structure.
     */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
    if (lwkt_oktogiveaway_token(tok)) {
	if (tok->t_cpu == gd)
	    tok->t_cpu = ref->tr_reqgd;
	cpu_ccfence();	/* prevent compiler reordering */
	ref->tr_magic = LWKT_TOKREF_MAGIC1;
    } else {
	ref->tr_gdreqnext = gd->gd_tokreqbase;
	gd->gd_tokreqbase = ref;
    }
}

/*
 * Must be called from a critical section.  Satisfy all remote token
 * requests that are pending on our globaldata structure.  The request
 * does not have to be satisfied with a successful change of ownership
 * but we do have to acknowledge that we have completed processing the
 * request by setting the magic number back to MAGIC1.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_drain_token_requests(void)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;

    KKASSERT(gd->gd_curthread->td_pri >= TDPRI_CRIT);
    while ((ref = gd->gd_tokreqbase) != NULL) {
	gd->gd_tokreqbase = ref->tr_gdreqnext;
	logtoken(drain, ref);
	KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
	if (ref->tr_tok->t_cpu == gd)
	    ref->tr_tok->t_cpu = ref->tr_reqgd;
	cpu_ccfence();	/* prevent compiler reordering */
	ref->tr_magic = LWKT_TOKREF_MAGIC1;
    }
}

#endif

/*
 * Initialize the owner and release-to cpu to the current cpu
 * and reset the generation count.
 */
void
lwkt_token_init(lwkt_token_t tok)
{
    tok->t_cpu = tok->t_reqcpu = mycpu;
}

/*
 * Tear down a token.  Nothing to release; tokens carry no dynamically
 * allocated state.
 */
void
lwkt_token_uninit(lwkt_token_t tok)
{
    /* empty */
}