/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 */
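/*
 * Layout of counta implied by the description above and by the masks used
 * in spin_lock_contested() below (illustrative only; the authoritative flag
 * definitions live in sys/spinlock.h):
 *
 *	SPINLOCK_SHARED			lock is currently held in shared mode
 *	SPINLOCK_EXCLWAIT and above	count of waiting exclusive lockers
 *	(SPINLOCK_EXCLWAIT - 1) mask	count of current holders (1 for an
 *					exclusive holder, N for shared holders)
 *
 * For example, an uncontested exclusive hold is counta == 1, and two
 * concurrent shared holders yield counta == (SPINLOCK_SHARED | 2).
 */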
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin);

struct indefinite_info {
	sysclock_t	base;
	int		secs;
};

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

static int64_t spinlocks_contested1;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
    &spinlocks_contested1, 0,
    "Spinlock contention count due to collisions with exclusive lock holders");

static int64_t spinlocks_contested2;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
    &spinlocks_contested2, 0,
    "Serious spinlock contention count");

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

/*
 * We need a fairly large pool to avoid contention on large SMP systems,
 * particularly multi-chip systems.
 */
/*#define SPINLOCK_NUM_POOL	8101*/
#define SPINLOCK_NUM_POOL	8192
#define SPINLOCK_NUM_POOL_MASK	(SPINLOCK_NUM_POOL - 1)

static __cachealign struct {
	struct spinlock	spin;
	char		filler[32 - sizeof(struct spinlock)];
} pool_spinlocks[SPINLOCK_NUM_POOL];

static int spin_indefinite_check(struct spinlock *spin,
				 struct indefinite_info *info);

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.  countb was incremented on our behalf.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*++spinlocks_contested1;*/
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	--gd->gd_curthread->td_critcount;
	return (FALSE);
}
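/*
 * Illustrative sketch of the inline fast path that spin_trylock_contested()
 * unwinds.  This block is not compiled; the real inline lives in
 * sys/spinlock2.h and may differ, and the example_ name is hypothetical.
 * The inline enters a critical section and bumps gd_spinlocks before
 * attempting the atomic acquisition, which is why the contested path above
 * decrements both counters on failure.
 */
#if 0
static __inline int
example_spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;	/* no preemption while held */
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1))
		return (TRUE);			/* acquired exclusively */
	return (spin_trylock_contested(spin));	/* unwinds the counts */
}
#endif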
/*
 * The spin_lock() inline was unable to acquire the lock.
 *
 * atomic_swap_int() is the absolute fastest spinlock instruction, at
 * least on multi-socket systems.  All instructions seem to be about
 * the same on single-socket multi-core systems.  However, atomic_swap_int()
 * does not result in an even distribution of successful acquisitions.
 *
 * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
 * shared spin locks, so as we do a better job removing contention we've
 * moved to atomic_cmpset_int() to be able to handle multiple states.
 *
 * Another problem we have is that (at least on the 48-core opteron we test
 * with) having all 48 cores contesting the same spin lock reduces
 * performance to around 600,000 ops/sec, versus millions when fewer cores
 * are going after the same lock.
 *
 * Backoff algorithms can create even worse starvation problems, and don't
 * really improve performance when a lot of cores are contending.
 *
 * Our solution is to allow the data cache to lazy-update by reading it
 * non-atomically and only attempting to acquire the lock if the lazy read
 * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
 * (for intel/amd anyhow) is not strictly needed as cache bus resource use
 * is governed by the lazy update.
 *
 * WARNING!!!!  Performance matters here, by a huge margin.
 *
 * 48-core test with pre-read / -j 48 no-modules kernel compile
 * with fanned-out inactive and active queues came in at 55 seconds.
 *
 * 48-core test with pre-read / -j 48 no-modules kernel compile
 * came in at 75 seconds.  Without pre-read it came in at 170 seconds.
 *
 * 4-core test with pre-read / -j 48 no-modules kernel compile
 * came in at 83 seconds.  Without pre-read it came in at 83 seconds
 * as well (no difference).
 */
void
spin_lock_contested(struct spinlock *spin)
{
	struct indefinite_info info = { 0, 0 };
	int i;

	/*
	 * Force any existing shared locks to exclusive so no new shared
	 * locks can occur.  Transfer our count to the high bits, then
	 * loop until we can acquire the low counter (== 1).
	 */
	atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
	atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif
#if defined(INVARIANTS)
	if (spin_lock_test_mode > 10 &&
	    spin->countb > spin_lock_test_mode &&
	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
		spin->countb = 0;
		print_backtrace(-1);
	}
	++spin->countb;
#endif
	i = 0;

	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit counter to the low bits.
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    atomic_cmpset_int(&spin->counta, ovalue,
				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
			break;
		}
		if ((++i & 0x7F) == 0x7F) {
#if defined(INVARIANTS)
			++spin->countb;
#endif
			if (spin_indefinite_check(spin, &info))
				break;
		}
#ifdef _KERNEL_VIRTUAL
		pthread_yield();
#endif
	}
	/*logspin(end, spin, 'w');*/
}

/*
 * Shared spinlocks
 */
void
spin_lock_shared_contested(struct spinlock *spin)
{
	struct indefinite_info info = { 0, 0 };
	int i;

	atomic_add_int(&spin->counta, -1);
#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif
#if defined(INVARIANTS)
	if (spin_lock_test_mode > 10 &&
	    spin->countb > spin_lock_test_mode &&
	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
		spin->countb = 0;
		print_backtrace(-1);
	}
	++spin->countb;
#endif
	i = 0;

	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;

		cpu_ccfence();
		if (ovalue == 0) {
			if (atomic_cmpset_int(&spin->counta, 0,
					      SPINLOCK_SHARED | 1))
				break;
		} else if (ovalue & SPINLOCK_SHARED) {
			if (atomic_cmpset_int(&spin->counta, ovalue,
					      ovalue + 1))
				break;
		}
		if ((++i & 0x7F) == 0x7F) {
#if defined(INVARIANTS)
			++spin->countb;
#endif
			if (spin_indefinite_check(spin, &info))
				break;
		}
#ifdef _KERNEL_VIRTUAL
		pthread_yield();
#endif
	}
	/*logspin(end, spin, 'w');*/
}

/*
 * Pool functions (SHARED SPINLOCKS NOT SUPPORTED)
 */
static __inline int
_spin_pool_hash(void *ptr)
{
	int i;

	i = ((int)(uintptr_t)ptr >> 5) ^ ((int)(uintptr_t)ptr >> 12);
	i &= SPINLOCK_NUM_POOL_MASK;
	return (i);
}

void
_spin_pool_lock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_lock(sp);
}

void
_spin_pool_unlock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_unlock(sp);
}

static
int
spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
{
	sysclock_t count;

	cpu_spinlock_contested();

	count = sys_cputimer->count();
	if (info->secs == 0) {
		info->base = count;
		++info->secs;
	} else if (count - info->base > sys_cputimer->freq) {
		kprintf("spin_lock: %p, indefinite wait (%d secs)!\n",
			spin, info->secs);
		info->base = count;
		++info->secs;
		if (panicstr)
			return (TRUE);
#if defined(INVARIANTS)
		if (spin_lock_test_mode) {
			print_backtrace(-1);
			return (TRUE);
		}
#endif
#if defined(INVARIANTS)
		if (info->secs == 11)
			print_backtrace(-1);
#endif
		if (info->secs == 60)
			panic("spin_lock: %p, indefinite wait!", spin);
	}
	return (FALSE);
}
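/*
 * Illustrative use of the pool spinlock API above (not compiled; the
 * example_ names are hypothetical).  Any pointer can serve as a channel;
 * the same channel must be passed to both lock and unlock so the hash
 * selects the same pool entry, and pool spinlocks only support exclusive
 * locking.
 */
#if 0
static int example_refcount;

static void
example_ref(void *obj)
{
	_spin_pool_lock(obj);		/* hash obj to one pool spinlock */
	++example_refcount;		/* short, non-blocking section */
	_spin_pool_unlock(obj);		/* same channel -> same pool entry */
}
#endif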
/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin);
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_quick(gd, &spin);
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
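/*
 * Example invocation of the test sysctls above from a root shell
 * (illustrative):
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 *
 * Test mode 1 deliberately re-locks a held spinlock to exercise the
 * indefinite-wait reporting in spin_indefinite_check().
 */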