/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, then loop until we can
 * either transition counta from 0 to SPINLOCK_SHARED|1, or find
 * SPINLOCK_SHARED already set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back atomically with atomic_cmpset_int() and return.
 */
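
/*
 * Illustrative sketch (not compiled in) of the exclusive fast path the
 * comment above describes.  The real inline is assumed to live in
 * sys/spinlock2.h; the function name here is hypothetical and the real
 * inline also maintains per-cpu and critical-section bookkeeping.
 */
#if 0
static __inline void
example_spin_lock(struct spinlock *spin)
{
	/* Optimistically bid for the lock by bumping the low count. */
	atomic_add_int(&spin->counta, 1);

	/*
	 * We own the exclusive lock only if our increment made counta
	 * exactly 1.  Any other value means a holder or other bidders
	 * exist and we must enter the contested path below.
	 */
	if (spin->counta != 1)
		spin_lock_contested(spin);
}
#endif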
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin);

struct indefinite_info {
	sysclock_t	base;
	int		secs;
};

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

static int64_t spinlocks_contested1;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
    &spinlocks_contested1, 0,
    "Spinlock contention count due to collisions with exclusive lock holders");

static int64_t spinlocks_contested2;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
    &spinlocks_contested2, 0,
    "Serious spinlock contention count");

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

/*
 * We need a fairly large pool to avoid contention on large SMP systems,
 * particularly multi-chip systems.
 */
/*#define SPINLOCK_NUM_POOL	8101*/
#define SPINLOCK_NUM_POOL	8192
#define SPINLOCK_NUM_POOL_MASK	(SPINLOCK_NUM_POOL - 1)

static __cachealign struct {
	struct spinlock	spin;
	char		filler[32 - sizeof(struct spinlock)];
} pool_spinlocks[SPINLOCK_NUM_POOL];

static int spin_indefinite_check(struct spinlock *spin,
				struct indefinite_info *info);

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.  countb was incremented on our behalf.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*++spinlocks_contested1;*/
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	--gd->gd_curthread->td_critcount;
	return (FALSE);
}
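
/*
 * Illustrative sketch (not compiled in) of the fast path that pairs with
 * spin_trylock_contested() above, assuming a sys/spinlock2.h inline of
 * roughly this shape; the name and exact body are assumptions.  It shows
 * why the failure path must decrement gd_spinlocks and td_critcount:
 * the inline bumps both before testing counta.
 */
#if 0
static __inline int
example_spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;	/* enter critical section */
	cpu_ccfence();
	++gd->gd_spinlocks;			/* account for the lock */
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));	/* unwinds both */
	return (TRUE);
}
#endif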
/*
 * The spin_lock() inline was unable to acquire the lock.
 *
 * atomic_swap_int() is the absolute fastest spinlock instruction, at
 * least on multi-socket systems.  All instructions seem to be about
 * the same on single-socket multi-core systems.  However, atomic_swap_int()
 * does not result in an even distribution of successful acquisitions.
 *
 * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
 * shared spin locks, so as we do a better job removing contention we've
 * moved to atomic_cmpset_int() to be able to handle multiple states.
 *
 * Another problem we have is that (at least on the 48-core opteron we test
 * with) having all 48 cores contesting the same spin lock reduces
 * performance to around 600,000 ops/sec, versus millions when fewer cores
 * are going after the same lock.
 *
 * Backoff algorithms can create even worse starvation problems, and don't
 * really improve performance when a lot of cores are contending.
 *
 * Our solution is to allow the data cache to lazy-update by reading it
 * non-atomically and only attempting to acquire the lock if the lazy read
 * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
 * (for intel/amd anyhow) is not strictly needed as cache bus resource use
 * is governed by the lazy update.
 *
 * WARNING!!!!  Performance matters here, by a huge margin.
 *
 *	48-core test with pre-read / -j 48 no-modules kernel compile
 *	with fanned-out inactive and active queues came in at 55 seconds.
 *
 *	48-core test with pre-read / -j 48 no-modules kernel compile
 *	came in at 75 seconds.  Without pre-read it came in at 170 seconds.
 *
 *	4-core test with pre-read / -j 48 no-modules kernel compile
 *	came in at 83 seconds.  Without pre-read it came in at 83 seconds
 *	as well (no difference).
 */
void
spin_lock_contested(struct spinlock *spin)
{
	struct indefinite_info info = { 0, 0 };
	int i;

	/*
	 * Force any existing shared locks to exclusive so no new shared
	 * locks can occur.  Transfer our count to the high bits, then
	 * loop until we can acquire the low counter (== 1).
	 */
	atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
	atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif
#if defined(INVARIANTS)
	if (spin_lock_test_mode > 10 &&
	    spin->countb > spin_lock_test_mode &&
	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
		spin->countb = 0;
		print_backtrace(-1);
	}
	++spin->countb;
#endif
	i = 0;

	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit counter to the low bits.
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    atomic_cmpset_int(&spin->counta, ovalue,
				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
			break;
		}
		if ((++i & 0x7F) == 0x7F) {
#if defined(INVARIANTS)
			++spin->countb;
#endif
			if (spin_indefinite_check(spin, &info))
				break;
		}
	}
	/*logspin(end, spin, 'w');*/
}
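
/*
 * Worked example (comment only) of the contested exclusive path above,
 * assuming SPINLOCK_EXCLWAIT sits well above the low count bits and
 * cpu B contends while cpu A holds the lock:
 *
 *	counta == 1			A holds the lock exclusively.
 *	counta == 2			B's inline increment fails (!= 1).
 *	counta == SPINLOCK_EXCLWAIT + 1	B adds EXCLWAIT-1, moving its +1
 *					from the low bits to the high bits.
 *	counta == SPINLOCK_EXCLWAIT	A releases; the low bits drop to 0.
 *	counta == 1			B's cmpset subtracts EXCLWAIT and
 *					sets the low bits to 1: B now owns
 *					the lock exclusively.
 */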
/*
 * Shared spinlocks
 */
void
spin_lock_shared_contested(struct spinlock *spin)
{
	struct indefinite_info info = { 0, 0 };
	int i;

	atomic_add_int(&spin->counta, -1);
#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif
#if defined(INVARIANTS)
	if (spin_lock_test_mode > 10 &&
	    spin->countb > spin_lock_test_mode &&
	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
		spin->countb = 0;
		print_backtrace(-1);
	}
	++spin->countb;
#endif
	i = 0;

	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;

		cpu_ccfence();
		if (ovalue == 0) {
			if (atomic_cmpset_int(&spin->counta, 0,
					      SPINLOCK_SHARED | 1))
				break;
		} else if (ovalue & SPINLOCK_SHARED) {
			if (atomic_cmpset_int(&spin->counta, ovalue,
					      ovalue + 1))
				break;
		}
		if ((++i & 0x7F) == 0x7F) {
#if defined(INVARIANTS)
			++spin->countb;
#endif
			if (spin_indefinite_check(spin, &info))
				break;
		}
	}
	/*logspin(end, spin, 'w');*/
}

/*
 * Pool functions (SHARED SPINLOCKS NOT SUPPORTED)
 */
static __inline int
_spin_pool_hash(void *ptr)
{
	int i;

	i = ((int)(uintptr_t)ptr >> 5) ^ ((int)(uintptr_t)ptr >> 12);
	i &= SPINLOCK_NUM_POOL_MASK;
	return (i);
}

void
_spin_pool_lock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_lock(sp);
}

void
_spin_pool_unlock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_unlock(sp);
}
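
/*
 * Illustrative sketch (not compiled in) of pool spinlock usage: any
 * stable address serves as the channel, and the same address must be
 * passed to lock and unlock so both hash to the same pooled lock.
 * The structure and function names here are hypothetical.
 */
#if 0
struct example_obj {
	int	refs;
};

static void
example_obj_ref(struct example_obj *obj)
{
	_spin_pool_lock(obj);		/* exclusive; keyed on address */
	++obj->refs;			/* short, bounded critical section */
	_spin_pool_unlock(obj);		/* same address, same pool lock */
}
#endif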
static
int
spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
{
	sysclock_t count;

	cpu_spinlock_contested();

	count = sys_cputimer->count();
	if (info->secs == 0) {
		info->base = count;
		++info->secs;
	} else if (count - info->base > sys_cputimer->freq) {
		kprintf("spin_lock: %p, indefinite wait (%d secs)!\n",
			spin, info->secs);
		info->base = count;
		++info->secs;
		if (panicstr)
			return (TRUE);
#if defined(INVARIANTS)
		if (spin_lock_test_mode) {
			print_backtrace(-1);
			return (TRUE);
		}
#endif
#if defined(INVARIANTS)
		if (info->secs == 11)
			print_backtrace(-1);
#endif
		if (info->secs == 60)
			panic("spin_lock: %p, indefinite wait!", spin);
	}
	return (FALSE);
}

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin);
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_quick(gd, &spin);
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
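
/*
 * Example invocation (assumes an INVARIANTS kernel and a root shell);
 * note the handler above implements modes 1 and 2:
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 */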