/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment lock.  Then we check if lock
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of lock.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 */
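
/*
 * Illustrative sketch (not compiled): a minimal model of the fast paths
 * described above.  This is not the real sys/spinlock2.h inline code; the
 * example_* names are hypothetical and only the increment-then-check rule
 * is being demonstrated.  spin->lock, SPINLOCK_SHARED and
 * atomic_fetchadd_int() are the names actually used later in this file.
 */
#if 0
static __inline int
example_try_excl(struct spinlock *spin)
{
	/* uncontested exclusive acquisition: the count transitions 0 -> 1 */
	return (atomic_fetchadd_int(&spin->lock, 1) == 0);
}

static __inline int
example_try_shared(struct spinlock *spin)
{
	uint32_t ovalue;

	/*
	 * Shared acquisition succeeds if the lock was free or is already
	 * marked SPINLOCK_SHARED; failure cases fall into the contested
	 * functions below.
	 */
	ovalue = atomic_fetchadd_int(&spin->lock, 1);
	return (ovalue == 0 || (ovalue & SPINLOCK_SHARED) != 0);
}
#endif
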
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/indefinite2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

__read_frequently static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
	&spinlocks_add_latency, 0,
	"Add spinlock latency");

#endif

__read_frequently static long spin_backoff_max = 4096;
SYSCTL_LONG(_debug, OID_AUTO, spin_backoff_max, CTLFLAG_RW,
	&spin_backoff_max, 0,
	"Spinlock exponential backoff limit");

/* 1 << n clock cycles, approx */
__read_frequently static long spin_window_shift = 8;
SYSCTL_LONG(_debug, OID_AUTO, spin_window_shift, CTLFLAG_RW,
	&spin_window_shift, 0,
	"Spinlock TSC windowing");

__read_frequently int indefinite_uses_rdtsc = 1;
SYSCTL_INT(_debug, OID_AUTO, indefinite_uses_rdtsc, CTLFLAG_RW,
	&indefinite_uses_rdtsc, 0,
	"Indefinite code uses RDTSC");

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*
	 * Handle degenerate case, else fail.
	 */
	if (atomic_cmpset_int(&spin->lock, SPINLOCK_SHARED|0, 1))
		return TRUE;
	/*atomic_add_int(&spin->lock, -1);*/
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);

	return (FALSE);
}
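
/*
 * Usage sketch (not compiled): spin_trylock() is the sys/spinlock2.h inline
 * that ends up in spin_trylock_contested() above when the lock is already
 * held.  "somelock" is a hypothetical spinlock used for illustration only.
 */
#if 0
	if (spin_trylock(&somelock)) {
		/* ... short critical section ... */
		spin_unlock(&somelock);
	} else {
		/* contended; defer the work or use the blocking spin_lock() */
	}
#endif
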
/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->lock already incremented, passing (spin->lock - 1)
 * to the function (the result of the inline's fetchadd).
 *
 * Note that we implement both exclusive and shared spinlocks, so we cannot
 * use atomic_swap_int().  Instead, we try to use atomic_fetchadd_int()
 * to put most of the burden on the cpu.  Atomic_cmpset_int() (cmpxchg)
 * can cause a lot of unnecessary looping in situations where it is just
 * trying to increment the count.
 *
 * Similarly, we leave the SHARED flag intact and incur slightly more
 * overhead when switching from shared to exclusive.  This allows us to
 * use atomic_fetchadd_int() for both spinlock types in the critical
 * path.
 *
 * The exponential (n^1.5) backoff algorithm is designed to both reduce
 * cache bus contention between cpu cores and sockets, and to allow some
 * bursting of exclusive locks in heavily contended situations to improve
 * performance.
 *
 * The exclusive lock priority mechanism prevents even heavily contended
 * exclusive locks from being starved by shared locks.
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
	indefinite_info_t info;
	uint32_t ovalue;
	long expbackoff;
	long loop;

	/*
	 * WARNING! Caller has already incremented the lock.  We must
	 *	    increment the count value (from the inline's fetch-add)
	 *	    to match.
	 *
	 * Handle the degenerate case where the spinlock is flagged SHARED
	 * with only our reference.  We can convert it to EXCLUSIVE.
	 */
	if (value == (SPINLOCK_SHARED | 1) - 1) {
		if (atomic_cmpset_int(&spin->lock, SPINLOCK_SHARED | 1, 1))
			return;
	}
	/* ++value; value not used after this */
	info.type = 0;		/* avoid improper gcc warning */
	info.ident = NULL;	/* avoid improper gcc warning */
	info.secs = 0;		/* avoid improper gcc warning */
	info.base = 0;		/* avoid improper gcc warning */
	info.count = 0;		/* avoid improper gcc warning */
	expbackoff = 0;

	/*
	 * Transfer our exclusive request to the high bits and clear the
	 * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
	 * appear exclusive, preventing any NEW shared or exclusive
	 * spinlocks from being obtained while we wait for existing
	 * shared or exclusive holders to unlock.
	 *
	 * Don't tread on earlier exclusive waiters by stealing the lock
	 * away early if the low bits happen to now be 1.
	 *
	 * The shared unlock understands that this may occur.
	 */
	ovalue = atomic_fetchadd_int(&spin->lock, SPINLOCK_EXCLWAIT - 1);
	ovalue += SPINLOCK_EXCLWAIT - 1;
	if (ovalue & SPINLOCK_SHARED) {
		atomic_clear_int(&spin->lock, SPINLOCK_SHARED);
		ovalue &= ~SPINLOCK_SHARED;
	}

	for (;;) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff == 6)		/* 1, 3, 6, 10, ... */
			indefinite_init(&info, ident, 0, 'S');
		if (indefinite_uses_rdtsc) {
			if ((rdtsc() >> spin_window_shift) % ncpus != mycpuid) {
				for (loop = expbackoff; loop; --loop)
					cpu_pause();
			}
		}
		/*cpu_lfence();*/

		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit reservation to the low bits.
		 *
		 * NOTE: Avoid unconditional atomic op by testing ovalue,
		 *	 otherwise we get cache bus armageddon.
		 *
		 * NOTE: We must also ensure that the SHARED bit is cleared.
		 *	 It is possible for it to wind up being set on a
		 *	 shared lock override of the EXCLWAIT bits.
		 */
		ovalue = spin->lock;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0) {
			uint32_t nvalue;

			nvalue = ((ovalue - SPINLOCK_EXCLWAIT) | 1) &
				 ~SPINLOCK_SHARED;
			if (atomic_fcmpset_int(&spin->lock, &ovalue, nvalue))
				break;
			continue;
		}
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		if (expbackoff >= 6) {
			if (indefinite_check(&info))
				break;
		}
	}
	if (expbackoff >= 6)
		indefinite_done(&info);
}
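
/*
 * Worked illustration of the EXCLWAIT transfer above.  Purely for
 * readability this assumes a hypothetical SPINLOCK_EXCLWAIT of 0x10000;
 * see sys/spinlock.h for the real definition.
 *
 * A caller whose inline fetchadd left lock == 2 (one existing exclusive
 * holder plus our increment) enters _spin_lock_contested().  The fetchadd
 * of SPINLOCK_EXCLWAIT - 1 moves our low-bit reference into the high-order
 * waiter count, leaving lock == 0x10001.  When the existing holder
 * releases, the low bits drop to zero (lock == 0x10000) and the fcmpset
 * installs ((0x10000 - SPINLOCK_EXCLWAIT) | 1) & ~SPINLOCK_SHARED == 1,
 * i.e. a plain exclusive lock owned by us.
 */
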
/*
 * The spin_lock_shared() inline was unable to acquire the lock and calls
 * this function with spin->lock already incremented.
 *
 * This is not in the critical path unless there is contention between
 * shared and exclusive holders.
 *
 * Exclusive locks have priority over shared locks.  However, this can
 * cause shared locks to be starved when large numbers of threads are
 * competing for exclusive locks, so the shared lock code uses TSC-windowing
 * to selectively ignore the exclusive priority mechanism.  This has the
 * effect of allowing a limited number of shared locks to compete against
 * exclusive waiters at any given moment.
 *
 * Note that shared locks do not implement exponential backoff.  Instead,
 * the shared lock simply polls the lock value.  One cpu_pause() is built
 * into indefinite_check().
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident)
{
	indefinite_info_t info;
	uint32_t ovalue;

	/*
	 * Undo the inline's increment.
	 */
	ovalue = atomic_fetchadd_int(&spin->lock, -1) - 1;

	indefinite_init(&info, ident, 0, 's');
	cpu_pause();

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	for (;;) {
		/*
		 * Loop until we can acquire the shared spinlock.  Note that
		 * the low bits can be zero while the high EXCLWAIT bits are
		 * non-zero.  In this situation exclusive requesters have
		 * priority (otherwise shared users on multiple cpus can hog
		 * the spinlock).
		 *
		 * NOTE: Reading spin->lock prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		cpu_ccfence();

		/*
		 * Ignore the EXCLWAIT bits if we are inside our window.
		 *
		 * We must always use a windowing approach here or the
		 * EXCLWAIT bits can prevent the shared lock from ever
		 * resolving... permanent starvation.
		 *
		 * In addition, if we were to always ignore the EXCLWAIT
		 * bits, overlapping shared locks can prevent an exclusive
		 * lock from ever resolving... permanent starvation again.
		 */
		if (/*indefinite_uses_rdtsc &&*/
		    (ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    (rdtsc() >> spin_window_shift) % ncpus == mycpuid) {
			if (atomic_fcmpset_int(&spin->lock, &ovalue,
					       ovalue | SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * Check ovalue tightly (no exponential backoff for shared
		 * locks, that would result in horrible performance.  Instead,
		 * shared locks depend on the exclusive priority mechanism
		 * to avoid starving exclusive locks).
		 */
		if (ovalue == 0) {
			if (atomic_fcmpset_int(&spin->lock, &ovalue,
					       SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * If SHARED is already set, go for the increment, improving
		 * the exclusive to multiple-readers transition.
		 */
		if (ovalue & SPINLOCK_SHARED) {
			ovalue = atomic_fetchadd_int(&spin->lock, 1);
			/* ovalue += 1; NOT NEEDED */
			if (ovalue & SPINLOCK_SHARED)
				break;
			ovalue = atomic_fetchadd_int(&spin->lock, -1);
			ovalue += -1;
			continue;
		}
		if (indefinite_check(&info))
			break;
		/*
		 * ovalue was wrong anyway, just reload
		 */
		ovalue = spin->lock;
	}
	indefinite_done(&info);
}
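
/*
 * Usage sketch (not compiled): the shared API backed by the contested path
 * above.  spin_lock_shared()/spin_unlock_shared() are the sys/spinlock2.h
 * inlines; "somelock" is a hypothetical spinlock used for illustration only.
 */
#if 0
	spin_lock_shared(&somelock);
	/*
	 * Read-mostly critical section; may run concurrently with other
	 * shared holders but not with an exclusive holder.
	 */
	spin_unlock_shared(&somelock);
#endif
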
/*
 * Automatically avoid use of rdtsc when running in a VM.
 */
static void
spinlock_sysinit(void *dummy __unused)
{
	if (vmm_guest)
		indefinite_uses_rdtsc = 0;
}
SYSINIT(spinsysinit, SI_BOOT2_PROC0, SI_ORDER_FIRST, spinlock_sysinit, NULL);


/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 *
 * A brief usage sketch appears at the end of this file.
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
	"Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin, "sysctllock");
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin, "sysctllocktest");
		for (i = spin_test_count; i > 0; --i) {
			_spin_lock_quick(gd, &spin, "test");
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
	0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
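
/*
 * Usage sketch: on an INVARIANTS kernel the timing tests above are driven
 * from userland via sysctl, e.g. (illustrative values):
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 */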