/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment lock.  Then we check if lock
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be
 * set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of lock.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 */
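/*
 * Worked example of the exclusive failure case above, assuming the
 * sys/spinlock.h layout: SPINLOCK_SHARED is the high bit, the low bits
 * count holders, and SPINLOCK_EXCLWAIT is the increment unit of the
 * high-order exclusive-wait counter.
 *
 *      lock == 1                       cpu A holds the lock exclusively
 *      fetchadd 1 -> lock == 2         our inline increments, sees the
 *                                      old value 1 != 0, and fails over
 *                                      to the contested path
 *      fetchadd (EXCLWAIT - 1)         the contested path transfers our
 *          -> lock == EXCLWAIT + 1     count to the high bits; the lock
 *                                      now appears exclusive to newcomers
 *      cpu A unlocks                   lock == EXCLWAIT, low bits now 0
 *      cmpset(EXCLWAIT, 1)             transfer the count back atomically
 *                                      and own the lock with lock == 1
 */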
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/indefinite2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION     KTR_ALL
#endif
#define SPIN_STRING     "spin=%p type=%c"
#define SPIN_ARG_SIZE   (sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)                       \
        KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

__read_frequently static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

__read_frequently static long spin_backoff_max = 4096;
SYSCTL_LONG(_debug, OID_AUTO, spin_backoff_max, CTLFLAG_RW,
    &spin_backoff_max, 0,
    "Spinlock exponential backoff limit");

/* 1 << n clock cycles, approx */
__read_frequently static long spin_window_shift = 8;
SYSCTL_LONG(_debug, OID_AUTO, spin_window_shift, CTLFLAG_RW,
    &spin_window_shift, 0,
    "Spinlock TSC windowing");

__read_frequently int indefinite_uses_rdtsc = 1;
SYSCTL_INT(_debug, OID_AUTO, indefinite_uses_rdtsc, CTLFLAG_RW,
    &indefinite_uses_rdtsc, 0,
    "Indefinite code uses RDTSC");

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        /*
         * Handle degenerate case, else fail.
         */
        if (atomic_cmpset_int(&spin->lock, SPINLOCK_SHARED|0, 1))
                return TRUE;
        /*atomic_add_int(&spin->lock, -1);*/
        --gd->gd_spinlocks;
        crit_exit_quick(gd->gd_curthread);

        return (FALSE);
}
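/*
 * Illustrative sketch only (not compiled): the approximate shape of the
 * spin_trylock() inline whose failure path lands in spin_trylock_contested()
 * above.  The real inline lives in sys/spinlock2.h; example_spin_trylock()
 * is a placeholder name.  The sketch shows why the contested path must
 * decrement gd_spinlocks and pop the critical section: the inline charges
 * both before attempting the 0 -> 1 transition.
 */
#if 0
static __inline boolean_t
example_spin_trylock(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        crit_enter_quick(gd->gd_curthread);     /* undone on failure */
        ++gd->gd_spinlocks;                     /* undone on failure */
        if (atomic_cmpset_int(&spin->lock, 0, 1) == 0)
                return (spin_trylock_contested(spin));
        return (TRUE);
}
#endif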
/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->lock already incremented, passing (spin->lock - 1)
 * to the function (the result of the inline's fetchadd).
 *
 * Note that we implement both exclusive and shared spinlocks, so we cannot
 * use atomic_swap_int().  Instead, we try to use atomic_fetchadd_int()
 * to put most of the burden on the cpu.  atomic_cmpset_int() (cmpxchg)
 * can cause a lot of unnecessary looping in situations where it is just
 * trying to increment the count.
 *
 * Similarly, we leave the SHARED flag intact and incur slightly more
 * overhead when switching from shared to exclusive.  This allows us to
 * use atomic_fetchadd_int() for both spinlock types in the critical
 * path.
 *
 * The exponential (n^1.5) backoff algorithm is designed to both reduce
 * cache bus contention between cpu cores and sockets, and to allow some
 * bursting of exclusive locks in heavily contended situations to improve
 * performance.
 *
 * The exclusive lock priority mechanism prevents even heavily contended
 * exclusive locks from being starved by shared locks.
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
        indefinite_info_t info;
        uint32_t ovalue;
        long expbackoff;
        long loop;

        /*
         * WARNING! Caller has already incremented the lock.  We must
         *          increment the count value (from the inline's fetch-add)
         *          to match.
         *
         * Handle the degenerate case where the spinlock is flagged SHARED
         * with only our reference.  We can convert it to EXCLUSIVE.
         */
        if (value == (SPINLOCK_SHARED | 1) - 1) {
                if (atomic_cmpset_int(&spin->lock, SPINLOCK_SHARED | 1, 1))
                        return;
        }
        /* ++value; value not used after this */
        info.type = 0;          /* avoid improper gcc warning */
        info.ident = NULL;      /* avoid improper gcc warning */
        info.secs = 0;          /* avoid improper gcc warning */
        info.base = 0;          /* avoid improper gcc warning */
        expbackoff = 0;

        /*
         * Transfer our exclusive request to the high bits and clear the
         * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
         * appear exclusive, preventing any NEW shared or exclusive
         * spinlocks from being obtained while we wait for existing
         * shared or exclusive holders to unlock.
         *
         * Don't tread on earlier exclusive waiters by stealing the lock
         * away early if the low bits happen to now be 1.
         *
         * The shared unlock understands that this may occur.
         */
        ovalue = atomic_fetchadd_int(&spin->lock, SPINLOCK_EXCLWAIT - 1);
        ovalue += SPINLOCK_EXCLWAIT - 1;
        if (ovalue & SPINLOCK_SHARED) {
                atomic_clear_int(&spin->lock, SPINLOCK_SHARED);
                ovalue &= ~SPINLOCK_SHARED;
        }

        for (;;) {
                expbackoff = (expbackoff + 1) * 3 / 2;
                if (expbackoff == 6)            /* 1, 3, 6, 10, ... */
                        indefinite_init(&info, ident, 0, 'S');
                if (indefinite_uses_rdtsc) {
                        if ((rdtsc() >> spin_window_shift) % ncpus != mycpuid) {
                                for (loop = expbackoff; loop; --loop)
                                        cpu_pause();
                        }
                }
                /*cpu_lfence();*/

                /*
                 * If the low bits are zero, try to acquire the exclusive lock
                 * by transferring our high bit reservation to the low bits.
                 *
                 * NOTE: Avoid unconditional atomic op by testing ovalue,
                 *       otherwise we get cache bus armageddon.
                 *
                 * NOTE: We must also ensure that the SHARED bit is cleared.
                 *       It is possible for it to wind up being set on a
                 *       shared lock override of the EXCLWAIT bits.
                 */
                ovalue = spin->lock;
                cpu_ccfence();
                if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0) {
                        uint32_t nvalue;

                        nvalue = ((ovalue - SPINLOCK_EXCLWAIT) | 1) &
                                 ~SPINLOCK_SHARED;
                        if (atomic_fcmpset_int(&spin->lock, &ovalue, nvalue))
                                break;
                        continue;
                }
                if (expbackoff > 6 + spin_backoff_max)
                        expbackoff = 6 + spin_backoff_max;
                if (expbackoff >= 6) {
                        if (indefinite_check(&info))
                                break;
                }
        }
        if (expbackoff >= 6)
                indefinite_done(&info);
}
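/*
 * Illustrative sketch only (not compiled): the two pacing mechanisms used
 * by _spin_lock_contested() above.  The expbackoff recurrence
 * ((n + 1) * 3 / 2) yields 1, 3, 6, 10, 16, 25, 39, ... which grows at
 * roughly n^1.5, and the TSC window test grants each cpu a rotating slot
 * of about (1 << spin_window_shift) clock cycles in which it may attempt
 * the atomic op.  example_in_tsc_window() is a placeholder name.
 */
#if 0
static __inline int
example_in_tsc_window(void)
{
        /*
         * With the default spin_window_shift of 8 this carves time into
         * ~256-cycle slices handed to the cpus round-robin.  A cpu
         * outside its slice burns expbackoff cpu_pause()s instead of
         * touching the lock cacheline.
         */
        return ((rdtsc() >> spin_window_shift) % ncpus == mycpuid);
}
#endif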
/*
 * The spin_lock_shared() inline was unable to acquire the lock and calls
 * this function with spin->lock already incremented.
 *
 * This is not in the critical path unless there is contention between
 * shared and exclusive holders.
 *
 * Exclusive locks have priority over shared locks.  However, this can
 * cause shared locks to be starved when large numbers of threads are
 * competing for exclusive locks, so the shared lock code uses TSC
 * windowing to selectively ignore the exclusive priority mechanism.
 * This has the effect of allowing a limited number of shared locks to
 * compete against exclusive waiters at any given moment.
 *
 * Note that shared locks do not implement exponential backoff.  Instead,
 * the shared lock simply polls the lock value.  One cpu_pause() is built
 * into indefinite_check().
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident)
{
        indefinite_info_t info;
        uint32_t ovalue;

        /*
         * Undo the inline's increment.
         */
        ovalue = atomic_fetchadd_int(&spin->lock, -1) - 1;

        indefinite_init(&info, ident, 0, 's');
        cpu_pause();

#ifdef DEBUG_LOCKS_LATENCY
        long j;
        for (j = spinlocks_add_latency; j > 0; --j)
                cpu_ccfence();
#endif

        for (;;) {
                /*
                 * Loop until we can acquire the shared spinlock.  Note that
                 * the low bits can be zero while the high EXCLWAIT bits are
                 * non-zero.  In this situation exclusive requesters have
                 * priority (otherwise shared users on multiple cpus can hog
                 * the spinlock).
                 *
                 * NOTE: Reading spin->lock prior to the swap is extremely
                 *       important on multi-chip/many-core boxes.  On 48-core
                 *       this one change improves fully concurrent all-cores
                 *       compiles by 100% or better.
                 *
                 *       I can't emphasize enough how important the pre-read
                 *       is in preventing hw cache bus armageddon on
                 *       multi-chip systems.  And on single-chip/multi-core
                 *       systems it just doesn't hurt.
                 */
                cpu_ccfence();

                /*
                 * Ignore the EXCLWAIT bits if we are inside our window.
                 */
                if (indefinite_uses_rdtsc &&
                    (ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
                    (rdtsc() >> spin_window_shift) % ncpus == mycpuid) {
                        if (atomic_fcmpset_int(&spin->lock, &ovalue,
                                               ovalue | SPINLOCK_SHARED | 1)) {
                                break;
                        }
                        continue;
                }

                /*
                 * Check ovalue tightly (no exponential backoff for shared
                 * locks; that would result in horrible performance.  Instead,
                 * shared locks depend on the exclusive priority mechanism
                 * to avoid starving exclusive locks).
                 */
                if (ovalue == 0) {
                        if (atomic_fcmpset_int(&spin->lock, &ovalue,
                                               SPINLOCK_SHARED | 1)) {
                                break;
                        }
                        continue;
                }

                /*
                 * If SHARED is already set, go for the increment, improving
                 * the exclusive to multiple-readers transition.
                 */
                if (ovalue & SPINLOCK_SHARED) {
                        ovalue = atomic_fetchadd_int(&spin->lock, 1);
                        /* ovalue += 1; NOT NEEDED */
                        if (ovalue & SPINLOCK_SHARED)
                                break;
                        ovalue = atomic_fetchadd_int(&spin->lock, -1);
                        ovalue += -1;
                        continue;
                }
                if (indefinite_check(&info))
                        break;
                /*
                 * ovalue was wrong anyway, just reload
                 */
                ovalue = spin->lock;
        }
        indefinite_done(&info);
}
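/*
 * Illustrative sketch only (not compiled): the pre-read pattern stressed
 * in the NOTE inside _spin_lock_shared_contested().  A plain load of
 * spin->lock leaves the cacheline in shared state on every spinning cpu,
 * so the locked-bus atomic is issued only when it has a chance of
 * succeeding.  An unconditional cmpxchg loop instead forces the line
 * exclusive on every iteration and ping-pongs it between cores and
 * sockets.  example_try_shared_once() is a placeholder name.
 */
#if 0
static __inline int
example_try_shared_once(struct spinlock *spin)
{
        uint32_t ovalue;

        ovalue = spin->lock;            /* plain load, line stays shared */
        cpu_ccfence();
        if (ovalue == 0 &&
            atomic_fcmpset_int(&spin->lock, &ovalue, SPINLOCK_SHARED | 1))
                return (1);             /* acquired */
        return (0);

        /*
         * The anti-pattern, for contrast:
         *
         *      while (atomic_cmpset_int(&spin->lock, 0,
         *                               SPINLOCK_SHARED | 1) == 0)
         *              cpu_pause();
         */
}
#endif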
/*
 * Automatically avoid use of rdtsc when running in a VM.
 */
static void
spinlock_sysinit(void *dummy __unused)
{
        if (vmm_guest)
                indefinite_uses_rdtsc = 0;
}
SYSINIT(spinsysinit, SI_BOOT2_PROC0, SI_ORDER_FIRST, spinlock_sysinit, NULL);

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *      1       Test the indefinite wait code
 *      2       Time the best-case exclusive lock overhead (spin_test_count)
 *      3       Time the best-case shared lock overhead (spin_test_count)
 */
#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
        struct spinlock spin;
        int error;
        int value = 0;
        int i;

        if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
                return (error);
        if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
                return (error);

        /*
         * Indefinite wait test
         */
        if (value == 1) {
                spin_init(&spin, "sysctllock");
                spin_lock(&spin);       /* force an indefinite wait */
                spin_lock_test_mode = 1;
                spin_lock(&spin);
                spin_unlock(&spin);     /* Clean up the spinlock count */
                spin_unlock(&spin);
                spin_lock_test_mode = 0;
        }

        /*
         * Time best-case exclusive spinlocks
         */
        if (value == 2) {
                globaldata_t gd = mycpu;

                spin_init(&spin, "sysctllocktest");
                for (i = spin_test_count; i > 0; --i) {
                        _spin_lock_quick(gd, &spin, "test");
                        spin_unlock_quick(gd, &spin);
                }
        }

        /*
         * Time best-case shared spinlocks (test mode 3 documented above;
         * mirrors the exclusive case using the shared quick variants)
         */
        if (value == 3) {
                globaldata_t gd = mycpu;

                spin_init(&spin, "sysctllocktest");
                for (i = spin_test_count; i > 0; --i) {
                        spin_lock_shared_quick(gd, &spin, "test");
                        spin_unlock_shared_quick(gd, &spin);
                }
        }

        return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif  /* INVARIANTS */
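/*
 * Example usage from a root shell (the iteration count is illustrative):
 *
 *      sysctl debug.spin_test_count=1000000
 *      sysctl debug.spin_lock_test=2
 */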