/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 *
 * (A simplified sketch of the shared-acquire sequence follows this comment.)
 */
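/*
 * Illustrative sketch only, not compiled: a stripped-down version of the
 * shared-acquire sequence described above.  It omits the critical-section
 * and gd_spinlocks bookkeeping, the exclusive-priority (EXCLWAIT) handling,
 * TSC windowing, and indefinite-wait accounting performed by the real paths
 * (spin_lock_shared() in sys/spinlock2.h and _spin_lock_shared_contested()
 * below).  The function name is hypothetical.
 */
#if 0
static void
spin_lock_shared_sketch(struct spinlock *spin)
{
	uint32_t ovalue;

	/* Optimistic fast path: bump the count and check the result */
	ovalue = atomic_fetchadd_int(&spin->counta, 1);
	if (ovalue == 0 || (ovalue & SPINLOCK_SHARED))
		return;		/* lock was idle or already shared */

	/* Failure case: back the count out, then spin */
	atomic_add_int(&spin->counta, -1);
	for (;;) {
		ovalue = spin->counta;
		cpu_ccfence();
		if (ovalue == 0 &&
		    atomic_cmpset_int(&spin->counta, 0, SPINLOCK_SHARED | 1))
			return;	/* 0 -> SPINLOCK_SHARED|1 transition */
		if ((ovalue & SPINLOCK_SHARED) &&
		    atomic_cmpset_int(&spin->counta, ovalue, ovalue + 1))
			return;	/* join the existing shared holders */
		cpu_pause();
	}
}
#endif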
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/indefinite2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)	\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

__read_frequently static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

__read_frequently static long spin_backoff_max = 4096;
SYSCTL_LONG(_debug, OID_AUTO, spin_backoff_max, CTLFLAG_RW,
    &spin_backoff_max, 0,
    "Spinlock exponential backoff limit");

/* 1 << n clock cycles, approx */
__read_frequently static long spin_window_shift = 8;
SYSCTL_LONG(_debug, OID_AUTO, spin_window_shift, CTLFLAG_RW,
    &spin_window_shift, 0,
    "Spinlock TSC windowing");

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*
	 * Handle degenerate case, else fail.
	 */
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
		return TRUE;
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);

	return (FALSE);
}

/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->counta already incremented, passing (spin->counta - 1)
 * to the function (the result of the inline's fetchadd).
 *
 * Note that we implement both exclusive and shared spinlocks, so we cannot
 * use atomic_swap_int().  Instead, we try to use atomic_fetchadd_int()
 * to put most of the burden on the cpu.  Atomic_cmpset_int() (cmpxchg)
 * can cause a lot of unnecessary looping in situations where it is just
 * trying to increment the count.
 *
 * Similarly, we leave the SHARED flag intact and incur slightly more
 * overhead when switching from shared to exclusive.  This allows us to
 * use atomic_fetchadd_int() for both spinlock types in the critical
 * path.
 *
 * The exponential (n^1.5) backoff algorithm is designed to both reduce
 * cache bus contention between cpu cores and sockets, and to allow some
 * bursting of exclusive locks in heavily contended situations to improve
 * performance.  (An illustrative sketch of the backoff progression follows
 * the function below.)
 *
 * The exclusive lock priority mechanism prevents even heavily contended
 * exclusive locks from being starved by shared locks.
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
	indefinite_info_t info;
	uint32_t ovalue;
	long expbackoff;
	long loop;

	/*
	 * WARNING! Caller has already incremented the lock.  We must
	 *	    increment the count value (from the inline's fetch-add)
	 *	    to match.
	 *
	 * Handle the degenerate case where the spinlock is flagged SHARED
	 * with only our reference.  We can convert it to EXCLUSIVE.
	 */
	if (value == (SPINLOCK_SHARED | 1) - 1) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED | 1, 1))
			return;
	}
	/* ++value; value not used after this */
	info.type = 0;		/* avoid improper gcc warning */
	info.ident = NULL;	/* avoid improper gcc warning */
	expbackoff = 0;

	/*
	 * Transfer our exclusive request to the high bits and clear the
	 * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
	 * appear exclusive, preventing any NEW shared or exclusive
	 * spinlocks from being obtained while we wait for existing
	 * shared or exclusive holders to unlock.
	 *
	 * Don't tread on earlier exclusive waiters by stealing the lock
	 * away early if the low bits happen to now be 1.
	 *
	 * The shared unlock understands that this may occur.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
	ovalue += SPINLOCK_EXCLWAIT - 1;
	if (ovalue & SPINLOCK_SHARED) {
		atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
		ovalue &= ~SPINLOCK_SHARED;
	}

	for (;;) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff == 6)		/* 1, 3, 6, 10, ... */
			indefinite_init(&info, ident, 0, 'S');
		if ((rdtsc() >> spin_window_shift) % ncpus != mycpuid) {
			for (loop = expbackoff; loop; --loop)
				cpu_pause();
		}
		/*cpu_lfence();*/

		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit reservation to the low bits.
		 *
		 * NOTE: Avoid unconditional atomic op by testing ovalue,
		 *	 otherwise we get cache bus armageddon.
		 *
		 * NOTE: We must also ensure that the SHARED bit is cleared.
		 *	 It is possible for it to wind up being set on a
		 *	 shared lock override of the EXCLWAIT bits.
		 */
		ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0) {
			uint32_t nvalue;

			nvalue = ((ovalue - SPINLOCK_EXCLWAIT) | 1) &
				 ~SPINLOCK_SHARED;
			if (atomic_fcmpset_int(&spin->counta, &ovalue, nvalue))
				break;
			continue;
		}
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		if (expbackoff >= 6) {
			if (indefinite_check(&info))
				break;
		}
	}
	if (expbackoff >= 6)
		indefinite_done(&info);
}
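/*
 * Illustrative sketch only, not compiled: the growth of the 'expbackoff'
 * term used by _spin_lock_contested() above.  The (expbackoff + 1) * 3 / 2
 * update yields 1, 3, 6, 10, 16, 25, 39, ... pauses per pass, i.e. roughly
 * a 1.5x increase per pass, clamped at 6 + spin_backoff_max.  The function
 * name is hypothetical.
 */
#if 0
static void
spin_backoff_progression_demo(void)
{
	long expbackoff = 0;
	int pass;

	for (pass = 1; pass <= 10; ++pass) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		kprintf("pass %d: %ld cpu_pause() calls\n", pass, expbackoff);
	}
}
#endif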
/*
 * The spin_lock_shared() inline was unable to acquire the lock and calls
 * this function with spin->counta already incremented.
 *
 * This is not in the critical path unless there is contention between
 * shared and exclusive holders.
 *
 * Exclusive locks have priority over shared locks.  However, this can
 * cause shared locks to be starved when large numbers of threads are
 * competing for exclusive locks, so the shared lock code uses TSC-windowing
 * to selectively ignore the exclusive priority mechanism.  This has the
 * effect of allowing a limited number of shared locks to compete against
 * exclusive waiters at any given moment.  (With the default
 * spin_window_shift of 8, each cpu owns a window of roughly 256 TSC cycles,
 * rotating round-robin across the cpus.)
 *
 * Note that shared locks do not implement exponential backoff.  Instead,
 * the shared lock simply polls the lock value.  One cpu_pause() is built
 * into indefinite_check().
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident)
{
	indefinite_info_t info;
	uint32_t ovalue;

	/*
	 * Undo the inline's increment.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, -1) - 1;

	indefinite_init(&info, ident, 0, 's');
	cpu_pause();

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	for (;;) {
		/*
		 * Loop until we can acquire the shared spinlock.  Note that
		 * the low bits can be zero while the high EXCLWAIT bits are
		 * non-zero.  In this situation exclusive requesters have
		 * priority (otherwise shared users on multiple cpus can hog
		 * the spinlock).
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		cpu_ccfence();

		/*
		 * Ignore the EXCLWAIT bits if we are inside our window.
		 */
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    (rdtsc() >> spin_window_shift) % ncpus == mycpuid) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       ovalue | SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * Check ovalue tightly (no exponential backoff for shared
		 * locks; that would result in horrible performance.  Instead,
		 * shared locks depend on the exclusive priority mechanism
		 * to avoid starving exclusive locks).
		 */
		if (ovalue == 0) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * If SHARED is already set, go for the increment, improving
		 * the exclusive to multiple-readers transition.
		 */
		if (ovalue & SPINLOCK_SHARED) {
			ovalue = atomic_fetchadd_int(&spin->counta, 1);
			/* ovalue += 1; NOT NEEDED */
			if (ovalue & SPINLOCK_SHARED)
				break;
			ovalue = atomic_fetchadd_int(&spin->counta, -1);
			ovalue += -1;
			continue;
		}
		if (indefinite_check(&info))
			break;
		/*
		 * ovalue was wrong anyway, just reload
		 */
		ovalue = spin->counta;
	}
	indefinite_done(&info);
}

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin, "sysctllock");
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin, "sysctllocktest");
		for (i = spin_test_count; i > 0; --i) {
			_spin_lock_quick(gd, &spin, "test");
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
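/*
 * Illustrative usage sketch only, not compiled: how a typical caller
 * exercises the exclusive and shared paths implemented above.  Everything
 * here other than the spinlock API itself (SPINLOCK_INITIALIZER,
 * spin_lock()/spin_unlock(), spin_lock_shared()/spin_unlock_shared()) is
 * hypothetical.
 */
#if 0
static struct spinlock demo_spin = SPINLOCK_INITIALIZER(demo_spin, "demo");
static int demo_counter;

static void
demo_writer(void)
{
	spin_lock(&demo_spin);		/* exclusive; may contest above */
	++demo_counter;
	spin_unlock(&demo_spin);
}

static int
demo_reader(void)
{
	int v;

	spin_lock_shared(&demo_spin);	/* shared; may contest above */
	v = demo_counter;
	spin_unlock_shared(&demo_spin);
	return (v);
}
#endif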