/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SHARED_SPINLOCK bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SHARED_SPINLOCK|1, or until we find SHARED_SPINLOCK
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SHARED_SPINLOCK flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
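 *
 * Illustrative transition sequence for a contended exclusive acquisition
 * (values are symbolic, not the actual bit definitions):
 *
 *	counta == SHARED|2		two shared holders
 *	inline fetchadd(+1)		counta == SHARED|3, acquisition fails
 *	fetchadd(EXCLWAIT-1) and
 *	clear SHARED			counta == EXCLWAIT+2, new lockers
 *					are held off
 *	existing holders unlock		counta == EXCLWAIT+0 (low bits 0)
 *	cmpset moves the reservation
 *	back to the low bits		counta == 1, exclusively owned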
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/indefinite2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)	\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

__read_frequently static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

__read_frequently static long spin_backoff_max = 4096;
SYSCTL_LONG(_debug, OID_AUTO, spin_backoff_max, CTLFLAG_RW,
    &spin_backoff_max, 0,
    "Spinlock exponential backoff limit");

/* 1 << n clock cycles, approx */
__read_frequently static long spin_window_shift = 8;
SYSCTL_LONG(_debug, OID_AUTO, spin_window_shift, CTLFLAG_RW,
    &spin_window_shift, 0,
    "Spinlock TSC windowing");

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*
	 * Handle degenerate case, else fail.
	 */
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
		return TRUE;
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);

	return (FALSE);
}

/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->counta already incremented, passing (spin->counta - 1)
 * to the function (the result of the inline's fetchadd).
 *
 * Note that we implement both exclusive and shared spinlocks, so we cannot
 * use atomic_swap_int().  Instead, we try to use atomic_fetchadd_int()
 * to put most of the burden on the cpu.  Atomic_cmpset_int() (cmpxchg)
 * can cause a lot of unnecessary looping in situations where it is just
 * trying to increment the count.
 *
 * Similarly, we leave the SHARED flag intact and incur slightly more
 * overhead when switching from shared to exclusive.  This allows us to
 * use atomic_fetchadd_int() for both spinlock types in the critical
 * path.
 *
 * The exponential (n^1.5) backoff algorithm is designed to both reduce
 * cache bus contention between cpu cores and sockets, and to allow some
 * bursting of exclusive locks in heavily contended situations to improve
 * performance.
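 *
 * For reference, the pause count produced by the backoff loop below grows
 * by roughly 1.5x per iteration: 1, 3, 6, 10, 16, 25, 39, 60, ...,
 * clamped at (6 + spin_backoff_max).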
 *
 * The exclusive lock priority mechanism prevents even heavily contended
 * exclusive locks from being starved by shared locks.
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
	indefinite_info_t info;
	uint32_t ovalue;
	long expbackoff;
	long loop;

	/*
	 * WARNING! Caller has already incremented the lock.  We must
	 *	    increment the count value (from the inline's fetch-add)
	 *	    to match.
	 *
	 * Handle the degenerate case where the spinlock is flagged SHARED
	 * with only our reference.  We can convert it to EXCLUSIVE.
	 */
	if (value == (SPINLOCK_SHARED | 1) - 1) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED | 1, 1))
			return;
	}
	/* ++value; value not used after this */
	info.type = 0;		/* avoid improper gcc warning */
	info.ident = NULL;	/* avoid improper gcc warning */
	expbackoff = 0;

	/*
	 * Transfer our exclusive request to the high bits and clear the
	 * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
	 * appear exclusive, preventing any NEW shared or exclusive
	 * spinlocks from being obtained while we wait for existing
	 * shared or exclusive holders to unlock.
	 *
	 * Don't tread on earlier exclusive waiters by stealing the lock
	 * away early if the low bits happen to now be 1.
	 *
	 * The shared unlock understands that this may occur.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
	ovalue += SPINLOCK_EXCLWAIT - 1;
	if (ovalue & SPINLOCK_SHARED) {
		atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
		ovalue &= ~SPINLOCK_SHARED;
	}

	for (;;) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff == 6)		/* 1, 3, 6, 10, ... */
			indefinite_init(&info, ident, 0, 'S');
		if ((rdtsc() >> spin_window_shift) % ncpus != mycpuid) {
			for (loop = expbackoff; loop; --loop)
				cpu_pause();
		}
		/*cpu_lfence();*/

		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit reservation to the low bits.
		 *
		 * NOTE: Avoid unconditional atomic op by testing ovalue,
		 *	 otherwise we get cache bus armageddon.
		 *
		 * NOTE: We must also ensure that the SHARED bit is cleared.
		 *	 It is possible for it to wind up being set on a
		 *	 shared lock override of the EXCLWAIT bits.
		 */
		ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0) {
			uint32_t nvalue;

			nvalue = ((ovalue - SPINLOCK_EXCLWAIT) | 1) &
				 ~SPINLOCK_SHARED;
			if (atomic_fcmpset_int(&spin->counta, &ovalue, nvalue))
				break;
			continue;
		}
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		if (expbackoff >= 6) {
			if (indefinite_check(&info))
				break;
		}
	}
	if (expbackoff >= 6)
		indefinite_done(&info);
}

/*
 * The spin_lock_shared() inline was unable to acquire the lock and calls
 * this function with spin->counta already incremented.
 *
 * This is not in the critical path unless there is contention between
 * shared and exclusive holders.
 *
 * Exclusive locks have priority over shared locks.  However, this can
 * cause shared locks to be starved when large numbers of threads are
 * competing for exclusive locks so the shared lock code uses TSC-windowing
 * to selectively ignore the exclusive priority mechanism.  This has the
 * effect of allowing a limited number of shared locks to compete against
 * exclusive waiters at any given moment.
 *
 * Note that shared locks do not implement exponential backoff.  Instead,
 * the shared lock simply polls the lock value.  One cpu_pause() is built
 * into indefinite_check().
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident)
{
	indefinite_info_t info;
	uint32_t ovalue;

	/*
	 * Undo the inline's increment.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, -1) - 1;

	indefinite_init(&info, ident, 0, 's');
	cpu_pause();

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	for (;;) {
		/*
		 * Loop until we can acquire the shared spinlock.  Note that
		 * the low bits can be zero while the high EXCLWAIT bits are
		 * non-zero.  In this situation exclusive requesters have
		 * priority (otherwise shared users on multiple cpus can hog
		 * the spinlock).
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		cpu_ccfence();

		/*
		 * Ignore the EXCLWAIT bits if we are inside our window.
		 */
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    (rdtsc() >> spin_window_shift) % ncpus == mycpuid) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       ovalue | SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * Check ovalue tightly (no exponential backoff for shared
		 * locks, that would result in horrible performance.  Instead,
		 * shared locks depend on the exclusive priority mechanism
		 * to avoid starving exclusive locks).
		 */
		if (ovalue == 0) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * If SHARED is already set, go for the increment, improving
		 * the exclusive to multiple-readers transition.
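		 *
		 * If SHARED turns out to have been cleared between our read
		 * and the fetchadd below (an exclusive contender got in),
		 * the increment is backed out and we retry.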
		 */
		if (ovalue & SPINLOCK_SHARED) {
			ovalue = atomic_fetchadd_int(&spin->counta, 1);
			/* ovalue += 1; NOT NEEDED */
			if (ovalue & SPINLOCK_SHARED)
				break;
			ovalue = atomic_fetchadd_int(&spin->counta, -1);
			ovalue += -1;
			continue;
		}
		if (indefinite_check(&info))
			break;
		/*
		 * ovalue was wrong anyway, just reload
		 */
		ovalue = spin->counta;
	}
	indefinite_done(&info);
}

/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin, "sysctllock");
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin, "sysctllocktest");
		for (i = spin_test_count; i > 0; --i) {
			_spin_lock_quick(gd, &spin, "test");
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
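
#if 0
/*
 * Usage sketch (not compiled): how a consumer typically drives the
 * exclusive and shared paths implemented above.  The lock name
 * "example_spin" and the helper function are hypothetical and exist only
 * for illustration; the calls themselves are the public spinlock API.
 */
static struct spinlock example_spin =
	SPINLOCK_INITIALIZER(example_spin, "example_spin");

static void
example_spin_usage(void)
{
	/*
	 * Exclusive section.  On contention the inline falls through to
	 * _spin_lock_contested().
	 */
	spin_lock(&example_spin);
	/* ... modify protected state ... */
	spin_unlock(&example_spin);

	/*
	 * Shared section, multiple readers may hold the lock at once.  On
	 * contention the inline falls through to
	 * _spin_lock_shared_contested().
	 */
	spin_lock_shared(&example_spin);
	/* ... read protected state ... */
	spin_unlock_shared(&example_spin);
}
#endif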