/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 */
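/*
 * Rough sketch of how spin->counta is partitioned by the code below.
 * The authoritative bit definitions live in <sys/spinlock.h>; the
 * grouping shown here is descriptive only:
 *
 *	SPINLOCK_SHARED			flag bit, set while the lock is
 *					held shared.
 *	SPINLOCK_EXCLWAIT and above	high bits counting exclusive waiters
 *					parked in _spin_lock_contested().
 *	counta & (SPINLOCK_EXCLWAIT-1)	low bits holding the current hold
 *					count (1 for an exclusive holder,
 *					N for N shared holders).
 */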
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/indefinite2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
	    &spinlocks_add_latency, 0,
	    "Add spinlock latency");

#endif

static long spin_backoff_max = 4096;
SYSCTL_LONG(_debug, OID_AUTO, spin_backoff_max, CTLFLAG_RW,
	    &spin_backoff_max, 0,
	    "Spinlock exponential backoff limit");
static long spin_window_shift = 8;	/* 1 << n clock cycles, approx */
SYSCTL_LONG(_debug, OID_AUTO, spin_window_shift, CTLFLAG_RW,
	    &spin_window_shift, 0,
	    "Spinlock TSC windowing");

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*
	 * Handle degenerate case, else fail.
	 */
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
		return TRUE;
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);

	return (FALSE);
}

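/*
 * For orientation, a rough sketch of the spin_lock() fast path that hands
 * off to _spin_lock_contested() below.  The real inline lives in
 * <sys/spinlock2.h>; this is illustrative only and elides details such as
 * the critical section and gd_spinlocks bookkeeping:
 *
 *	ovalue = atomic_fetchadd_int(&spin->counta, 1);
 *	if (ovalue != 0)
 *		_spin_lock_contested(spin, ident, ovalue);
 *
 * That is, the inline has already added our reference to counta and passes
 * the pre-increment value down when the lock was not immediately available.
 */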
/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->counta already incremented, passing (spin->counta - 1)
 * to the function (the result of the inline's fetchadd).
 *
 * Note that we implement both exclusive and shared spinlocks, so we cannot
 * use atomic_swap_int().  Instead, we try to use atomic_fetchadd_int()
 * to put most of the burden on the cpu.  Atomic_cmpset_int() (cmpxchg)
 * can cause a lot of unnecessary looping in situations where it is just
 * trying to increment the count.
 *
 * Similarly, we leave the SHARED flag intact and incur slightly more
 * overhead when switching from shared to exclusive.  This allows us to
 * use atomic_fetchadd_int() for both spinlock types in the critical
 * path.
 *
 * The exponential (n^1.5) backoff algorithm is designed to both reduce
 * cache bus contention between cpu cores and sockets, and to allow some
 * bursting of exclusive locks in heavily contended situations to improve
 * performance.
 *
 * The exclusive lock priority mechanism prevents even heavily contended
 * exclusive locks from being starved by shared locks.
 */
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
	indefinite_info_t info;
	uint32_t ovalue;
	long expbackoff;
	long loop;

	/*
	 * WARNING! Caller has already incremented the lock.  We must
	 *	    increment the count value (from the inline's fetch-add)
	 *	    to match.
	 *
	 * Handle the degenerate case where the spinlock is flagged SHARED
	 * with only our reference.  We can convert it to EXCLUSIVE.
	 */
	if (value == (SPINLOCK_SHARED | 1) - 1) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED | 1, 1))
			return;
	}
	/* ++value; value not used after this */
	info.type = 0;		/* avoid improper gcc warning */
	info.ident = NULL;	/* avoid improper gcc warning */
	expbackoff = 0;

	/*
	 * Transfer our exclusive request to the high bits and clear the
	 * SPINLOCK_SHARED bit if it was set.  This makes the spinlock
	 * appear exclusive, preventing any NEW shared or exclusive
	 * spinlocks from being obtained while we wait for existing
	 * shared or exclusive holders to unlock.
	 *
	 * Don't tread on earlier exclusive waiters by stealing the lock
	 * away early if the low bits happen to now be 1.
	 *
	 * The shared unlock understands that this may occur.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
	ovalue += SPINLOCK_EXCLWAIT - 1;
	if (ovalue & SPINLOCK_SHARED) {
		atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
		ovalue &= ~SPINLOCK_SHARED;
	}

	for (;;) {
		expbackoff = (expbackoff + 1) * 3 / 2;
		if (expbackoff == 6)		/* 1, 3, 6, 10, ... */
			indefinite_init(&info, ident, 0, 'S');
		if ((rdtsc() >> spin_window_shift) % ncpus != mycpuid) {
			for (loop = expbackoff; loop; --loop)
				cpu_pause();
		}
		/*cpu_lfence();*/

		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit reservation to the low bits.
		 *
		 * NOTE: Avoid unconditional atomic op by testing ovalue,
		 *	 otherwise we get cache bus armageddon.
		 *
		 * NOTE: We must also ensure that the SHARED bit is cleared.
		 *	 It is possible for it to wind up being set on a
		 *	 shared lock override of the EXCLWAIT bits.
		 */
		ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0) {
			uint32_t nvalue;

			nvalue = ((ovalue - SPINLOCK_EXCLWAIT) | 1) &
				 ~SPINLOCK_SHARED;
			if (atomic_fcmpset_int(&spin->counta, &ovalue, nvalue))
				break;
			continue;
		}
		if (expbackoff > 6 + spin_backoff_max)
			expbackoff = 6 + spin_backoff_max;
		if (expbackoff >= 6) {
			if (indefinite_check(&info))
				break;
		}
	}
	if (expbackoff >= 6)
		indefinite_done(&info);
}

/*
 * The spin_lock_shared() inline was unable to acquire the lock and calls
 * this function with spin->counta already incremented.
 *
 * This is not in the critical path unless there is contention between
 * shared and exclusive holders.
 *
 * Exclusive locks have priority over shared locks.  However, this can
 * cause shared locks to be starved when large numbers of threads are
 * competing for exclusive locks, so the shared lock code uses TSC-windowing
 * to selectively ignore the exclusive priority mechanism.  This has the
 * effect of allowing a limited number of shared locks to compete against
 * exclusive waiters at any given moment.
 *
 * Note that shared locks do not implement exponential backoff.  Instead,
 * the shared lock simply polls the lock value.  One cpu_pause() is built
 * into indefinite_check().
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident)
{
	indefinite_info_t info;
	uint32_t ovalue;

	/*
	 * Undo the inline's increment.
	 */
	ovalue = atomic_fetchadd_int(&spin->counta, -1) - 1;

	indefinite_init(&info, ident, 0, 's');
	cpu_pause();

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	for (;;) {
		/*
		 * Loop until we can acquire the shared spinlock.  Note that
		 * the low bits can be zero while the high EXCLWAIT bits are
		 * non-zero.  In this situation exclusive requesters have
		 * priority (otherwise shared users on multiple cpus can hog
		 * the spinlock).
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		cpu_ccfence();

		/*
		 * Ignore the EXCLWAIT bits if we are inside our window.
		 */
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    (rdtsc() >> spin_window_shift) % ncpus == mycpuid) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       ovalue | SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * Check ovalue tightly (no exponential backoff for shared
		 * locks, as that would result in horrible performance.
		 * Instead, shared locks depend on the exclusive priority
		 * mechanism to avoid starving exclusive locks).
		 */
		if (ovalue == 0) {
			if (atomic_fcmpset_int(&spin->counta, &ovalue,
					       SPINLOCK_SHARED | 1)) {
				break;
			}
			continue;
		}

		/*
		 * If SHARED is already set, go for the increment, improving
		 * the exclusive to multiple-readers transition.
		 */
		if (ovalue & SPINLOCK_SHARED) {
			ovalue = atomic_fetchadd_int(&spin->counta, 1);
			/* ovalue += 1; NOT NEEDED */
			if (ovalue & SPINLOCK_SHARED)
				break;
			ovalue = atomic_fetchadd_int(&spin->counta, -1);
			ovalue += -1;
			continue;
		}
		if (indefinite_check(&info))
			break;
		/*
		 * ovalue was wrong anyway, just reload
		 */
		ovalue = spin->counta;
	}
	indefinite_done(&info);
}

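/*
 * Illustrative example of the TSC windowing used above (the cpu count is a
 * hypothetical value, chosen only to show the mechanism): with the default
 * spin_window_shift of 8 and, say, ncpus = 4, time is divided into windows
 * of roughly 256 TSC cycles whose ownership rotates round-robin among the
 * cpus.  A cpu spinning for a shared lock may ignore the EXCLWAIT priority
 * bits only during its own window, so at most one cpu's worth of shared
 * requests competes with exclusive waiters at any given instant.
 */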
/*
 * If INVARIANTS is enabled, various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin, "sysctllock");
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin, "sysctllocktest");
		for (i = spin_test_count; i > 0; --i) {
			_spin_lock_quick(gd, &spin, "test");
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */
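/*
 * Note: test mode 3 (best-case shared lock overhead) is documented above
 * but not implemented in this section.  A minimal sketch, assuming shared
 * quick-lock helpers in <sys/spinlock2.h> that mirror the exclusive ones
 * used by the mode 2 test (the names and signatures here are assumptions,
 * not verified), would look like:
 *
 *	if (value == 3) {
 *		globaldata_t gd = mycpu;
 *
 *		spin_init(&spin, "sysctllocktest");
 *		for (i = spin_test_count; i > 0; --i) {
 *			spin_lock_shared_quick(gd, &spin, "test");
 *			spin_unlock_shared_quick(gd, &spin);
 *		}
 *	}
 */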