/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

#ifdef SMP

/*
 * Out-of-line contested-lock handlers, only needed when another cpu can
 * actually hold the lock (SMP).  The trylock variant returns a boolean
 * result; the lock variant spins until the lock is obtained.
 */
extern int spin_trylock_wr_contested2(globaldata_t gd);
extern void spin_lock_wr_contested2(struct spinlock *mtx);

#endif

#ifdef SMP

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 *
 * The critical-section count and the per-cpu held-spinlock count are
 * bumped BEFORE the lock word is touched; cpu_ccfence() between the two
 * increments keeps the compiler from reordering them.  On the contested
 * path the bookkeeping is left as-is and spin_trylock_wr_contested2()
 * decides the outcome (and is presumably responsible for unwinding the
 * counts on failure -- confirm against its definition).
 */
static __inline boolean_t
spin_trylock(struct spinlock *mtx)
{
	globaldata_t gd = mycpu;
	int value;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;
	/* value is only used for the zero test; non-zero means contested */
	if ((value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE)) != 0)
		return (spin_trylock_wr_contested2(gd));
#ifdef SMP
#ifdef DEBUG_LOCKS
	/*
	 * Record the acquired lock and the caller's pc in the first free
	 * slot of the per-thread debug stack.  NOTE(review): the nested
	 * #ifdef SMP is redundant here (we are already inside SMP), and
	 * this recording is skipped when the lock was obtained via the
	 * contested path above.
	 */
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = mtx;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
#endif
	return (TRUE);
}

#else

/*
 * UP build: no other cpu can hold the lock, so only the critical-section
 * and held-spinlock bookkeeping is needed; the lock word is never touched
 * and the attempt always succeeds.
 */
static __inline boolean_t
spin_trylock(struct spinlock *mtx)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;
	return (TRUE);
}

#endif

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *mtx)
{
#ifdef SMP
	int value;
#endif

	/*
	 * Bookkeeping first: enter a critical section and bump the per-cpu
	 * held-spinlock count before the lock word is touched.  The
	 * cpu_ccfence() keeps the compiler from reordering the increments.
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;
#ifdef SMP
	/* non-zero swap result means someone else holds it; spin it out */
	if ((value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE)) != 0)
		spin_lock_wr_contested2(mtx);
#ifdef DEBUG_LOCKS
	/*
	 * Record the lock and the caller's pc in the first free slot of
	 * the per-thread debug stack (matched by spin_unlock_quick()).
	 */
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = mtx;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
#endif
}

/*
 * Convenience wrapper: obtain an exclusive spinlock on the current cpu.
 */
static __inline void
spin_lock(struct spinlock *mtx)
{
	spin_lock_quick(mycpu, mtx);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *mtx)
{
#ifdef SMP
#ifdef DEBUG_LOCKS
	/*
	 * Clear the debug-stack slot recorded for this lock at acquisition
	 * time, before the lock word itself is released.
	 */
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == mtx)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Release the lock word with a plain store.  NOTE(review): no
	 * explicit memory barrier precedes this store -- presumably the
	 * target architectures' store ordering makes that safe; confirm
	 * per-arch.
	 */
	mtx->lock = 0;
#endif
	/*
	 * Drop the bookkeeping only after the lock word is cleared: the
	 * held-spinlock count, then (past the compiler fence) the
	 * critical-section count.
	 */
	KKASSERT(gd->gd_spinlocks_wr > 0);
	--gd->gd_spinlocks_wr;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

/*
 * Convenience wrapper: release an exclusive spinlock on the current cpu.
 */
static __inline void
spin_unlock(struct spinlock *mtx)
{
	spin_unlock_quick(mycpu, mtx);
}

/*
 * Initialize a spinlock to the unlocked state.
 */
static __inline void
spin_init(struct spinlock *mtx)
{
	mtx->lock = 0;
}

/*
 * Tear down a spinlock.  Nothing to release; kept for API symmetry.
 */
static __inline void
spin_uninit(struct spinlock *mtx)
{
	/* unused */
}

#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */