/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include "thr_private.h"

#define cpu_ccfence() __asm __volatile("" : : : "memory")

/*
 * This function is used to acquire a contested lock.
 *
 * A *mtx value equal to the (30-bit masked) owner id indicates locked
 * normally.  A *mtx value with bit 0x40000000 set indicates locked and
 * contested (one or more waiters may be sleeping on it).
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
{
	int v;
	int errval;
	int ret = 0;
	int retry = 4;		/* yield attempts before sleeping in the kernel */

	v = *mtx;
	cpu_ccfence();		/* compiler barrier: force a real load of *mtx */
	id &= 0x3FFFFFFF;	/* owner id must not collide with the 0x40000000
				 * contested bit */

	for (;;) {
		cpu_pause();
		if (v == 0) {
			/* Unlocked; try to install our id as the owner. */
			if (atomic_fcmpset_int(mtx, &v, id))
				break;
			/* fcmpset failure reloaded v; re-evaluate. */
			continue;
		}
		if (--retry) {
			/* Retries remain: yield the cpu and re-sample. */
			sched_yield();
			v = *mtx;
			continue;
		}

		/*
		 * Set the waiting bit. If the fcmpset fails v is loaded
		 * with the current content of the mutex, and if the waiting
		 * bit is already set, we can also sleep.
		 */
		if (atomic_fcmpset_int(mtx, &v, v|0x40000000) ||
		    (v & 0x40000000)) {
			if (timo == 0) {
				/*
				 * No timeout requested.  Sleep and simply
				 * retry on wakeup; the error return (e.g.
				 * EINTR) is deliberately ignored.
				 */
				_umtx_sleep_err(mtx, v|0x40000000, timo);
			} else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
				/*
				 * EAGAIN from the sleep is treated as
				 * timeout expiry.  Make one final attempt
				 * to grab an unlocked mutex before
				 * reporting ETIMEDOUT.
				 */
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, id))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
		/* Woken up (or sleep not taken): start a fresh retry cycle. */
		retry = 4;
	}
	return (ret);
}

/*
 * Inline followup when releasing a mutex. The mutex has been released
 * but 'v' either doesn't match id or needs a wakeup.
 *
 * If the contested bit was set, wake up a sleeper and strip the bit
 * before validating ownership.
 */
void
__thr_umtx_unlock(volatile umtx_t *mtx, int v, int id)
{
	if (v & 0x40000000) {
		_umtx_wakeup_err(mtx, 0);
		v &= 0x3FFFFFFF;
	}
	THR_ASSERT(v == id, "thr_umtx_unlock: wrong owner");
}

/*
 * Low level timed umtx lock. This function must never return
 * EINTR.
 *
 * The deadline is computed once up front; the lock attempt is then made
 * in bounded chunks (timo passed to __thr_umtx_lock is in microseconds,
 * derived from the remaining nanoseconds / 1000), re-checking the
 * remaining time after each EINTR/ETIMEDOUT until the deadline passes.
 */
int
__thr_umtx_timedlock(volatile umtx_t *mtx, int id,
		     const struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	int timo, ret;

	/* A zero or negative timeout times out immediately. */
	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)) {
		return (ETIMEDOUT);
	}

	/* XXX there should have MONO timer! */
	clock_gettime(CLOCK_REALTIME, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);	/* ts = absolute deadline */
	ts2 = *timeout;				/* ts2 = remaining time */

	id &= 0x3FFFFFFF;	/* keep the contested bit clear in the id */

	for (;;) {
		/*
		 * Chunk the sleep: at most the remaining fractional
		 * second (in usec, min 1), else a 1-second chunk.
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}
		ret = __thr_umtx_lock(mtx, id, timo);
		if (ret != EINTR && ret != ETIMEDOUT)
			break;
		/* Recompute remaining time; give up past the deadline. */
		clock_gettime(CLOCK_REALTIME, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 ||
		    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

/*
 * Regular umtx wait that cannot return EINTR.
 *
 * Sleeps until *mtx no longer equals exp, the (optional) timeout
 * expires, or the kernel reports EBUSY (presumably meaning the value
 * already changed -- confirm against umtx(2)).  With a NULL timeout the
 * sleep is done in 10000000-usec chunks, re-checking *mtx each time,
 * and EINTR is swallowed.  With a timeout, EINTR is passed through.
 */
int
_thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
	       int clockid)
{
	struct timespec ts, ts2, ts3;
	int timo, errval, ret = 0;

	cpu_ccfence();		/* force a real load of *mtx below */
	if (*mtx != exp)
		return (0);

	if (timeout == NULL) {
		/*
		 * NOTE: If no timeout, EINTR cannot be returned. Ignore
		 *	 EINTR.
		 */
		while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
			if (errval == EBUSY)
				break;
#if 0
			if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
				if (*mtx != exp) {
					fprintf(stderr,
					    "thr_umtx_wait: FAULT VALUE CHANGE "
					    "%d -> %d oncond %p\n",
					    exp, *mtx, mtx);
				}
			}
#endif
			/* Value changed while we slept: done waiting. */
			if (*mtx != exp)
				return(0);
		}
		return (ret);
	}

	/*
	 * Timed waits can return EINTR
	 */
	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
		return (ETIMEDOUT);

	clock_gettime(clockid, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);	/* ts = absolute deadline */
	ts2 = *timeout;				/* ts2 = remaining time */

	for (;;) {
		/*
		 * Chunk the sleep as in __thr_umtx_timedlock: remaining
		 * fractional second in usec (min 1), else 1 second.
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}

		if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
			if (errval == EBUSY) {
				ret = 0;
				break;
			}
			if (errval == EINTR) {
				ret = EINTR;
				break;
			}
		}

		/* Recompute remaining time; give up past the deadline. */
		clock_gettime(clockid, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}

/*
 * Simple version without a timeout which can also return EINTR.
 *
 * Sleeps in 10000000-usec chunks until *mtx no longer equals exp,
 * the kernel reports EBUSY, or the sleep is interrupted (EINTR is
 * returned to the caller).
 */
int
_thr_umtx_wait_intr(volatile umtx_t *mtx, int exp)
{
	int ret = 0;
	int errval;

	cpu_ccfence();		/* force a real load of *mtx below */
	for (;;) {
		if (*mtx != exp)
			return (0);
		errval = _umtx_sleep_err(mtx, exp, 10000000);
		if (errval == 0)
			break;
		if (errval == EBUSY)
			break;
		if (errval == EINTR) {
			ret = errval;
			break;
		}
		cpu_ccfence();	/* re-load *mtx on the next iteration */
	}
	return (ret);
}

/*
 * Wake up 'count' threads sleeping on *mtx (thin wrapper around the
 * kernel wakeup primitive).
 */
void
_thr_umtx_wake(volatile umtx_t *mtx, int count)
{
	_umtx_wakeup_err(mtx, count);
}