1 /*-
2  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3  * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
4  *
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  */
29 
30 #include <assert.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <sys/time.h>
34 
35 #include "thr_private.h"
36 
37 #define cpu_ccfence()	__asm __volatile("" : : : "memory")
38 
39 /*
40  * This function is used to acquire a contested lock.
41  */
int
__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
{
	int v;
	int errval;
	int ret = 0;
	int retry = 4;

	/*
	 * Acquire a contested umtx.
	 *
	 * mtx  - the lock word.  0 means unowned; bits 0-29 hold the
	 *	  owner id; bit 30 (0x40000000) flags the presence of
	 *	  waiters.
	 * id   - our owner identifier (masked to 30 bits below).
	 * timo - sleep timeout handed to _umtx_sleep_err().  0 means
	 *	  sleep without a timeout and never return ETIMEDOUT.
	 *
	 * Returns 0 on acquisition, ETIMEDOUT if timo expired.
	 */
	v = *mtx;
	cpu_ccfence();		/* re-read *mtx around the fence, not a cached copy */
	id &= 0x3FFFFFFF;	/* keep id clear of the waiters bit */

	for (;;) {
		cpu_pause();
		if (v == 0) {
			/*
			 * Lock appears free; try to claim it.  On failure
			 * fcmpset reloads v with the current contents and
			 * we re-evaluate.
			 */
			if (atomic_fcmpset_int(mtx, &v, id))
				break;
			continue;
		}
		if (--retry) {
			/* Spin a few times, yielding, before sleeping. */
			sched_yield();
			v = *mtx;
			continue;
		}

		/*
		 * Set the waiting bit.  If the fcmpset fails v is loaded
		 * with the current content of the mutex, and if the waiting
		 * bit is already set, we can also sleep.
		 */
		if (atomic_fcmpset_int(mtx, &v, v|0x40000000) ||
		    (v & 0x40000000)) {
			if (timo == 0) {
				/* Untimed sleep; wakeup or spurious return re-loops. */
				_umtx_sleep_err(mtx, v|0x40000000, timo);
			} else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
				/*
				 * NOTE(review): errval > 0 appears to be an
				 * errno-style code from the sleep; EAGAIN is
				 * treated as timeout expiry here.
				 */
				if (errval == EAGAIN) {
					/*
					 * Timed out.  Make one last attempt to
					 * grab a free lock before reporting
					 * ETIMEDOUT.
					 */
					if (atomic_cmpset_acq_int(mtx, 0, id))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
		retry = 4;	/* restart the spin phase after each sleep */
	}
	return (ret);
}
90 
91 /*
92  * Inline followup when releasing a mutex.  The mutex has been released
93  * but 'v' either doesn't match id or needs a wakeup.
94  */
95 void
96 __thr_umtx_unlock(volatile umtx_t *mtx, int v, int id)
97 {
98 	if (v & 0x40000000) {
99 		_umtx_wakeup_err(mtx, 0);
100 		v &= 0x3FFFFFFF;
101 	}
102 	THR_ASSERT(v == id, "thr_umtx_unlock: wrong owner");
103 }
104 
105 /*
106  * Low level timed umtx lock.  This function must never return
107  * EINTR.
108  */
109 int
110 __thr_umtx_timedlock(volatile umtx_t *mtx, int id,
111 		     const struct timespec *timeout)
112 {
113 	struct timespec ts, ts2, ts3;
114 	int timo, ret;
115 
116 	if ((timeout->tv_sec < 0) ||
117 	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)) {
118 		return (ETIMEDOUT);
119 	}
120 
121 	/* XXX there should have MONO timer! */
122 	clock_gettime(CLOCK_REALTIME, &ts);
123 	timespecadd(&ts, timeout, &ts);
124 	ts2 = *timeout;
125 
126 	id &= 0x3FFFFFFF;
127 
128 	for (;;) {
129 		if (ts2.tv_nsec) {
130 			timo = (int)(ts2.tv_nsec / 1000);
131 			if (timo == 0)
132 				timo = 1;
133 		} else {
134 			timo = 1000000;
135 		}
136 		ret = __thr_umtx_lock(mtx, id, timo);
137 		if (ret != EINTR && ret != ETIMEDOUT)
138 			break;
139 		clock_gettime(CLOCK_REALTIME, &ts3);
140 		timespecsub(&ts, &ts3, &ts2);
141 		if (ts2.tv_sec < 0 ||
142 		    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
143 			ret = ETIMEDOUT;
144 			break;
145 		}
146 	}
147 	return (ret);
148 }
149 
150 /*
151  * Regular umtx wait that cannot return EINTR
152  */
153 int
154 _thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
155 	       int clockid)
156 {
157 	struct timespec ts, ts2, ts3;
158 	int timo, errval, ret = 0;
159 
160 	cpu_ccfence();
161 	if (*mtx != exp)
162 		return (0);
163 
164 	if (timeout == NULL) {
165 		/*
166 		 * NOTE: If no timeout, EINTR cannot be returned.  Ignore
167 		 *	 EINTR.
168 		 */
169 		while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
170 			if (errval == EBUSY)
171 				break;
172 #if 0
173 			if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
174 				if (*mtx != exp) {
175 					fprintf(stderr,
176 					    "thr_umtx_wait: FAULT VALUE CHANGE "
177 					    "%d -> %d oncond %p\n",
178 					    exp, *mtx, mtx);
179 				}
180 			}
181 #endif
182 			if (*mtx != exp)
183 				return(0);
184 		}
185 		return (ret);
186 	}
187 
188 	/*
189 	 * Timed waits can return EINTR
190 	 */
191 	if ((timeout->tv_sec < 0) ||
192 	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
193 	return (ETIMEDOUT);
194 
195 	clock_gettime(clockid, &ts);
196 	timespecadd(&ts, timeout, &ts);
197 	ts2 = *timeout;
198 
199 	for (;;) {
200 		if (ts2.tv_nsec) {
201 			timo = (int)(ts2.tv_nsec / 1000);
202 			if (timo == 0)
203 				timo = 1;
204 		} else {
205 			timo = 1000000;
206 		}
207 
208 		if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
209 			if (errval == EBUSY) {
210 				ret = 0;
211 				break;
212 			}
213 			if (errval == EINTR) {
214 				ret = EINTR;
215 				break;
216 			}
217 		}
218 
219 		clock_gettime(clockid, &ts3);
220 		timespecsub(&ts, &ts3, &ts2);
221 		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
222 			ret = ETIMEDOUT;
223 			break;
224 		}
225 	}
226 	return (ret);
227 }
228 
229 /*
230  * Simple version without a timeout which can also return EINTR
231  */
232 int
233 _thr_umtx_wait_intr(volatile umtx_t *mtx, int exp)
234 {
235 	int ret = 0;
236 	int errval;
237 
238 	cpu_ccfence();
239 	for (;;) {
240 		if (*mtx != exp)
241 			return (0);
242 		errval = _umtx_sleep_err(mtx, exp, 10000000);
243 		if (errval == 0)
244 			break;
245 		if (errval == EBUSY)
246 			break;
247 		if (errval == EINTR) {
248 			ret = errval;
249 			break;
250 		}
251 		cpu_ccfence();
252 	}
253 	return (ret);
254 }
255 
/*
 * Wake threads sleeping on the umtx address.  Thin wrapper around the
 * kernel wakeup primitive; 'count' is passed straight through (NOTE:
 * exact count semantics, e.g. whether 0 means "wake one", are defined
 * by _umtx_wakeup_err — confirm against its documentation).
 */
void
_thr_umtx_wake(volatile umtx_t *mtx, int count)
{
	_umtx_wakeup_err(mtx, count);
}
261