/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include "thr_private.h"

#define cpu_ccfence()	__asm __volatile("" : : : "memory")

/*
 * This function is used to acquire a contested lock.
 *
 * A *mtx value of 0 indicates that the lock is free.  A non-zero value
 * holds the owning thread's id (masked to the low 30 bits).  The
 * 0x40000000 bit is set when the lock is contested and one or more
 * threads may be sleeping on it.
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
{
	int v, errval, ret = 0;

	v = *mtx;
	cpu_ccfence();
	id &= 0x3FFFFFFF;

	for (;;) {
		if (v == 0) {
			/* Appears free, try to acquire it with our id */
			if (atomic_cmpset_acq_int(mtx, 0, id)) {
				break;
			}
			/* Lost the race, reload and retry */
			v = *mtx;
			continue;
		}

		/*
		 * Owned by someone else.  Set the contested bit and sleep
		 * on the contested value.  If the fcmpset fails, v is
		 * reloaded with the current contents of the mutex and we
		 * retry from the top.
		 */
		if (atomic_fcmpset_int(mtx, &v, v|0x40000000)) {
			if (timo == 0) {
				_umtx_sleep_err(mtx, v|0x40000000, timo);
			} else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
				if (errval == EAGAIN) {
					/*
					 * Timed out.  Make one last attempt
					 * to acquire the lock before giving
					 * up.
					 */
					if (atomic_cmpset_acq_int(mtx, 0, id))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
	}
	return (ret);
}

/*
 * Release a mutex.  An uncontested mutex holds just the owner's id;
 * a contested mutex also has the 0x40000000 bit set and requires a
 * wakeup of any sleeping waiters.
 */
void
__thr_umtx_unlock(volatile umtx_t *mtx, int id)
{
	int v;

	v = *mtx;
	cpu_ccfence();
	id &= 0x3FFFFFFF;

	for (;;) {
		/*
		 * Clear the mutex.  If it was contested, wake up any
		 * threads sleeping on it.
		 */
		if (atomic_fcmpset_int(mtx, &v, 0)) {
			if (v & 0x40000000)
				_umtx_wakeup_err(mtx, 0);
			THR_ASSERT((v & 0x3FFFFFFF) == id,
				   "thr_umtx_unlock: wrong owner");
			break;
		}
	}
}
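
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might pair __thr_umtx_lock() and __thr_umtx_unlock() under the value
 * encoding described above (0 == free, id == owned, 0x40000000 ==
 * contested).  The names 'example_mtx' and 'example_critical' are
 * hypothetical and used only for illustration.
 */
#if 0
static umtx_t example_mtx;	/* statically zero == unlocked */

static void
example_critical(int tid)
{
	/*
	 * Block until acquired; timo == 0 means wait indefinitely.
	 * On return *example_mtx holds 'tid', possibly with the
	 * contested bit set by later arrivals.
	 */
	__thr_umtx_lock(&example_mtx, tid, 0);

	/* ... critical section ... */

	/*
	 * Release.  If the contested bit was observed, sleeping
	 * waiters are woken via _umtx_wakeup_err().
	 */
	__thr_umtx_unlock(&example_mtx, tid);
}
#endif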

/*
 * Low level timed umtx lock.  This function must never return
 * EINTR.
 */
int
__thr_umtx_timedlock(volatile umtx_t *mtx, int id,
		     const struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	int timo, ret;

	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)) {
		return (ETIMEDOUT);
	}

	/* XXX this should use a monotonic clock */
	clock_gettime(CLOCK_REALTIME, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;

	id &= 0x3FFFFFFF;

	for (;;) {
		/*
		 * Sleep in slices of at most one second (timo is in
		 * microseconds).  Waking up early is harmless because
		 * the time remaining against the absolute deadline is
		 * recomputed each iteration.
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}
		ret = __thr_umtx_lock(mtx, id, timo);
		if (ret != EINTR && ret != ETIMEDOUT)
			break;
		clock_gettime(CLOCK_REALTIME, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 ||
		    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}
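
/*
 * Illustrative sketch (not part of the original source): converting an
 * absolute CLOCK_REALTIME deadline into the relative timeout expected by
 * __thr_umtx_timedlock().  'example_lock_until' is a hypothetical name.
 */
#if 0
static int
example_lock_until(volatile umtx_t *mtx, int id,
		   const struct timespec *abstime)
{
	struct timespec now, rel;

	clock_gettime(CLOCK_REALTIME, &now);
	TIMESPEC_SUB(&rel, abstime, &now);	/* rel = abstime - now */
	if (rel.tv_sec < 0 || (rel.tv_sec == 0 && rel.tv_nsec <= 0))
		return (ETIMEDOUT);
	return (__thr_umtx_timedlock(mtx, id, &rel));
}
#endif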

/*
 * Regular umtx wait.  When no timeout is given this cannot return
 * EINTR; a timed wait may return EINTR.
 */
int
_thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
	       int clockid)
{
	struct timespec ts, ts2, ts3;
	int timo, errval, ret = 0;

	cpu_ccfence();
	if (*mtx != exp)
		return (0);

	if (timeout == NULL) {
		/*
		 * NOTE: If no timeout, EINTR cannot be returned.  Ignore
		 *	 EINTR.  Sleep in ~10 second slices (the timeout
		 *	 passed to _umtx_sleep_err() is in microseconds)
		 *	 and re-check the value after each wakeup.
		 */
		while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
			if (errval == EBUSY)
				break;
#if 0
			if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
				if (*mtx != exp) {
					fprintf(stderr,
					    "thr_umtx_wait: FAULT VALUE CHANGE "
					    "%d -> %d oncond %p\n",
					    exp, *mtx, mtx);
				}
			}
#endif
			if (*mtx != exp)
				return (0);
		}
		return (ret);
	}

	/*
	 * Timed waits can return EINTR
	 */
	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
		return (ETIMEDOUT);

	clock_gettime(clockid, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;

	for (;;) {
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}

		if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
			if (errval == EBUSY) {
				ret = 0;
				break;
			}
			if (errval == EINTR) {
				ret = EINTR;
				break;
			}
		}

		clock_gettime(clockid, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}
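
/*
 * Illustrative sketch (not part of the original source): the typical
 * re-check loop built around _thr_umtx_wait().  The wait only sleeps
 * while *mtx still equals the expected value, so the caller loops on
 * its own predicate.  'example_state' and 'example_wait_nonzero' are
 * hypothetical names.
 */
#if 0
static volatile umtx_t example_state;	/* 0 until a producer sets it */

static void
example_wait_nonzero(void)
{
	int v;

	while ((v = example_state) == 0) {
		/* NULL timeout: waits without returning EINTR */
		_thr_umtx_wait(&example_state, v, NULL, CLOCK_REALTIME);
	}
}
#endif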

/*
 * Simple version without a timeout.  Unlike an untimed _thr_umtx_wait(),
 * this can return EINTR.
 */
int
_thr_umtx_wait_intr(volatile umtx_t *mtx, int exp)
{
	int ret = 0;
	int errval;

	cpu_ccfence();
	for (;;) {
		if (*mtx != exp)
			return (0);
		errval = _umtx_sleep_err(mtx, exp, 10000000);
		if (errval == 0)
			break;
		if (errval == EBUSY)
			break;
		if (errval == EINTR) {
			ret = errval;
			break;
		}
		cpu_ccfence();
	}
	return (ret);
}

/*
 * Wake threads sleeping on the umtx.
 */
void
_thr_umtx_wake(volatile umtx_t *mtx, int count)
{
	_umtx_wakeup_err(mtx, count);
}
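
/*
 * Illustrative sketch (not part of the original source): the producer
 * side matching the wait sketch above, plus an interruptible consumer
 * based on _thr_umtx_wait_intr().  'example_state2', 'example_signal'
 * and 'example_wait_intr_loop' are hypothetical names.
 */
#if 0
static volatile umtx_t example_state2;

static void
example_signal(void)
{
	example_state2 = 1;
	_thr_umtx_wake(&example_state2, 1);	/* wake a waiter */
}

static void
example_wait_intr_loop(void)
{
	while (example_state2 == 0) {
		if (_thr_umtx_wait_intr(&example_state2, 0) == EINTR) {
			/*
			 * Interrupted by a signal; a real caller might
			 * check for deferred cancellation here before
			 * retrying.
			 */
			continue;
		}
	}
}
#endif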