/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_umtx.c,v 1.2 2005/03/15 11:24:23 davidxu Exp $
 */

/*
 * Part of this code is derived from /usr/src/test/debug/umtx.c.
 */

#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include "thr_private.h"

static int get_contested(volatile umtx_t *mtx, int timo);

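/*
 * Lock word layout assumed by the code below: the UMTX_LOCKED bit
 * marks the mutex as held, and the remaining bits count threads that
 * are contesting the lock.  A contender bumps the count, sleeps via
 * umtx_sleep(), and the holder wakes one sleeper when it releases a
 * contested lock.
 */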
int
__thr_umtx_lock(volatile umtx_t *mtx, int timo)
{
    int v;
    int ret;

    for (;;) {
	v = *mtx;
	if ((v & UMTX_LOCKED) == 0) {
	    /* not locked, attempt to lock. */
	    if (atomic_cmpset_acq_int(mtx, v, v | UMTX_LOCKED)) {
		ret = 0;
		break;
	    }
	} else {
	    /*
	     * Locked, bump the contested count and obtain
	     * the contested mutex.
	     */
	    if (atomic_cmpset_acq_int(mtx, v, v + 1)) {
		ret = get_contested(mtx, timo);
		break;
	    }
	}
    }

    return (ret);
}

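/*
 * Wait for a mutex we have already marked as contested (the caller
 * bumped the contested count before calling us).  The count is always
 * removed again before returning: either folded into a successful
 * lock (return 0) or dropped when the timeout expires (return
 * ETIMEDOUT).  The timo argument is the per-sleep timeout handed to
 * umtx_sleep(); the callers compute it in microseconds, with 0
 * meaning no timeout.
 */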
static int
get_contested(volatile umtx_t *mtx, int timo)
{
    int ret = 0;
    int v;

    for (;;) {
	v = *mtx;
	assert(v & ~UMTX_LOCKED); /* our contesting count still there */
	if ((v & UMTX_LOCKED) == 0) {
	    /*
	     * Not locked, attempt to remove our contested
	     * count and lock at the same time.
	     */
	    if (atomic_cmpset_acq_int(mtx, v, (v - 1) | UMTX_LOCKED)) {
		ret = 0;
		break;
	    }
	} else {
	    /*
	     * We were resumed from umtx_sleep() with an error pending
	     * (e.g. a timeout).  Remove our contested count and bail
	     * out.
	     */
	    if (ret) {
		if (atomic_cmpset_acq_int(mtx, v, v - 1))
			break;
		else
			continue;
	    }

	    /*
	     * Still locked, sleep and try again.
	     */
	    if (timo == 0) {
		umtx_sleep(mtx, v, 0);
	    } else {
		if (umtx_sleep(mtx, v, timo) < 0) {
		    if (errno == EAGAIN)
			ret = ETIMEDOUT;
		}
	    }
	}
    }

    return (ret);
}

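/*
 * Release the mutex.  If the contested count is non-zero, wake up
 * exactly one sleeper so it can retry the lock.
 */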
void
__thr_umtx_unlock(volatile umtx_t *mtx)
{
    int v;

    for (;;) {
	v = *mtx;
	assert(v & UMTX_LOCKED);	/* we still have it locked */
	if (v == UMTX_LOCKED) {
	    /*
	     * We hold an uncontested lock, try to set to an unlocked
	     * state.
	     */
	    if (atomic_cmpset_acq_int(mtx, UMTX_LOCKED, 0))
		return;
	} else {
	    /*
	     * We hold a contested lock, unlock and wakeup exactly
	     * one sleeper. It is possible for this to race a new
	     * thread obtaining a lock, in which case any contested
	     * sleeper we wake up will simply go back to sleep.
	     */
	    if (atomic_cmpset_acq_int(mtx, v, v & ~UMTX_LOCKED)) {
		umtx_wakeup(mtx, 1);
		return;
	    }
	}
    }
}

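/*
 * Lock with a relative timeout.  The deadline is computed up front
 * and the lock attempt is retried with bounded umtx_sleep() slices
 * until it succeeds or the deadline passes.
 */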
int
__thr_umtx_timedlock(volatile umtx_t *mtx, const struct timespec *timeout)
{
    struct timespec ts, ts2, ts3;
    int timo, ret;

    if ((timeout->tv_sec < 0) ||
        (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
	return (ETIMEDOUT);

    /* XXX this should use a monotonic clock */
    clock_gettime(CLOCK_REALTIME, &ts);
    TIMESPEC_ADD(&ts, &ts, timeout);
    ts2 = *timeout;

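    /*
     * Sleep in slices: the sub-second part of the remaining time is
     * converted to microseconds for umtx_sleep(), and whole seconds
     * are capped at one second per sleep so the remaining time can be
     * recomputed against the deadline after each attempt.
     */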
    for (;;) {
	if (ts2.tv_nsec) {
	    timo = (int)(ts2.tv_nsec / 1000);
	    if (timo == 0)
		timo = 1;
	} else {
	    timo = 1000000;
	}
	ret = __thr_umtx_lock(mtx, timo);
	if (ret != ETIMEDOUT)
	    break;
	clock_gettime(CLOCK_REALTIME, &ts3);
	TIMESPEC_SUB(&ts2, &ts, &ts3);
	if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
	    ret = ETIMEDOUT;
	    break;
	}
    }
    return (ret);
}

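/*
 * Wait for *mtx to change from the expected value exp.  Returns 0
 * once the value no longer matches (or, with no timeout, when the
 * thread is simply woken), EINTR if the sleep is interrupted, and
 * ETIMEDOUT when the timeout expires.
 */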
int
_thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
	int clockid)
{
    struct timespec ts, ts2, ts3;
    int timo, ret = 0;

    if (*mtx != exp)
	return (0);

    if (timeout == NULL) {
	if (umtx_sleep(mtx, exp, 0) < 0) {
	    if (errno == EINTR)
		ret = EINTR;
	}
	return (ret);
    }

    if ((timeout->tv_sec < 0) ||
        (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
	return (ETIMEDOUT);

    clock_gettime(clockid, &ts);
    TIMESPEC_ADD(&ts, &ts, timeout);
    ts2 = *timeout;

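    /*
     * Sleep in bounded slices, as in __thr_umtx_timedlock().  EBUSY
     * from umtx_sleep() means *mtx no longer matches exp and counts
     * as success; EINTR is passed through to the caller.
     */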
    for (;;) {
	if (ts2.tv_nsec) {
	    timo = (int)(ts2.tv_nsec / 1000);
	    if (timo == 0)
		timo = 1;
	} else {
	    timo = 1000000;
	}
	if (umtx_sleep(mtx, exp, timo) < 0) {
	    if (errno == EBUSY) {
		ret = 0;
		break;
	    } else if (errno == EINTR) {
		ret = EINTR;
		break;
	    }
	}
	clock_gettime(clockid, &ts3);
	TIMESPEC_SUB(&ts2, &ts, &ts3);
	if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
	    ret = ETIMEDOUT;
	    break;
	}
    }
    return (ret);
}

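/*
 * Wake up to count threads sleeping on mtx.
 */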
void
_thr_umtx_wake(volatile umtx_t *mtx, int count)
{
    umtx_wakeup(mtx, count);
}