/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 1998 Alex Nash
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 */

#include <sys/lwp.h>
#include <machine/atomic.h>
#include <machine/tls.h>
#include <errno.h>

#include "sysvipc_utils.h"
#include "sysvipc_lock.h"
#include "sysvipc_lock_generic.h"

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_READ_LOCKS          (INT_MAX - 1)

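/*
 * Number of read locks held by the current process across all SysV
 * rwlocks.  It is per process, not per thread; see the comment in
 * sysv_rwlock_rdlock() for how it is used to allow recursive read
 * locks without deadlocking against waiting writers.
 */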
static int rdlock_count;

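/*
 * Initialize a process-shared mutex to the unlocked state with no owner.
 */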
int
sysv_mutex_init(struct sysv_mutex *mutex)
{
	if (mutex == NULL)
		return (EINVAL);
	mutex->_mutex_static_lock = 0;
	mutex->pid_owner = -1;
	mutex->tid_owner = -1;
	return (0);
}

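/*
 * Acquire the mutex.  The owner check below is only a best-effort
 * self-deadlock detector; the mutex is not recursive.
 */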
int
sysv_mutex_lock(struct sysv_mutex *mutex)
{
	if (mutex->pid_owner == getpid() &&
			mutex->tid_owner == lwp_gettid()) {
		sysv_print_err("deadlock: mutex already acquired by this thread\n");
		return (EDEADLK);
	}
	_sysv_umtx_lock(&mutex->_mutex_static_lock);
	mutex->pid_owner = getpid();
	mutex->tid_owner = lwp_gettid();
	return (0);
}

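/*
 * Release the mutex.  Only the thread that acquired it may unlock it;
 * otherwise EPERM is returned.
 */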
int
sysv_mutex_unlock(struct sysv_mutex *mutex)
{
	if (mutex->pid_owner != getpid() ||
			mutex->tid_owner != lwp_gettid()) {
		sysv_print_err("eperm: attempt to unlock a mutex not owned by this thread\n");
		return (EPERM);
	}

	mutex->tid_owner = -1;
	mutex->pid_owner = -1;
	_sysv_umtx_unlock(&mutex->_mutex_static_lock);
	return (0);
}

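/*
 * Condition-variable style wait built on umtx_sleep().  The caller must
 * hold 'mutex'; it is released while sleeping on 'val' and re-acquired
 * before returning.
 */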
static int
sysv_cond_wait(int *val, struct sysv_mutex *mutex)
{
	sysv_mutex_unlock(mutex);

	/*
	 * Sleep with a bounded SYSV_TIMEOUT so that a wakeup sent after
	 * the unlock above, but before we actually go to sleep, does not
	 * leave us blocked forever.
	 */
	umtx_sleep(val, *val, SYSV_TIMEOUT);
	return (sysv_mutex_lock(mutex));
}

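/*
 * Wake up waiters sleeping on 'val' in sysv_cond_wait().
 */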
static int
sysv_cond_signal(int *val)
{
	return (umtx_wakeup(val, 0));
}

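/*
 * Initialize a rwlock: no readers, no writer, no blocked writers.
 */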
int
sysv_rwlock_init(struct sysv_rwlock *rwlock)
{
	int ret = 0;

	if (rwlock == NULL)
		return (EINVAL);

	/* Initialize the lock. */
	sysv_mutex_init(&rwlock->lock);
	rwlock->state = 0;
	rwlock->blocked_writers = 0;

	return (ret);
}

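/*
 * Release a read or write lock.  'state' > 0 is the number of active
 * readers, 'state' == -1 means a writer holds the lock, and
 * 'state' == 0 means the lock is free.  Waiting writers are preferred
 * when the lock is handed over.
 */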
int
sysv_rwlock_unlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	if (rwlock->state > 0) {
		rdlock_count--;
		rwlock->state--;
		if (rwlock->state == 0 && rwlock->blocked_writers) {
			ret = sysv_cond_signal(&rwlock->write_signal);
		}
	} else if (rwlock->state < 0) {
		rwlock->state = 0;

		if (rwlock->blocked_writers) {
			ret = sysv_cond_signal(&rwlock->write_signal);
		} else {
			ret = sysv_cond_signal(&rwlock->read_signal);
		}
	} else
		ret = EINVAL;

	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}

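/*
 * Acquire the write lock.  The caller sleeps on 'write_signal' while
 * any readers or another writer hold the lock.
 */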
int
sysv_rwlock_wrlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	while (rwlock->state != 0) {
		rwlock->blocked_writers++;

		ret = sysv_cond_wait(&rwlock->write_signal, &rwlock->lock);
		if (ret != 0) {
			rwlock->blocked_writers--;
			/*
			 * No unlock is required here: the only operation in
			 * sysv_cond_wait() that can fail is re-acquiring the
			 * monitor lock, so it is not held on this error path.
			 */
			return (ret);
		}

		rwlock->blocked_writers--;
	}

	/* Indicate that we are locked for writing. */
	rwlock->state = -1;

	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}

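/*
 * Acquire a read lock.  Writers are normally given priority, except
 * when this process already holds read locks (see below), in which
 * case the read lock is granted to avoid a recursive-rdlock deadlock.
 */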
int
sysv_rwlock_rdlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	/* Check the lock count. */
	if (rwlock->state == MAX_READ_LOCKS) {
		sysv_mutex_unlock(&rwlock->lock);
		return (EAGAIN);
	}

	if ((rdlock_count > 0) && (rwlock->state > 0)) {
		/*
		 * Taken from the pthread implementation with only one
		 * change: rdlock_count is per process, not per thread.
		 * Original comment:
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* Give writers priority over readers. */
		while (rwlock->blocked_writers || rwlock->state < 0) {
			ret = sysv_cond_wait(&rwlock->read_signal,
			   &rwlock->lock);
			if (ret != 0) {
				/*
				 * No unlock is necessary here: the only
				 * operation in sysv_cond_wait() that can fail
				 * is re-acquiring the monitor lock, so it is
				 * not held on this error path.
				 */
				return (ret);
			}
		}
	}

	rdlock_count++;
	rwlock->state++; /* Indicate we are locked for reading. */

	/*
	 * Something is really wrong if this call fails.  Returning an
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}
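
/*
 * Usage sketch (not part of the library): an illustrative, hypothetical
 * example of how a structure shared between processes might be guarded
 * with this API.  The shared_seg layout and the helper names are
 * assumptions for illustration only; within this library the locks live
 * inside the shared SysV IPC segments themselves, and sysv_rwlock_init()
 * is called once when the segment is created.
 *
 *	struct shared_seg {
 *		struct sysv_rwlock rwlock;
 *		int data;
 *	};
 *
 *	static int
 *	shared_seg_read(struct shared_seg *seg, int *out)
 *	{
 *		int error;
 *
 *		error = sysv_rwlock_rdlock(&seg->rwlock);
 *		if (error != 0)
 *			return (error);
 *		*out = seg->data;
 *		return (sysv_rwlock_unlock(&seg->rwlock));
 *	}
 *
 *	static int
 *	shared_seg_write(struct shared_seg *seg, int value)
 *	{
 *		int error;
 *
 *		error = sysv_rwlock_wrlock(&seg->rwlock);
 *		if (error != 0)
 *			return (error);
 *		seg->data = value;
 *		return (sysv_rwlock_unlock(&seg->rwlock));
 *	}
 */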