xref: /dragonfly/lib/libc/sysvipc/lock.c (revision 896f2e3a)
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 1998 Alex Nash
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.15 2008/05/09 16:03:27 dillon Exp $
 * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.7 2006/04/06 13:03:09 davidxu Exp $
 */

#include <machine/atomic.h>
#include <machine/tls.h>
#include <errno.h>

#include "sysvipc_utils.h"
#include "sysvipc_lock.h"
#include "sysvipc_lock_generic.h"

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_READ_LOCKS          (INT_MAX - 1)

/* Number of read locks held by this process (see sysv_rwlock_rdlock()). */
static int rdlock_count;

int
sysv_mutex_init(struct sysv_mutex *mutex)
{
	if (mutex == NULL)
		return (EINVAL);
	mutex->_mutex_static_lock = 0;
	mutex->pid_owner = -1;
	mutex->tid_owner = -1;
	return (0);
}

int
sysv_mutex_lock(struct sysv_mutex *mutex)
{
	if (mutex->pid_owner == getpid() &&
	    mutex->tid_owner == lwp_gettid()) {
		sysv_print_err("deadlock: mutex already acquired by this thread\n");
		return (EDEADLK);
	}
	_sysv_umtx_lock(&mutex->_mutex_static_lock);
	mutex->pid_owner = getpid();
	mutex->tid_owner = lwp_gettid();
	return (0);
}

int
sysv_mutex_unlock(struct sysv_mutex *mutex)
{
	if (mutex->pid_owner != getpid() ||
	    mutex->tid_owner != lwp_gettid()) {
		sysv_print_err("eperm: trying to unlock a mutex not owned by this thread\n");
		return (EPERM);
	}

	mutex->tid_owner = -1;
	mutex->pid_owner = -1;
	_sysv_umtx_unlock(&mutex->_mutex_static_lock);
	return (0);
}
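
/*
 * Illustrative sketch only (not part of the library): a caller maps a
 * struct sysv_mutex in memory shared between processes, initializes it
 * once, and then brackets its critical section with lock/unlock.  The
 * shared-segment helper below is assumed, not something this file provides.
 *
 *	struct sysv_mutex *m = get_shared_mutex();	// hypothetical helper
 *
 *	if (sysv_mutex_lock(m) == 0) {
 *		// ... touch the shared SysV IPC structures ...
 *		sysv_mutex_unlock(m);
 *	}
 */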

static int
sysv_cond_wait(int *val, struct sysv_mutex *mutex)
{
	sysv_mutex_unlock(mutex);

	/*
	 * Sleep with SYSV_TIMEOUT so that a wakeup sent between dropping
	 * the mutex and going to sleep is not lost, leaving us blocked
	 * forever.
	 */
	umtx_sleep(val, *val, SYSV_TIMEOUT);
	return (sysv_mutex_lock(mutex));
}

static int
sysv_cond_signal(int *val)
{
	return (umtx_wakeup(val, 0));
}
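
/*
 * Usage pattern (sketch; the monitor structure and predicate below are
 * hypothetical): waiters re-check their condition under the monitor mutex
 * because sysv_cond_wait() can also return when SYSV_TIMEOUT expires rather
 * than on an actual wakeup.
 *
 *	sysv_mutex_lock(&mon->lock);
 *	while (!condition_holds(mon))		// hypothetical predicate
 *		sysv_cond_wait(&mon->signal, &mon->lock);
 *	// ... predicate now holds, monitor lock is held ...
 *	sysv_mutex_unlock(&mon->lock);
 *
 *	// on the other side, after changing the state under the lock:
 *	sysv_cond_signal(&mon->signal);
 */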

int
sysv_rwlock_init(struct sysv_rwlock *rwlock)
{
	int ret = 0;

	if (rwlock == NULL)
		return (EINVAL);

	/*
	 * Initialize the monitor lock and the lock state: state > 0 counts
	 * the read locks held, state == -1 means write locked, state == 0
	 * means unlocked.
	 */
	sysv_mutex_init(&rwlock->lock);
	rwlock->state = 0;
	rwlock->blocked_writers = 0;

	return (ret);
}
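
/*
 * Sketch (helper names are hypothetical): the rwlock lives in memory shared
 * between the processes using a SysV IPC object, so it is initialized
 * exactly once, typically by the process that creates the shared segment.
 *
 *	struct sysv_rwlock *rw = create_shared_segment();	// hypothetical
 *	if (sysv_rwlock_init(rw) != 0)
 *		handle_error();					// hypothetical
 */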

int
sysv_rwlock_unlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	if (rwlock->state > 0) {
		/* Drop one read lock; wake a writer once the last reader is gone. */
		rdlock_count--;
		rwlock->state--;
		if (rwlock->state == 0 && rwlock->blocked_writers) {
			ret = sysv_cond_signal(&rwlock->write_signal);
		}
	} else if (rwlock->state < 0) {
		/* Drop the write lock; blocked writers are preferred over readers. */
		rwlock->state = 0;

		if (rwlock->blocked_writers) {
			ret = sysv_cond_signal(&rwlock->write_signal);
		} else {
			ret = sysv_cond_signal(&rwlock->read_signal);
		}
	} else
		ret = EINVAL;

	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}

int
sysv_rwlock_wrlock(struct sysv_rwlock *rwlock)
{
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	while (rwlock->state != 0) {
		rwlock->blocked_writers++;

		ret = sysv_cond_wait(&rwlock->write_signal, &rwlock->lock);
		if (ret != 0) {
			rwlock->blocked_writers--;
			/*
			 * No unlock is required here because only the
			 * re-lock step of sysv_cond_wait() can return an
			 * error, in which case the monitor lock was not
			 * re-acquired.
			 */
			//sysv_mutex_unlock(&rwlock->lock);
			return (ret);
		}

		rwlock->blocked_writers--;
	}

	/* Indicate that we are locked for writing. */
	rwlock->state = -1;

	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}
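
/*
 * Sketch (hypothetical caller): a writer takes the lock exclusively,
 * updates the shared structures, and releases it through the common
 * sysv_rwlock_unlock() entry point.
 *
 *	if (sysv_rwlock_wrlock(rw) == 0) {
 *		// ... modify shared SysV IPC state ...
 *		sysv_rwlock_unlock(rw);
 *	}
 */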

int
sysv_rwlock_rdlock(struct sysv_rwlock *rwlock)
{
	int ret;

//	sysv_print("try get rd lock\n");
	if (rwlock == NULL)
		return (EINVAL);

	/* Grab the monitor lock. */
	if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0)
		return (ret);

	/* Check the read lock count against its limit. */
	if (rwlock->state == MAX_READ_LOCKS) {
		sysv_mutex_unlock(&rwlock->lock);
		return (EAGAIN);
	}

	if ((rdlock_count > 0) && (rwlock->state > 0)) {
		/*
		 * Taken from the pthread implementation with only one
		 * change: rdlock_count is per process, not per thread.
		 * Original comment:
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* Give writers priority over readers. */
		while (rwlock->blocked_writers || rwlock->state < 0) {
			ret = sysv_cond_wait(&rwlock->read_signal,
			    &rwlock->lock);
			if (ret != 0) {
				/*
				 * No unlock is necessary because only the
				 * re-lock step of sysv_cond_wait() can
				 * return an error.
				 */
				//sysv_mutex_unlock(&rwlock->lock);
				return (ret);
			}
		}
	}

	rdlock_count++;
	rwlock->state++; /* Indicate we are locked for reading. */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	sysv_mutex_unlock(&rwlock->lock);

	return (ret);
}
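
/*
 * Sketch (hypothetical caller, not part of this file): read locks may nest
 * within one process, which is exactly the case the rdlock_count check
 * above allows; a process that already holds a read lock is granted another
 * one even when writers are queued, so the nested acquisition cannot
 * deadlock against writer priority.
 *
 *	sysv_rwlock_rdlock(rw);
 *	// ... read shared state ...
 *	sysv_rwlock_rdlock(rw);		// nested read lock, allowed
 *	// ...
 *	sysv_rwlock_unlock(rw);
 *	sysv_rwlock_unlock(rw);		// one unlock per rdlock
 */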
263