xref: /minix/external/bsd/libevent/dist/evthread.c (revision 9f988b79)
1 /*	$NetBSD: evthread.c,v 1.1.1.1 2013/04/11 16:43:25 christos Exp $	*/
2 /*
3  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "event2/event-config.h"
29 #include <sys/cdefs.h>
30 __RCSID("$NetBSD: evthread.c,v 1.1.1.1 2013/04/11 16:43:25 christos Exp $");
31 
32 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
33 
34 #include "event2/thread.h"
35 
36 #include <stdlib.h>
37 #include <string.h>
38 
39 #include "log-internal.h"
40 #include "mm-internal.h"
41 #include "util-internal.h"
42 #include "evthread-internal.h"
43 
44 #ifdef EVTHREAD_EXPOSE_STRUCTS
45 #define GLOBAL
46 #else
47 #define GLOBAL static
48 #endif
49 
/* globals */

/* Nonzero iff lock debugging (the debug_lock wrappers below) is enabled. */
GLOBAL int _evthread_lock_debugging_enabled = 0;

/* Lock callbacks currently in use; all-zero until configured. */
GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
	0, 0, NULL, NULL, NULL, NULL
};

/* Callback returning the calling thread's id; NULL until configured. */
GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;

/* Condition-variable callbacks currently in use; all-zero until configured. */
GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging: when lock debugging is enabled, these hold the real
 * (user-supplied) callbacks that the debug wrappers forward to. */
static struct evthread_lock_callbacks _original_lock_fns = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks _original_cond_fns = {
	0, NULL, NULL, NULL, NULL
};
67 
68 void
69 evthread_set_id_callback(unsigned long (*id_fn)(void))
70 {
71 	_evthread_id_fn = id_fn;
72 }
73 
74 int
75 evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
76 {
77 	struct evthread_lock_callbacks *target =
78 	    _evthread_lock_debugging_enabled
79 	    ? &_original_lock_fns : &_evthread_lock_fns;
80 
81 	if (!cbs) {
82 		if (target->alloc)
83 			event_warnx("Trying to disable lock functions after "
84 			    "they have been set up will probaby not work.");
85 		memset(target, 0, sizeof(_evthread_lock_fns));
86 		return 0;
87 	}
88 	if (target->alloc) {
89 		/* Uh oh; we already had locking callbacks set up.*/
90 		if (target->lock_api_version == cbs->lock_api_version &&
91 			target->supported_locktypes == cbs->supported_locktypes &&
92 			target->alloc == cbs->alloc &&
93 			target->free == cbs->free &&
94 			target->lock == cbs->lock &&
95 			target->unlock == cbs->unlock) {
96 			/* no change -- allow this. */
97 			return 0;
98 		}
99 		event_warnx("Can't change lock callbacks once they have been "
100 		    "initialized.");
101 		return -1;
102 	}
103 	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
104 		memcpy(target, cbs, sizeof(_evthread_lock_fns));
105 		return event_global_setup_locks_(1);
106 	} else {
107 		return -1;
108 	}
109 }
110 
111 int
112 evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
113 {
114 	struct evthread_condition_callbacks *target =
115 	    _evthread_lock_debugging_enabled
116 	    ? &_original_cond_fns : &_evthread_cond_fns;
117 
118 	if (!cbs) {
119 		if (target->alloc_condition)
120 			event_warnx("Trying to disable condition functions "
121 			    "after they have been set up will probaby not "
122 			    "work.");
123 		memset(target, 0, sizeof(_evthread_cond_fns));
124 		return 0;
125 	}
126 	if (target->alloc_condition) {
127 		/* Uh oh; we already had condition callbacks set up.*/
128 		if (target->condition_api_version == cbs->condition_api_version &&
129 			target->alloc_condition == cbs->alloc_condition &&
130 			target->free_condition == cbs->free_condition &&
131 			target->signal_condition == cbs->signal_condition &&
132 			target->wait_condition == cbs->wait_condition) {
133 			/* no change -- allow this. */
134 			return 0;
135 		}
136 		event_warnx("Can't change condition callbacks once they "
137 		    "have been initialized.");
138 		return -1;
139 	}
140 	if (cbs->alloc_condition && cbs->free_condition &&
141 	    cbs->signal_condition && cbs->wait_condition) {
142 		memcpy(target, cbs, sizeof(_evthread_cond_fns));
143 	}
144 	if (_evthread_lock_debugging_enabled) {
145 		_evthread_cond_fns.alloc_condition = cbs->alloc_condition;
146 		_evthread_cond_fns.free_condition = cbs->free_condition;
147 		_evthread_cond_fns.signal_condition = cbs->signal_condition;
148 	}
149 	return 0;
150 }
151 
/* Wrapper around a real lock, used when lock debugging is enabled to
 * track ownership and recursion depth. */
struct debug_lock {
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags for this lock */
	unsigned long held_by;	/* id of holding thread; 0 if unheld or no id fn */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* recursion depth; set negative as poison on free */
	void *lock;		/* underlying real lock, or NULL if none exists yet */
};
160 
161 static void *
162 debug_lock_alloc(unsigned locktype)
163 {
164 	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
165 	if (!result)
166 		return NULL;
167 	if (_original_lock_fns.alloc) {
168 		if (!(result->lock = _original_lock_fns.alloc(
169 				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
170 			mm_free(result);
171 			return NULL;
172 		}
173 	} else {
174 		result->lock = NULL;
175 	}
176 	result->locktype = locktype;
177 	result->count = 0;
178 	result->held_by = 0;
179 	return result;
180 }
181 
182 static void
183 debug_lock_free(void *lock_, unsigned locktype)
184 {
185 	struct debug_lock *lock = lock_;
186 	EVUTIL_ASSERT(lock->count == 0);
187 	EVUTIL_ASSERT(locktype == lock->locktype);
188 	if (_original_lock_fns.free) {
189 		_original_lock_fns.free(lock->lock,
190 		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
191 	}
192 	lock->lock = NULL;
193 	lock->count = -100;
194 	mm_free(lock);
195 }
196 
197 static void
198 evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
199 {
200 	++lock->count;
201 	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
202 		EVUTIL_ASSERT(lock->count == 1);
203 	if (_evthread_id_fn) {
204 		unsigned long me;
205 		me = _evthread_id_fn();
206 		if (lock->count > 1)
207 			EVUTIL_ASSERT(lock->held_by == me);
208 		lock->held_by = me;
209 	}
210 }
211 
212 static int
213 debug_lock_lock(unsigned mode, void *lock_)
214 {
215 	struct debug_lock *lock = lock_;
216 	int res = 0;
217 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
218 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
219 	else
220 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
221 	if (_original_lock_fns.lock)
222 		res = _original_lock_fns.lock(mode, lock->lock);
223 	if (!res) {
224 		evthread_debug_lock_mark_locked(mode, lock);
225 	}
226 	return res;
227 }
228 
229 static void
230 evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
231 {
232 	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
233 		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
234 	else
235 		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
236 	if (_evthread_id_fn) {
237 		EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
238 		if (lock->count == 1)
239 			lock->held_by = 0;
240 	}
241 	--lock->count;
242 	EVUTIL_ASSERT(lock->count >= 0);
243 }
244 
245 static int
246 debug_lock_unlock(unsigned mode, void *lock_)
247 {
248 	struct debug_lock *lock = lock_;
249 	int res = 0;
250 	evthread_debug_lock_mark_unlocked(mode, lock);
251 	if (_original_lock_fns.unlock)
252 		res = _original_lock_fns.unlock(mode, lock->lock);
253 	return res;
254 }
255 
256 static int
257 debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
258 {
259 	int r;
260 	struct debug_lock *lock = _lock;
261 	EVUTIL_ASSERT(lock);
262 	EVLOCK_ASSERT_LOCKED(_lock);
263 	evthread_debug_lock_mark_unlocked(0, lock);
264 	r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
265 	evthread_debug_lock_mark_locked(0, lock);
266 	return r;
267 }
268 
/*
 * Turn on lock debugging: save the currently installed lock and condition
 * callbacks into the _original_* tables, then install the debug_lock_*
 * wrappers (and debug_cond_wait) in their place.  Idempotent: a second
 * call is a no-op.  (Note: the misspelled name is part of the public API
 * and cannot be changed here.)
 */
void
evthread_enable_lock_debuging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (_evthread_lock_debugging_enabled)
		return;
	/* Save the real lock callbacks, then install the debug wrappers. */
	memcpy(&_original_lock_fns, &_evthread_lock_fns,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&_evthread_lock_fns, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	/* Only wait_condition needs wrapping; the rest stay as-is. */
	memcpy(&_original_cond_fns, &_evthread_cond_fns,
	    sizeof(struct evthread_condition_callbacks));
	_evthread_cond_fns.wait_condition = debug_cond_wait;
	_evthread_lock_debugging_enabled = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}
295 
296 int
297 _evthread_is_debug_lock_held(void *lock_)
298 {
299 	struct debug_lock *lock = lock_;
300 	if (! lock->count)
301 		return 0;
302 	if (_evthread_id_fn) {
303 		unsigned long me = _evthread_id_fn();
304 		if (lock->held_by != me)
305 			return 0;
306 	}
307 	return 1;
308 }
309 
310 void *
311 _evthread_debug_get_real_lock(void *lock_)
312 {
313 	struct debug_lock *lock = lock_;
314 	return lock->lock;
315 }
316 
/*
 * (Re)create a lock protecting libevent-global state, coping with the
 * fact that locking and lock debugging may each be enabled before the
 * other.  'lock_' is the previously created global lock, or NULL;
 * 'enable_locks' is nonzero when real locking is being turned on, zero
 * when debugging is being turned on.  Returns the lock to use from now
 * on, or NULL on allocation failure (in which case 'lock_' has already
 * been released where applicable).
 */
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && _original_lock_fns.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && _original_lock_fns.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			_original_lock_fns.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			_original_lock_fns.free(lock_, locktype);
			return NULL;
		}
		/* Take ownership of the existing real lock. */
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! _evthread_lock_debugging_enabled) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return _evthread_lock_fns.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		              _evthread_lock_debugging_enabled);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		/* Debug wrappers require the real lock to be recursive. */
		lock->lock = _original_lock_fns.alloc(
			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;	/* poison before freeing */
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}
371 
372 
373 #ifndef EVTHREAD_EXPOSE_STRUCTS
374 unsigned long
375 _evthreadimpl_get_id()
376 {
377 	return _evthread_id_fn ? _evthread_id_fn() : 1;
378 }
379 void *
380 _evthreadimpl_lock_alloc(unsigned locktype)
381 {
382 	return _evthread_lock_fns.alloc ?
383 	    _evthread_lock_fns.alloc(locktype) : NULL;
384 }
385 void
386 _evthreadimpl_lock_free(void *lock, unsigned locktype)
387 {
388 	if (_evthread_lock_fns.free)
389 		_evthread_lock_fns.free(lock, locktype);
390 }
391 int
392 _evthreadimpl_lock_lock(unsigned mode, void *lock)
393 {
394 	if (_evthread_lock_fns.lock)
395 		return _evthread_lock_fns.lock(mode, lock);
396 	else
397 		return 0;
398 }
399 int
400 _evthreadimpl_lock_unlock(unsigned mode, void *lock)
401 {
402 	if (_evthread_lock_fns.unlock)
403 		return _evthread_lock_fns.unlock(mode, lock);
404 	else
405 		return 0;
406 }
407 void *
408 _evthreadimpl_cond_alloc(unsigned condtype)
409 {
410 	return _evthread_cond_fns.alloc_condition ?
411 	    _evthread_cond_fns.alloc_condition(condtype) : NULL;
412 }
413 void
414 _evthreadimpl_cond_free(void *cond)
415 {
416 	if (_evthread_cond_fns.free_condition)
417 		_evthread_cond_fns.free_condition(cond);
418 }
419 int
420 _evthreadimpl_cond_signal(void *cond, int broadcast)
421 {
422 	if (_evthread_cond_fns.signal_condition)
423 		return _evthread_cond_fns.signal_condition(cond, broadcast);
424 	else
425 		return 0;
426 }
427 int
428 _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
429 {
430 	if (_evthread_cond_fns.wait_condition)
431 		return _evthread_cond_fns.wait_condition(cond, lock, tv);
432 	else
433 		return 0;
434 }
435 int
436 _evthreadimpl_is_lock_debugging_enabled(void)
437 {
438 	return _evthread_lock_debugging_enabled;
439 }
440 
441 int
442 _evthreadimpl_locking_enabled(void)
443 {
444 	return _evthread_lock_fns.lock != NULL;
445 }
446 #endif
447 
448 #endif
449