/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifndef EVENT__DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

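/*
 * If EVTHREAD_EXPOSE_STRUCTS is defined, the callback tables below get
 * external linkage so that the fast-path macros in evthread-internal.h can
 * use them directly; otherwise they stay static and other modules go
 * through the evthreadimpl_*() wrappers at the end of this file.
 */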
#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

#ifndef EVENT__DISABLE_DEBUG_MODE
extern int event_debug_created_threadable_ctx_;
extern int event_debug_mode_on_;
#endif

/* globals */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging: hold the real callbacks while the debug wrappers
 * defined below are installed. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

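/*
 * Usage sketch (illustrative only, assuming a pthreads build; real
 * applications normally just call evthread_use_pthreads() or
 * evthread_use_windows_threads() from event2/thread.h before creating any
 * event_base).  A hand-rolled setup would wire the tables below roughly
 * like this; the my_* helpers are hypothetical wrappers, not part of this
 * file, and each lock/unlock callback must return 0 on success:
 *
 *	static void *my_lock_alloc(unsigned locktype)
 *	{
 *		pthread_mutex_t *m = malloc(sizeof(*m));
 *		pthread_mutexattr_t attr;
 *		if (!m)
 *			return NULL;
 *		pthread_mutexattr_init(&attr);
 *		if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE)
 *			pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *		pthread_mutex_init(m, &attr);
 *		pthread_mutexattr_destroy(&attr);
 *		return m;
 *	}
 *	(my_lock_free, my_lock_lock and my_lock_unlock wrap
 *	pthread_mutex_destroy, pthread_mutex_lock and pthread_mutex_unlock
 *	the same way.)
 *
 *	struct evthread_lock_callbacks cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		my_lock_alloc, my_lock_free,
 *		my_lock_lock, my_lock_unlock
 *	};
 *	evthread_set_lock_callbacks(&cbs);
 *	evthread_set_id_callback(my_thread_id);
 */
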
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}

struct evthread_lock_callbacks *evthread_get_lock_callbacks()
{
	return evthread_lock_debugging_enabled_
	    ? &original_lock_fns_ : &evthread_lock_fns_;
}
struct evthread_condition_callbacks *evthread_get_condition_callbacks()
{
	return evthread_lock_debugging_enabled_
	    ? &original_cond_fns_ : &evthread_cond_fns_;
}
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}

int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up.*/
		if (target->lock_api_version == cbs->lock_api_version &&
			target->supported_locktypes == cbs->supported_locktypes &&
			target->alloc == cbs->alloc &&
			target->free == cbs->free &&
			target->lock == cbs->lock &&
			target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}

int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
		    event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up.*/
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}

#define DEBUG_LOCK_SIG	0xdeb0b10c

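/*
 * Debug-lock wrapper: while lock debugging is enabled, every lock that
 * libevent hands out is one of these.  It remembers which thread holds the
 * lock and how many times it has been acquired, so the assertions below can
 * catch unlock-without-lock, re-locking a non-recursive lock, and unlocking
 * from the wrong thread.  The underlying real lock, if any, lives in 'lock'.
 */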
struct debug_lock {
	unsigned signature;
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (original_lock_fns_.alloc) {
		if (!(result->lock = original_lock_fns_.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->signature = DEBUG_LOCK_SIG;
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}

static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (original_lock_fns_.unlock)
		res = original_lock_fns_.unlock(mode, lock->lock);
	return res;
}

static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

/* misspelled version for backward compatibility */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}

void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}
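
/*
 * Illustrative call order (an assumption about typical use, not something
 * enforced here): debugging wraps whatever callbacks are currently
 * installed, so it is normally enabled right after the real callbacks and
 * before any event_base or other lock-using object exists, e.g.:
 *
 *	evthread_use_pthreads();
 *	evthread_enable_lock_debugging();
 *	base = event_base_new();
 */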

int
evthread_is_debug_lock_held_(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (evthread_id_fn_) {
		unsigned long me = evthread_id_fn_();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && original_lock_fns_.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			original_lock_fns_.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			original_lock_fns_.free(lock_, locktype);
			return NULL;
		}
		lock->signature = DEBUG_LOCK_SIG;
		lock->lock = lock_;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return evthread_lock_fns_.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
		EVUTIL_ASSERT(enable_locks &&
		              evthread_lock_debugging_enabled_);
		EVUTIL_ASSERT(lock->locktype == locktype);
		if (!lock->lock) {
			lock->lock = original_lock_fns_.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
			if (!lock->lock) {
				lock->count = -200;
				mm_free(lock);
				return NULL;
			}
		}
		return lock;
	}
}


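/*
 * When the callback tables above are static (EVTHREAD_EXPOSE_STRUCTS not
 * defined), the rest of the library reaches them through the function
 * wrappers below instead of touching the structures directly.
 */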
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
evthreadimpl_get_id_()
{
	return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
evthreadimpl_lock_alloc_(unsigned locktype)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		event_debug_created_threadable_ctx_ = 1;
	}
#endif

	return evthread_lock_fns_.alloc ?
	    evthread_lock_fns_.alloc(locktype) : NULL;
}
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
int
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.lock)
		return evthread_lock_fns_.lock(mode, lock);
	else
		return 0;
}
int
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.unlock)
		return evthread_lock_fns_.unlock(mode, lock);
	else
		return 0;
}
void *
evthreadimpl_cond_alloc_(unsigned condtype)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		event_debug_created_threadable_ctx_ = 1;
	}
#endif

	return evthread_cond_fns_.alloc_condition ?
	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
int
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
	if (evthread_cond_fns_.signal_condition)
		return evthread_cond_fns_.signal_condition(cond, broadcast);
	else
		return 0;
}
int
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
	if (evthread_cond_fns_.wait_condition)
		return evthread_cond_fns_.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}

int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
#endif /* EVTHREAD_EXPOSE_STRUCTS */

#endif /* EVENT__DISABLE_THREAD_SUPPORT */