#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool		postpone_init = true;
static malloc_mutex_t	*postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

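/*
 * _pthread_mutex_init_calloc_cb() is supplied by the threading library on
 * platforms that define JEMALLOC_MUTEX_INIT_CB (e.g. FreeBSD); it initializes
 * a pthread mutex using a caller-provided calloc, which lets jemalloc pass
 * bootstrap_calloc below and so avoid allocation recursion during early
 * initialization.
 */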
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
#endif

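/*
 * Slow path for lock acquisition: spin briefly in the hope that the holder
 * releases the lock soon, then fall back to blocking in
 * malloc_mutex_lock_final().  When stats are enabled, the time spent blocked
 * and the number of concurrent waiters are recorded in mutex->prof_data.
 */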
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	nstime_t before = NSTIME_ZERO_INITIALIZER;

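	/* On a single CPU, spinning cannot help: the holder is not running. */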
	if (ncpus == 1) {
		goto label_spin_done;
	}

	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		spin_cpu_spinwait();
		if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
		    && !malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* Only spinning is useful when stats are off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/* Copy before to after to avoid clock skew. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, since the two calls above may take quite some cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update more slow-path only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

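/* Reset all contention counters and wait timers to their initial state. */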
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init(&data->max_wait_time, 0);
	nstime_init(&data->tot_wait_time, 0);
	data->prev_owner = NULL;
}

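/* The caller must hold the mutex, so the reset cannot race with updates. */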
void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

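/*
 * Witness comparison callback for address-ordered mutexes: ordering same-rank
 * locks by address yields a consistent global acquisition order, which lets
 * the witness machinery detect lock-order reversals among them.
 */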
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

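/*
 * Initialize a mutex using the platform-appropriate primitive (SRWLock or
 * critical section on Windows, os_unfair_lock where available, a pthread
 * mutex otherwise).  Returns true on failure.  Under JEMALLOC_MUTEX_INIT_CB,
 * mutexes created before malloc_mutex_boot() are queued on postponed_mutexes
 * and initialized later via _pthread_mutex_init_calloc_cb().
 *
 * Illustrative call (names chosen for the example, not from a real caller):
 *	if (malloc_mutex_init(&mtx, "example", WITNESS_RANK_OMIT,
 *	    malloc_mutex_rank_exclusive)) {
 *		... handle failure ...
 *	}
 */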
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

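/*
 * Fork protocol: the allocator acquires all of its mutexes before fork() so
 * that the child does not inherit locks held by threads that no longer exist.
 * The parent simply unlocks afterwards; the child either unlocks (under
 * JEMALLOC_MUTEX_INIT_CB) or reinitializes each mutex from scratch.
 */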
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

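/*
 * Under JEMALLOC_MUTEX_INIT_CB, initialize all mutexes whose creation was
 * postponed during early bootstrap.  Returns true on failure.
 */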
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}