#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"

typedef enum {
	/* Can only acquire one mutex of a given witness rank at a time. */
	malloc_mutex_rank_exclusive,
	/*
	 * Can acquire multiple mutexes of the same witness rank, but in
	 * address-ascending order only.
	 */
	malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
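/*
 * Example (illustrative only): under malloc_mutex_address_ordered, a thread
 * that needs two same-rank mutexes must take them in address-ascending
 * order.  Assuming two hypothetical mutexes mtx_a and mtx_b:
 *
 *	malloc_mutex_t *m1 = (&mtx_a < &mtx_b) ? &mtx_a : &mtx_b;
 *	malloc_mutex_t *m2 = (&mtx_a < &mtx_b) ? &mtx_b : &mtx_a;
 *	malloc_mutex_lock(tsdn, m1);
 *	malloc_mutex_lock(tsdn, m2);
 */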

typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
	union {
		struct {
			/*
			 * prof_data is defined first to reduce cacheline
			 * bouncing: the data is not touched by the mutex
			 * holder during unlocking, while it might be modified
			 * by contenders.  Having it before the mutex itself
			 * could avoid prefetching a modified cacheline (for
			 * the unlocking thread).
			 */
			mutex_prof_data_t	prof_data;
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
			SRWLOCK         	lock;
#  else
			CRITICAL_SECTION	lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
			os_unfair_lock		lock;
#elif (defined(JEMALLOC_OSSPIN))
			OSSpinLock		lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
			pthread_mutex_t		lock;
			malloc_mutex_t		*postponed_next;
#else
			pthread_mutex_t		lock;
#endif
		};
		/*
		 * We only touch witness when configured w/ debug.  However,
		 * we keep the field in a union when !debug so that we don't
		 * have to pollute the code base with #ifdefs, while avoiding
		 * the memory cost.
		 */
#if !defined(JEMALLOC_DEBUG)
		witness_t			witness;
		malloc_mutex_lock_order_t	lock_order;
#endif
	};

#if defined(JEMALLOC_DEBUG)
	witness_t			witness;
	malloc_mutex_lock_order_t	lock_order;
#endif
};

/*
 * Based on benchmark results, a fixed spin with this many retries works
 * well for our critical sections.
 */
#define MALLOC_MUTEX_MAX_SPIN 250
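/*
 * Sketch (an assumption, simplified; the real slow path lives in mutex.c):
 * the bound caps a trylock spin loop before falling back to a blocking
 * acquire, along the lines of:
 *
 *	for (int i = 0; i < MALLOC_MUTEX_MAX_SPIN; i++) {
 *		if (!MALLOC_MUTEX_TRYLOCK(mutex)) {
 *			return;
 *		}
 *		CPU_SPINWAIT;
 *	}
 *	MALLOC_MUTEX_LOCK(mutex);
 */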

#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
#    define MALLOC_MUTEX_LOCK(m)    AcquireSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  ReleaseSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
#  else
#    define MALLOC_MUTEX_LOCK(m)    EnterCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  LeaveCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#    define MALLOC_MUTEX_LOCK(m)    os_unfair_lock_lock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  os_unfair_lock_unlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
#    define MALLOC_MUTEX_LOCK(m)    OSSpinLockLock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  OSSpinLockUnlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
#    define MALLOC_MUTEX_LOCK(m)    pthread_mutex_lock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  pthread_mutex_unlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif

#define LOCK_PROF_DATA_INITIALIZER					\
    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0,		\
	    ATOMIC_INIT(0), 0, NULL, 0}

#ifdef _WIN32
#  define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}},		\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, 0}},				\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}},	\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#  define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}},	\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
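/*
 * Example (illustrative): a mutex with static storage duration can be
 * initialized at load time.  Note the _WIN32 initializer above is empty,
 * so on Win32 a mutex presumably has to be set up at runtime via
 * malloc_mutex_init() instead:
 *
 *	static malloc_mutex_t	hypothetical_mtx = MALLOC_MUTEX_INITIALIZER;
 */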

#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
/* Without lazy locking, assume the process is always threaded. */
#  define isthreaded true
#endif

bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_first_thread(void);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);

void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
	MALLOC_MUTEX_LOCK(mutex);
}

static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
	return MALLOC_MUTEX_TRYLOCK(mutex);
}

/*
 * Bump owner stats on acquisition; tsdn stands in for the owning thread's
 * identity, so a change of tsdn counts as an owner switch.
 */
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	if (config_stats) {
		mutex_prof_data_t *data = &mutex->prof_data;
		data->n_lock_ops++;
		if (data->prev_owner != tsdn) {
			data->prev_owner = tsdn;
			data->n_owner_switches++;
		}
	}
}

/*
 * Trylock: return false if the lock was successfully acquired, true
 * otherwise.
 */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		if (malloc_mutex_trylock_final(mutex)) {
			return true;
		}
		mutex_owner_stats_update(tsdn, mutex);
	}
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

	return false;
}
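/*
 * Example (illustrative): because trylock follows the false-on-success
 * convention (cf. pthread_mutex_trylock() returning 0), callers guard
 * critical sections like so, for a hypothetical mutex:
 *
 *	if (!malloc_mutex_trylock(tsdn, &hypothetical_mtx)) {
 *		... critical section ...
 *		malloc_mutex_unlock(tsdn, &hypothetical_mtx);
 *	}
 */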

/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
	}

	sum->n_wait_times += data->n_wait_times;
	sum->n_spin_acquired += data->n_spin_acquired;

	if (sum->max_n_thds < data->max_n_thds) {
		sum->max_n_thds = data->max_n_thds;
	}
	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
	    ATOMIC_RELAXED);
	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
	    &data->n_waiting_thds, ATOMIC_RELAXED);
	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
	    ATOMIC_RELAXED);
	sum->n_owner_switches += data->n_owner_switches;
	sum->n_lock_ops += data->n_lock_ops;
}

static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		if (malloc_mutex_trylock_final(mutex)) {
			malloc_mutex_lock_slow(mutex);
		}
		mutex_owner_stats_update(tsdn, mutex);
	}
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
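/*
 * Example (illustrative): lock and unlock must pair, and the witness layer
 * checks rank ordering in debug builds:
 *
 *	malloc_mutex_lock(tsdn, &hypothetical_mtx);
 *	... critical section ...
 *	malloc_mutex_unlock(tsdn, &hypothetical_mtx);
 */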

static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		MALLOC_MUTEX_UNLOCK(mutex);
	}
}

static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	/*
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
	 * atomic data), but the mutex protection makes this safe, and writing
	 * a member-for-member copy is tedious for this situation.
	 */
	*data = *source;
	/* n_waiting_thds is not reported (modified w/o locking). */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
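/*
 * Example (illustrative): a stats consumer typically snapshots each mutex
 * under its lock, then merges the copies into an aggregate:
 *
 *	mutex_prof_data_t sum = LOCK_PROF_DATA_INITIALIZER, cur;
 *	malloc_mutex_lock(tsdn, &hypothetical_mtx);
 *	malloc_mutex_prof_read(tsdn, &cur, &hypothetical_mtx);
 *	malloc_mutex_unlock(tsdn, &hypothetical_mtx);
 *	malloc_mutex_prof_merge(&sum, &cur);
 */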

#endif /* JEMALLOC_INTERNAL_MUTEX_H */