1 #include "test/jemalloc_test.h"
2
3 /*
4 * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
5 * be asserting that we're on one.
6 */
7 static bool originally_fast;
8 static int data_cleanup_count;
9
10 void
data_cleanup(int * data)11 data_cleanup(int *data) {
12 if (data_cleanup_count == 0) {
13 assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
14 "Argument passed into cleanup function should match tsd "
15 "value");
16 }
17 ++data_cleanup_count;
18
19 /*
20 * Allocate during cleanup for two rounds, in order to assure that
21 * jemalloc's internal tsd reinitialization happens.
22 */
23 bool reincarnate = false;
24 switch (*data) {
25 case MALLOC_TSD_TEST_DATA_INIT:
26 *data = 1;
27 reincarnate = true;
28 break;
29 case 1:
30 *data = 2;
31 reincarnate = true;
32 break;
33 case 2:
34 return;
35 default:
36 not_reached();
37 }
38
39 if (reincarnate) {
40 void *p = mallocx(1, 0);
41 assert_ptr_not_null(p, "Unexpeced mallocx() failure");
42 dallocx(p, 0);
43 }
44 }
45
46 static void *
thd_start(void * arg)47 thd_start(void *arg) {
48 int d = (int)(uintptr_t)arg;
49 void *p;
50
51 tsd_t *tsd = tsd_fetch();
52 assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
53 "Initial tsd get should return initialization value");
54
55 p = malloc(1);
56 assert_ptr_not_null(p, "Unexpected malloc() failure");
57
58 tsd_test_data_set(tsd, d);
59 assert_x_eq(tsd_test_data_get(tsd), d,
60 "After tsd set, tsd get should return value that was set");
61
62 d = 0;
63 assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
64 "Resetting local data should have no effect on tsd");
65
66 tsd_test_callback_set(tsd, &data_cleanup);
67
68 free(p);
69 return NULL;
70 }
71
TEST_BEGIN(test_tsd_main_thread) {
	/*
	 * Run the shared thread body directly on the main thread; the
	 * argument is an arbitrary sentinel value for the set/get checks.
	 */
	thd_start((void *)(uintptr_t)0xa5f3e329);
}
TEST_END
76
TEST_BEGIN(test_tsd_sub_thread) {
	thd_t thd;

	/* Reset the counter so data_cleanup() can validate its first call. */
	data_cleanup_count = 0;
	thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
	thd_join(thd, NULL);
	/*
	 * We reincarnate twice in the data cleanup, so it should execute at
	 * least 3 times.
	 */
	assert_x_ge(data_cleanup_count, 3,
	    "Cleanup function should have executed multiple times.");
}
TEST_END
91
/*
 * Drive a thread's tsd through the purgatory -> reincarnated state
 * transitions by calling tsd_cleanup() manually while the thread is still
 * alive, then verify the arena pointer stays cleared afterwards.
 */
static void *
thd_start_reincarnated(void *arg) {
	tsd_t *tsd = tsd_fetch();
	assert(tsd);

	void *p = malloc(1);
	assert_ptr_not_null(p, "Unexpected malloc() failure");

	/* Manually trigger reincarnation. */
	assert_ptr_not_null(tsd_arena_get(tsd),
	    "Should have tsd arena set.");
	tsd_cleanup((void *)tsd);
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared.");
	assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
	    "TSD state should be purgatory\n");

	/* Using the allocator from purgatory should reincarnate the tsd. */
	free(p);
	assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
	    "TSD state should be reincarnated\n");
	p = mallocx(1, MALLOCX_TCACHE_NONE);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "Should not have tsd arena set after reincarnation.");

	free(p);
	tsd_cleanup((void *)tsd);
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared after 2nd cleanup.");

	return NULL;
}
124
TEST_BEGIN(test_tsd_reincarnation) {
	/* All assertions live in the thread body; just run it to completion. */
	thd_t thd;
	thd_create(&thd, thd_start_reincarnated, NULL);
	thd_join(thd, NULL);
}
TEST_END
131
/* Shared state used to lock-step the main and child threads. */
typedef struct {
	/* Monotonic handshake counter; each thread spins until its phase. */
	atomic_u32_t phase;
	/* Set by the child when it observes an unexpected fast/slow state. */
	atomic_b_t error;
} global_slow_data_t;
136
/*
 * Child-thread half of the global-slow handshake.  The child runs the even
 * phases it spins on below; the main thread (test_tsd_global_slow) runs the
 * odd ones and toggles the global slow count between phases.  At each step
 * the child records into data->error whether its tsd fast/slow state
 * disagrees with what the main thread's inc/dec sequence implies.
 */
static void *
thd_start_global_slow(void *arg) {
	/* PHASE 0 */
	global_slow_data_t *data = (global_slow_data_t *)arg;
	/* Touch the allocator so this thread's tsd is initialized. */
	free(mallocx(1, 0));

	tsd_t *tsd = tsd_fetch();
	/*
	 * No global slowness has happened yet; there was an error if we were
	 * originally fast but aren't now.
	 */
	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
	    ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);

	/* PHASE 2: main thread did one global_slow_inc; we must be slow. */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
	}
	free(mallocx(1, 0));
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);

	/* PHASE 4: a second inc happened; still must be slow. */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
	}
	free(mallocx(1, 0));
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);

	/* PHASE 6 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
	}
	free(mallocx(1, 0));
	/* Only one decrement so far. */
	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);

	/* PHASE 8 */
	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
	}
	free(mallocx(1, 0));
	/*
	 * Both decrements happened; we should be fast again (if we ever
	 * were)
	 */
	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
	    ATOMIC_SEQ_CST);
	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);

	return NULL;
}
188
TEST_BEGIN(test_tsd_global_slow)189 TEST_BEGIN(test_tsd_global_slow) {
190 global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
191 /*
192 * Note that the "mallocx" here (vs. malloc) is important, since the
193 * compiler is allowed to optimize away free(malloc(1)) but not
194 * free(mallocx(1)).
195 */
196 free(mallocx(1, 0));
197 tsd_t *tsd = tsd_fetch();
198 originally_fast = tsd_fast(tsd);
199
200 thd_t thd;
201 thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
202 /* PHASE 1 */
203 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
204 /*
205 * We don't have a portable condvar/semaphore mechanism.
206 * Spin-wait.
207 */
208 }
209 assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
210 tsd_global_slow_inc(tsd_tsdn(tsd));
211 free(mallocx(1, 0));
212 assert_false(tsd_fast(tsd), "");
213 atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
214
215 /* PHASE 3 */
216 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
217 }
218 assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
219 /* Increase again, so that we can test multiple fast/slow changes. */
220 tsd_global_slow_inc(tsd_tsdn(tsd));
221 atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
222 free(mallocx(1, 0));
223 assert_false(tsd_fast(tsd), "");
224
225 /* PHASE 5 */
226 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
227 }
228 assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
229 tsd_global_slow_dec(tsd_tsdn(tsd));
230 atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
231 /* We only decreased once; things should still be slow. */
232 free(mallocx(1, 0));
233 assert_false(tsd_fast(tsd), "");
234
235 /* PHASE 7 */
236 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
237 }
238 assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
239 tsd_global_slow_dec(tsd_tsdn(tsd));
240 atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
241 /* We incremented and then decremented twice; we should be fast now. */
242 free(mallocx(1, 0));
243 assert_true(!originally_fast || tsd_fast(tsd), "");
244
245 /* PHASE 9 */
246 while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
247 }
248 assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
249
250 thd_join(thd, NULL);
251 }
252 TEST_END
253
254 int
main(void)255 main(void) {
256 /* Ensure tsd bootstrapped. */
257 if (nallocx(1, 0) == 0) {
258 malloc_printf("Initialization error");
259 return test_status_fail;
260 }
261
262 return test_no_reentrancy(
263 test_tsd_main_thread,
264 test_tsd_sub_thread,
265 test_tsd_reincarnation,
266 test_tsd_global_slow);
267 }
268