1 /* $NetBSD: sljitUtils.c,v 1.11 2020/09/05 16:30:11 riastradh Exp $ */
2
3 /*
4 * Stack-less Just-In-Time compiler
5 *
6 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without modification, are
9 * permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright notice, this list of
12 * conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
15 * of conditions and the following disclaimer in the documentation and/or other materials
16 * provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
21 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* ------------------------------------------------------------------------ */
30 /* Locks */
31 /* ------------------------------------------------------------------------ */
32
33 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
34
35 #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
36
37 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
38
/* Single-threaded build: the executable-allocator lock is a no-op. */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* Always successful. */
}
43
/* Single-threaded build: releasing the allocator lock is a no-op. */
static SLJIT_INLINE void allocator_release_lock(void)
{
	/* Always successful. */
}
48
49 #endif /* SLJIT_EXECUTABLE_ALLOCATOR */
50
51 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
52
/* Single-threaded build: the public global lock is a no-op. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* Always successful. */
}
57
/* Single-threaded build: releasing the global lock is a no-op. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	/* Always successful. */
}
62
63 #endif /* SLJIT_UTIL_GLOBAL_LOCK */
64
65 #elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */
66
67 #include "windows.h"
68
69 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
70
71 static HANDLE allocator_mutex = 0;
72
allocator_grab_lock(void)73 static SLJIT_INLINE void allocator_grab_lock(void)
74 {
75 /* No idea what to do if an error occures. Static mutexes should never fail... */
76 if (!allocator_mutex)
77 allocator_mutex = CreateMutex(NULL, TRUE, NULL);
78 else
79 WaitForSingleObject(allocator_mutex, INFINITE);
80 }
81
/* Release the allocator mutex; must be paired with a preceding
   allocator_grab_lock() (the handle is assumed to exist here). */
static SLJIT_INLINE void allocator_release_lock(void)
{
	ReleaseMutex(allocator_mutex);
}
86
87 #endif /* SLJIT_EXECUTABLE_ALLOCATOR */
88
89 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
90
91 static HANDLE global_mutex = 0;
92
sljit_grab_lock(void)93 SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
94 {
95 /* No idea what to do if an error occures. Static mutexes should never fail... */
96 if (!global_mutex)
97 global_mutex = CreateMutex(NULL, TRUE, NULL);
98 else
99 WaitForSingleObject(global_mutex, INFINITE);
100 }
101
/* Release the public global mutex; must follow sljit_grab_lock(). */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	ReleaseMutex(global_mutex);
}
106
107 #endif /* SLJIT_UTIL_GLOBAL_LOCK */
108
109 #else /* _WIN32 */
110
111 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
112
113 #ifdef _KERNEL
114
115 #include <sys/mutex.h>
116
117 /* Defined in sljit_mod.c */
118 extern kmutex_t sljit_allocator_mutex;
119
/* NetBSD kernel build: enter the kmutex owned by sljit_mod.c. */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	mutex_enter(&sljit_allocator_mutex);
}
124
/* NetBSD kernel build: exit the kmutex owned by sljit_mod.c. */
static SLJIT_INLINE void allocator_release_lock(void)
{
	mutex_exit(&sljit_allocator_mutex);
}
129 #else
130
131 #include <pthread.h>
132
133 static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;
134
/* Userland POSIX build: serialize allocator access on a statically
   initialized pthread mutex (lock failure is ignored, matching the
   "static mutexes should never fail" policy used elsewhere). */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	pthread_mutex_lock(&allocator_mutex);
}
139
/* Userland POSIX build: release the allocator pthread mutex. */
static SLJIT_INLINE void allocator_release_lock(void)
{
	pthread_mutex_unlock(&allocator_mutex);
}
144 #endif
145
146 #endif /* SLJIT_EXECUTABLE_ALLOCATOR */
147
148 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
149
150 #ifdef _KERNEL
151
152 #include <sys/mutex.h>
153
154 /* Defined in sljit_mod.c */
155 extern kmutex_t sljit_global_mutex;
156
/* NetBSD kernel build: enter the global kmutex owned by sljit_mod.c. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	mutex_enter(&sljit_global_mutex);
}
161
/* NetBSD kernel build: exit the global kmutex owned by sljit_mod.c. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	mutex_exit(&sljit_global_mutex);
}
166 #else
167
168 #include <pthread.h>
169
170 static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
171
/* Userland POSIX build: take the global pthread mutex. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	pthread_mutex_lock(&global_mutex);
}
176
/* Userland POSIX build: release the global pthread mutex. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	pthread_mutex_unlock(&global_mutex);
}
181 #endif
182
183 #endif /* SLJIT_UTIL_GLOBAL_LOCK */
184
185 #endif /* _WIN32 */
186
187 /* ------------------------------------------------------------------------ */
188 /* Stack */
189 /* ------------------------------------------------------------------------ */
190
191 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
192
193 #ifdef _KERNEL
194 #include <sys/param.h>
195 #include <uvm/uvm_extern.h>
196 #elif defined(_WIN32)
197 #include "windows.h"
198 #else
199 /* Provides mmap function. */
200 #include <sys/mman.h>
201 /* For detecting the page size. */
202 #include <unistd.h>
203
204 #ifndef MAP_ANON
205
206 #include <fcntl.h>
207
/* Some old systems do not have MAP_ANON; mmap /dev/zero instead.
   -1 means "not opened yet" — see open_dev_zero() below. */
static sljit_s32 dev_zero = -1;
210
211 #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
212
open_dev_zero(void)213 static SLJIT_INLINE sljit_s32 open_dev_zero(void)
214 {
215 dev_zero = open("/dev/zero", O_RDWR);
216 return dev_zero < 0;
217 }
218
219 #else /* SLJIT_SINGLE_THREADED */
220
221 #include <pthread.h>
222
223 static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;
224
open_dev_zero(void)225 static SLJIT_INLINE sljit_s32 open_dev_zero(void)
226 {
227 pthread_mutex_lock(&dev_zero_mutex);
228 /* The dev_zero might be initialized by another thread during the waiting. */
229 if (dev_zero < 0) {
230 dev_zero = open("/dev/zero", O_RDWR);
231 }
232 pthread_mutex_unlock(&dev_zero_mutex);
233 return dev_zero < 0;
234 }
235
236 #endif /* SLJIT_SINGLE_THREADED */
237
238 #endif
239
240 #endif
241
242 #endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */
243
244 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
245
/* Planning to make it even more clever in the future.
   NOTE: despite the name this holds (page_size - 1), i.e. an
   alignment MASK, initialized lazily in sljit_allocate_stack(). */
static sljit_sw sljit_page_align = 0;
248
/* Allocate a downward-growing run-time stack for JIT-generated code.
 *
 * limit          - initially usable size in bytes (must be >= 1 and
 *                  <= max_limit)
 * max_limit      - maximum size the stack may ever grow to; rounded up
 *                  here to a whole number of pages
 * allocator_data - opaque cookie forwarded to SLJIT_MALLOC/SLJIT_FREE
 *
 * Returns the new descriptor, or NULL on any failure.
 *
 * Layout (addresses increasing): max_limit <= limit < base; the stack
 * top starts at base and grows down towards limit.  The whole
 * max_limit range is reserved up front: on Windows only the initial
 * "limit" bytes are committed (sljit_stack_resize commits more on
 * demand), while the uvm/mmap paths rely on the kernel's own paging. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit, void *allocator_data)
{
	struct sljit_stack *stack;
	void *ptr;
#ifdef _WIN32
	SYSTEM_INFO si;
#endif

	/* SLJIT_UNUSED_ARG silences warnings in configurations where
	   SLJIT_MALLOC/SLJIT_FREE ignore allocator_data. */
	SLJIT_UNUSED_ARG(allocator_data);
	if (limit > max_limit || limit < 1)
		return NULL;

#ifdef _WIN32
	/* Lazily detect the page size; sljit_page_align stores the
	   page-size-minus-one mask, not the page size itself. */
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = si.dwPageSize - 1;
	}
#else
	if (!sljit_page_align) {
#ifdef _KERNEL
		sljit_page_align = PAGE_SIZE;
#else
		sljit_page_align = sysconf(_SC_PAGESIZE);
#endif
		/* Should never happen. */
		if (sljit_page_align < 0)
			sljit_page_align = 4096;
		sljit_page_align--;
	}
#endif

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack), allocator_data);
	if (!stack)
		return NULL;

	/* Align max_limit. */
	max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;

#ifdef _WIN32
	/* Reserve the address range only; pages are committed by
	   sljit_stack_resize below. */
	ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
	if (!ptr) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	stack->max_limit = (sljit_u8 *)ptr;
	stack->base = stack->max_limit + max_limit;
	stack->limit = stack->base;
	/* Commit the initial "limit" bytes below base. */
	if (sljit_stack_resize(stack, stack->base - limit)) {
		sljit_free_stack(stack, allocator_data);
		return NULL;
	}
#elif defined(_KERNEL)
	/* Kernel build: wired, zeroed kernel_map allocation. */
	ptr = (void *)uvm_km_alloc(kernel_map, max_limit, PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (ptr == NULL) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	stack->max_limit = (sljit_u8 *)ptr;
	stack->base = stack->max_limit + max_limit;
	stack->limit = stack->base - limit;
#else
#ifdef MAP_ANON
	ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
	/* No MAP_ANON: map /dev/zero instead (opened on demand). */
	if (dev_zero < 0) {
		if (open_dev_zero()) {
			SLJIT_FREE(stack, allocator_data);
			return NULL;
		}
	}
	ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
#endif
	if (ptr == MAP_FAILED) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	stack->max_limit = (sljit_u8 *)ptr;
	stack->base = stack->max_limit + max_limit;
	stack->limit = stack->base - limit;
#endif
	stack->top = stack->base;
	return stack;
}
332
333 #undef PAGE_ALIGN
334
/* Release a stack created by sljit_allocate_stack: unmap/free the
 * whole reserved region (base - max_limit bytes, i.e. the page-aligned
 * max_limit), then free the descriptor itself. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
	SLJIT_UNUSED_ARG(allocator_data);
#ifdef _WIN32
	/* Size 0 + MEM_RELEASE frees the entire reservation. */
	VirtualFree((void*)stack->max_limit, 0, MEM_RELEASE);
#elif defined(_KERNEL)
	uvm_km_free(kernel_map, (vaddr_t)stack->max_limit,
	    stack->base - stack->max_limit, UVM_KMF_WIRED);
#else
	munmap((void*)stack->max_limit, stack->base - stack->max_limit);
#endif
	SLJIT_FREE(stack, allocator_data);
}
348
sljit_stack_resize(struct sljit_stack * stack,sljit_u8 * new_limit)349 SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_limit)
350 {
351 #if defined(MADV_DONTNEED) || defined(POSIX_MADV_DONTNEED)
352 sljit_uw aligned_old_limit;
353 sljit_uw aligned_new_limit;
354 #endif
355
356 if ((new_limit < stack->max_limit) || (new_limit >= stack->base))
357 return -1;
358 #ifdef _WIN32
359 aligned_new_limit = (sljit_uw)new_limit & ~sljit_page_align;
360 aligned_old_limit = ((sljit_uw)stack->limit) & ~sljit_page_align;
361 if (aligned_new_limit != aligned_old_limit) {
362 if (aligned_new_limit < aligned_old_limit) {
363 if (!VirtualAlloc((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_COMMIT, PAGE_READWRITE))
364 return -1;
365 }
366 else {
367 if (!VirtualFree((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_DECOMMIT))
368 return -1;
369 }
370 }
371 stack->limit = new_limit;
372 return 0;
373 #else
374 if (new_limit <= stack->limit) {
375 stack->limit = new_limit;
376 return 0;
377 }
378 #if defined(MADV_DONTNEED) || defined(POSIX_MADV_DONTNEED)
379 aligned_new_limit = (sljit_uw)new_limit & ~sljit_page_align;
380 aligned_old_limit = ((sljit_uw)stack->limit) & ~sljit_page_align;
381 #endif
382 /* If madvise is available, we release the unnecessary space. */
383 #if defined(MADV_DONTNEED)
384 if (aligned_new_limit > aligned_old_limit)
385 madvise((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MADV_DONTNEED);
386 #elif defined(POSIX_MADV_DONTNEED)
387 if (aligned_new_limit > aligned_old_limit)
388 posix_madvise((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, POSIX_MADV_DONTNEED);
389 #endif
390 stack->limit = new_limit;
391 return 0;
392 #endif
393 }
394
395 #endif /* SLJIT_UTIL_STACK */
396
397 #endif
398