/*
** Machine code management.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif
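
/* One way to enable this is to pass the define at build time, e.g.
** (assuming the stock Makefile-based build; the exact invocation may differ
** for other build setups):
**
**   make XCFLAGS=-DLUAJIT_USE_VALGRIND
*/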

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
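
/* Typical usage (an illustrative sketch, not a call site from this file):
** after writing or patching machine code in the half-open range [p, pe),
** flush it before executing it:
**
**   lj_mcode_sync(p, pe);
**
** On x86/x64 this is a no-op, since those CPUs keep the instruction cache
** coherent with ordinary stores.
*/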

#endif

#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW	PAGE_READWRITE
#define MCPROT_RX	PAGE_EXECUTE_READ
#define MCPROT_RWX	PAGE_EXECUTE_READWRITE

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = VirtualAlloc((void *)hint, sz,
			 MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !VirtualProtect(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#define MCPROT_RW	(PROT_READ|PROT_WRITE)
#define MCPROT_RX	(PROT_READ|PROT_EXEC)
#define MCPROT_RWX	(PROT_READ|PROT_WRITE|PROT_EXEC)

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}

static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW	0
#define MCPROT_RX	0
#define MCPROT_RWX	0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN	MCPROT_RWX
#define MCPROT_RUN	MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN	MCPROT_RW
#define MCPROT_RUN	MCPROT_RX
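
/* Illustrative life cycle under this default scheme (a sketch, not code
** from this file): the area is switched to MCPROT_GEN only while the
** assembler writes into it, and back to MCPROT_RUN as soon as the code
** is committed:
**
**   mcode_protect(J, MCPROT_GEN);   (writable, not executable)
**   ... emit machine code ...
**   mcode_protect(J, MCPROT_RUN);   (executable, not writable)
*/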

/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
}

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}

#endif

/* -- MCode area allocation ----------------------------------------------- */

#if LJ_64
#define mcode_validptr(p)	(p)
#else
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < 0xffff0000)
#endif

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
		     0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  /* Limit probing iterations, depending on the available pool size. */
  for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p) &&
	  ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
	return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing 64K-aligned pseudo-random addresses. */
    do {
      hint = LJ_PRNG_BITS(J, LJ_TARGET_JUMPRANGE-16) << 16;
    } while (!(hint + sz < range+range));
    hint = target + hint - range;
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
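
/* Worked example (assuming LJ_TARGET_JUMPRANGE == 31, i.e. +-2GB relative
** branches as on x64): range = (1u << 30) - (1u << 21) = 1GB - 2MB.
** Candidate areas are probed inside the window [target - range, target + range),
** so any two addresses in the window are less than 2*range < 2GB apart and
** can therefore reach each other and the static VM code with a relative jump.
*/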

#else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#ifdef __OpenBSD__
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

#endif

/* -- MCode area management ----------------------------------------------- */

/* Linked list of MCode areas. */
typedef struct MCLink {
  MCode *next;		/* Next area. */
  size_t size;		/* Size of current area. */
} MCLink;
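
/* Each area starts with an MCLink header (an illustration of what
** mcode_allocarea() below sets up):
**
**   mcarea ............. mcbot ........................... mctop
**   [ MCLink{next,size} | usable space for machine code      ]
**
** with mcbot == mcarea + sizeof(MCLink) and mctop == mcarea + szmcarea.
** The header chains all areas together so lj_mcode_free() and
** lj_mcode_patch() can walk them later; code is emitted downwards from
** mctop (see lj_mcode_reserve() and lj_mcode_commit()).
*/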

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
}

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    mcode_free(J, mc, ((MCLink *)mc)->size);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}
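
/* Typical transaction, as seen from the assembler (an illustrative sketch,
** not a call site from this file):
**
**   MCode *lim, *top = lj_mcode_reserve(J, &lim);  (unprotect and reserve)
**   ... emit code downwards from top, but never below lim ...
**   lj_mcode_commit(J, top);                       (publish and reprotect)
**
** On an error path the reservation is released with lj_mcode_abort(J),
** which restores MCPROT_RUN without moving mctop.
*/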

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
	if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
	  mcode_protfail(J);
	return mc;
      }
    }
  }
#endif
}
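
/* Typical patching sequence (an illustrative sketch): unprotect the area
** containing the instruction, rewrite it, then restore protection using
** the value returned from the first call:
**
**   MCode *mcarea = lj_mcode_patch(J, p, 0);   (make area writable)
**   ... patch machine code at p ...
**   lj_mcode_patch(J, mcarea, 1);              (make it executable again)
**
** With LUAJIT_UNPROTECT_MCODE both calls are no-ops and return NULL.
*/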

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}
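
/* Example of the limit arithmetic (assuming defaults of sizemcode=32 and
** maxmcode=512, both in KB; the exact defaults vary by version and target):
** each area is 32KB, rounded up to the page size. A single trace needing
** more than 32KB fails with MCODEOV; if the total of all areas plus one
** more would exceed 512KB, allocation fails with MCODEAL; otherwise a
** fresh area is allocated and the trace is retried (MCODELM).
*/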

#endif