/*
** State and stack handling.
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
**
** Portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/
8
9 #define lj_state_c
10 #define LUA_CORE
11
12 #include "lj_obj.h"
13 #include "lj_gc.h"
14 #include "lj_err.h"
15 #include "lj_buf.h"
16 #include "lj_str.h"
17 #include "lj_tab.h"
18 #include "lj_func.h"
19 #include "lj_meta.h"
20 #include "lj_state.h"
21 #include "lj_frame.h"
22 #if LJ_HASFFI
23 #include "lj_ctype.h"
24 #endif
25 #include "lj_trace.h"
26 #include "lj_dispatch.h"
27 #include "lj_vm.h"
28 #include "lj_prng.h"
29 #include "lj_lex.h"
30 #include "lj_alloc.h"
31 #include "luajit.h"
32
/* -- Stack handling ------------------------------------------------------ */

/* Stack sizes. */
#define LJ_STACK_MIN	LUA_MINSTACK	/* Min. stack size. */
#define LJ_STACK_MAX	LUAI_MAXSTACK	/* Max. stack size. */
#define LJ_STACK_START	(2*LJ_STACK_MIN)	/* Starting stack size. */
#define LJ_STACK_MAXEX	(LJ_STACK_MAX + 1 + LJ_STACK_EXTRA)	/* Hard limit incl. sentinel and extra slots. */
40
/* Explanation of LJ_STACK_EXTRA:
**
** Calls to metamethods store their arguments beyond the current top
** without checking for the stack limit. This avoids stack resizes which
** would invalidate passed TValue pointers. The stack check is performed
** later by the function header. This can safely resize the stack or raise
** an error. Thus we need some extra slots beyond the current stack limit.
**
** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus
** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
** slots above top, but then mobj is always a function. So we can get by
** with 5 extra slots.
** LJ_FR2: We need 2 more slots for the frame PC and the continuation PC.
*/
55
/* Resize stack slots and adjust pointers in state.
** n is the number of usable slots; the allocation adds one sentinel slot
** plus LJ_STACK_EXTRA headroom for metamethod calls. The stack may move,
** so every pointer into it (base, top, jit_base, open upvalues) is
** relocated by the byte delta between the old and new allocation.
*/
static void resizestack(lua_State *L, MSize n)
{
  TValue *st, *oldst = tvref(L->stack);
  ptrdiff_t delta;
  MSize oldsize = L->stacksize;
  MSize realsize = n + 1 + LJ_STACK_EXTRA;  /* Usable slots + sentinel + extra. */
  GCobj *up;
  lj_assertL((MSize)(tvref(L->maxstack)-oldst) == L->stacksize-LJ_STACK_EXTRA-1,
	     "inconsistent stack size");
  st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
				(MSize)(oldsize*sizeof(TValue)),
				(MSize)(realsize*sizeof(TValue)));
  setmref(L->stack, st);
  delta = (char *)st - (char *)oldst;  /* Byte offset of the moved stack. */
  setmref(L->maxstack, st + n);  /* Limit excludes sentinel and extra slots. */
  while (oldsize < realsize)  /* Clear new slots. */
    setnilV(st + oldsize++);
  L->stacksize = realsize;
  /* Relocate jit_base too, if it pointed into the old stack area. */
  if ((size_t)(mref(G(L)->jit_base, char) - (char *)oldst) < oldsize)
    setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
  L->base = (TValue *)((char *)L->base + delta);
  L->top = (TValue *)((char *)L->top + delta);
  /* Open upvalues point into this stack, so fix them up as well. */
  for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
    setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
}
82
83 /* Relimit stack after error, in case the limit was overdrawn. */
lj_state_relimitstack(lua_State * L)84 void lj_state_relimitstack(lua_State *L)
85 {
86 if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1)
87 resizestack(L, LJ_STACK_MAX);
88 }
89
90 /* Try to shrink the stack (called from GC). */
lj_state_shrinkstack(lua_State * L,MSize used)91 void lj_state_shrinkstack(lua_State *L, MSize used)
92 {
93 if (L->stacksize > LJ_STACK_MAXEX)
94 return; /* Avoid stack shrinking while handling stack overflow. */
95 if (4*used < L->stacksize &&
96 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
97 /* Don't shrink stack of live trace. */
98 (tvref(G(L)->jit_base) == NULL || obj2gco(L) != gcref(G(L)->cur_L)))
99 resizestack(L, L->stacksize >> 1);
100 }
101
/* Try to grow stack by at least 'need' slots.
** Throws LUA_ERRERR on a nested overflow and LJ_ERR_STKOV when the
** resulting size still exceeds the hard limit.
*/
void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need)
{
  MSize n;
  if (L->stacksize > LJ_STACK_MAXEX)  /* Overflow while handling overflow? */
    lj_err_throw(L, LUA_ERRERR);
  n = L->stacksize + need;
  if (n > LJ_STACK_MAX) {
    /* Deliberately overdraw past the max. size, so the error handler
    ** below still has room to run; lj_state_relimitstack undoes this.
    */
    n += 2*LUA_MINSTACK;
  } else if (n < 2*L->stacksize) {
    n = 2*L->stacksize;  /* At least double the size, ... */
    if (n >= LJ_STACK_MAX)
      n = LJ_STACK_MAX;  /* ... but clamp at the max. size. */
  }
  resizestack(L, n);
  if (L->stacksize > LJ_STACK_MAXEX)  /* Still overdrawn? Raise stack overflow. */
    lj_err_msg(L, LJ_ERR_STKOV);
}
120
/* Grow stack by (at least) one slot. */
void LJ_FASTCALL lj_state_growstack1(lua_State *L)
{
  lj_state_growstack(L, 1);
}
125
/* Allocate basic stack for new state.
** L1 is the thread receiving the stack; L supplies the allocator context.
** Bottom layout: [thread slot][nil if LJ_FR2] then base == top.
*/
static void stack_init(lua_State *L1, lua_State *L)
{
  TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue);
  setmref(L1->stack, st);
  L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
  stend = st + L1->stacksize;
  setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);  /* Reserve sentinel + extra slots. */
  setthreadV(L1, st++, L1);  /* Needed for curr_funcisL() on empty stack. */
  if (LJ_FR2) setnilV(st++);  /* Second bottom slot for the two-slot frame layout. */
  L1->base = L1->top = st;
  while (st < stend)  /* Clear new slots. */
    setnilV(st++);
}
140
/* -- State handling ------------------------------------------------------ */
142
/* Open parts that may cause memory-allocation errors.
** Runs under lj_vm_cpcall (see lua_newstate), so a failed allocation
** unwinds back to the caller instead of aborting.
*/
static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
{
  global_State *g = G(L);
  UNUSED(dummy);
  UNUSED(ud);
  stack_init(L, L);
  /* NOBARRIER: State initialization, all objects are white. */
  setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
  settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
  lj_str_init(L);
  lj_meta_init(L);
  lj_lex_init(L);
  fixstring(lj_err_str(L, LJ_ERR_ERRMEM));  /* Preallocate memory error msg. */
  g->gc.threshold = 4*g->gc.total;  /* Don't trigger a GC cycle right away. */
  lj_trace_initstate(g);
  lj_err_verify();
  return NULL;
}
162
/* Free all objects and subsystems of the global state, then the state
** itself. L must be the main thread. Also used to unwind a partially
** constructed state when lua_newstate fails mid-way.
*/
static void close_state(lua_State *L)
{
  global_State *g = G(L);
  lj_func_closeuv(L, tvref(L->stack));  /* Close remaining open upvalues first. */
  lj_gc_freeall(g);
  lj_assertG(gcref(g->gc.root) == obj2gco(L),
	     "main thread is not first GC object");
  lj_assertG(g->str.num == 0, "leaked %d strings", g->str.num);
  lj_trace_freestate(g);
#if LJ_HASFFI
  lj_ctype_freestate(g);
#endif
  lj_str_freetab(g);
  lj_buf_free(g, &g->tmpbuf);
  lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
#if LJ_64
  /* Free the lightuserdata segment table, if it was ever allocated. */
  if (mref(g->gc.lightudseg, uint32_t)) {
    MSize segnum = g->gc.lightudnum ? (2 << lj_fls(g->gc.lightudnum)) : 2;
    lj_mem_freevec(g, mref(g->gc.lightudseg, uint32_t), segnum, uint32_t);
  }
#endif
  /* Everything but the combined GG_State must be freed by now. */
  lj_assertG(g->gc.total == sizeof(GG_State),
	     "memory leak of %lld bytes",
	     (long long)(g->gc.total - sizeof(GG_State)));
#ifndef LUAJIT_USE_SYSMALLOC
  if (g->allocf == lj_alloc_f)
    lj_alloc_destroy(g->allocd);  /* Built-in allocator: tear down its arena. */
  else
#endif
    g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);  /* Free the GG_State itself. */
}
194
/* Create a new Lua state with the given allocator.
** On x64 without LJ_GC64 the GG_State must be allocated in the low 2GB,
** so the public lua_newstate lives elsewhere and calls this internal
** entry point; otherwise this *is* the public lua_newstate.
** Returns NULL on allocation or PRNG-seeding failure.
*/
#if LJ_64 && !LJ_GC64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
lua_State *lj_state_newstate(lua_Alloc allocf, void *allocd)
#else
LUA_API lua_State *lua_newstate(lua_Alloc allocf, void *allocd)
#endif
{
  PRNGState prng;
  GG_State *GG;
  lua_State *L;
  global_State *g;
  /* We need the PRNG for the memory allocator, so initialize this first. */
  if (!lj_prng_seed_secure(&prng)) {
    lj_assertX(0, "secure PRNG seeding failed");
    /* Can only return NULL here, so this errors with "not enough memory". */
    return NULL;
  }
#ifndef LUAJIT_USE_SYSMALLOC
  if (allocf == LJ_ALLOCF_INTERNAL) {
    allocd = lj_alloc_create(&prng);
    if (!allocd) return NULL;
    allocf = lj_alloc_f;
  }
#endif
  /* Main thread and global state are allocated together in one GG_State. */
  GG = (GG_State *)allocf(allocd, NULL, 0, sizeof(GG_State));
  if (GG == NULL || !checkptrGC(GG)) return NULL;
  memset(GG, 0, sizeof(GG_State));
  L = &GG->L;
  g = &GG->g;
  L->gct = ~LJ_TTHREAD;
  L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED;  /* Prevent free. */
  L->dummy_ffid = FF_C;
  setmref(L->glref, g);
  g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
  g->strempty.marked = LJ_GC_WHITE0;
  g->strempty.gct = ~LJ_TSTR;
  g->allocf = allocf;
  g->allocd = allocd;
  g->prng = prng;
#ifndef LUAJIT_USE_SYSMALLOC
  if (allocf == lj_alloc_f) {
    /* Point the allocator at the global PRNG copy; the local one dies here. */
    lj_alloc_setprng(allocd, &g->prng);
  }
#endif
  setgcref(g->mainthref, obj2gco(L));
  /* Empty circular list of open upvalues. */
  setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
  setgcref(g->uvhead.next, obj2gco(&g->uvhead));
  g->str.mask = ~(MSize)0;
  setnilV(registry(L));
  setnilV(&g->nilnode.val);
  setnilV(&g->nilnode.key);
#if !LJ_GC64
  setmref(g->nilnode.freetop, &g->nilnode);
#endif
  lj_buf_init(NULL, &g->tmpbuf);
  g->gc.state = GCSpause;
  setgcref(g->gc.root, obj2gco(L));
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.total = sizeof(GG_State);
  g->gc.pause = LUAI_GCPAUSE;
  g->gc.stepmul = LUAI_GCMUL;
  lj_dispatch_init((GG_State *)L);
  L->status = LUA_ERRERR+1;  /* Avoid touching the stack upon memory error. */
  if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) {
    /* Memory allocation error: free partial state. */
    close_state(L);
    return NULL;
  }
  L->status = LUA_OK;
  L->exdata = NULL;
  L->exdata2 = NULL;
  return L;
}
267
/* Protected helper for lua_close: run pending cdata/udata finalizers. */
static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud)
{
  UNUSED(dummy);
  UNUSED(ud);
  lj_gc_finalize_cdata(L);
  lj_gc_finalize_udata(L);
  /* Frame pop omitted. */
  return NULL;
}
277
/* Close a Lua state: run all finalizers, then free everything. */
LUA_API void lua_close(lua_State *L)
{
  global_State *g = G(L);
  int i;
  L = mainthread(g);  /* Only the main thread can be closed. */
#if LJ_HASPROFILE
  luaJIT_profile_stop(L);
#endif
  setgcrefnull(g->cur_L);
  lj_func_closeuv(L, tvref(L->stack));
  lj_gc_separateudata(g, 1);  /* Separate udata which have GC metamethods. */
#if LJ_HASJIT
  G2J(g)->flags &= ~JIT_F_ON;  /* Turn the JIT compiler off ... */
  G2J(g)->state = LJ_TRACE_IDLE;  /* ... and stop any tracing in progress. */
  lj_dispatch_update(g);
#endif
  /* Repeatedly run finalizers; new garbage may become finalizable. */
  for (i = 0;;) {
    hook_enter(g);  /* NOTE(review): presumably blocks hooks while finalizing -- confirm vs. lj_dispatch.h. */
    L->status = LUA_OK;
    L->base = L->top = tvref(L->stack) + 1 + LJ_FR2;  /* Reset to empty bottom frame. */
    L->cframe = NULL;
    if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == LUA_OK) {
      if (++i >= 10) break;  /* Give up after 10 successful rounds. */
      lj_gc_separateudata(g, 1);  /* Separate udata again. */
      if (gcref(g->gc.mmudata) == NULL)  /* Until nothing is left to do. */
	break;
    }
    /* On error from a finalizer, retry without counting the round. */
  }
  close_state(L);
}
308
/* Create a new thread (coroutine) sharing the global state of L. */
lua_State *lj_state_new(lua_State *L)
{
  lua_State *L1 = lj_mem_newobj(L, lua_State);
  L1->gct = ~LJ_TTHREAD;
  L1->dummy_ffid = FF_C;
  L1->status = LUA_OK;
  L1->stacksize = 0;
  setmref(L1->stack, NULL);  /* Cleared first, in case stack_init below throws. */
  L1->cframe = NULL;
  /* NOBARRIER: The lua_State is new (marked white). */
  setgcrefnull(L1->openupval);
  setmrefr(L1->glref, L->glref);  /* Share the global state. */
  setgcrefr(L1->env, L->env);  /* Inherit the environment table. */
  stack_init(L1, L);  /* init stack */
  lj_assertL(iswhite(obj2gco(L1)), "new thread object is not white");
  /* Copy the per-state extension data pointers from the parent. */
  L1->exdata = L->exdata;
  L1->exdata2 = L->exdata2;
  return L1;
}
328
/* Free a dead thread and its stack. Must never be used on the main thread. */
void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
{
  lj_assertG(L != mainthread(g), "free of main thread");
  if (obj2gco(L) == gcref(g->cur_L))
    setgcrefnull(g->cur_L);  /* Don't leave a dangling current-thread ref. */
  lj_func_closeuv(L, tvref(L->stack));  /* Close upvalues before the stack is freed. */
  lj_assertG(gcref(L->openupval) == NULL, "stale open upvalues");
  lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
  lj_mem_freet(g, L);
}
339
340