1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <atomic>
6 #include <type_traits>
7
8 #include "src/wasm/wasm-interpreter.h"
9
10 #include "src/assembler-inl.h"
11 #include "src/boxed-float.h"
12 #include "src/compiler/wasm-compiler.h"
13 #include "src/conversions.h"
14 #include "src/identity-map.h"
15 #include "src/objects-inl.h"
16 #include "src/trap-handler/trap-handler.h"
17 #include "src/utils.h"
18 #include "src/wasm/decoder.h"
19 #include "src/wasm/function-body-decoder-impl.h"
20 #include "src/wasm/function-body-decoder.h"
21 #include "src/wasm/memory-tracing.h"
22 #include "src/wasm/wasm-engine.h"
23 #include "src/wasm/wasm-external-refs.h"
24 #include "src/wasm/wasm-limits.h"
25 #include "src/wasm/wasm-module.h"
26 #include "src/wasm/wasm-objects-inl.h"
27
28 #include "src/zone/accounting-allocator.h"
29 #include "src/zone/zone-containers.h"
30
31 namespace v8 {
32 namespace internal {
33 namespace wasm {
34
// Tracing helper: prints only when --trace-wasm-interpreter is enabled.
#define TRACE(...) \
  do { \
    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)
39
40 #define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
41
// Maps each wasm value type tag to its C representation type.
#define WASM_CTYPES(V) \
  V(I32, int32_t) V(I64, int64_t) V(F32, float) V(F64, double) V(S128, Simd128)
44
// Binary operators that map directly onto a C++ infix operator applied to
// the listed C type: V(wasm opcode, C type, operator).
#define FOREACH_SIMPLE_BINOP(V) \
  V(I32Add, uint32_t, +)        \
  V(I32Sub, uint32_t, -)        \
  V(I32Mul, uint32_t, *)        \
  V(I32And, uint32_t, &)        \
  V(I32Ior, uint32_t, |)        \
  V(I32Xor, uint32_t, ^)        \
  V(I32Eq, uint32_t, ==)        \
  V(I32Ne, uint32_t, !=)        \
  V(I32LtU, uint32_t, <)        \
  V(I32LeU, uint32_t, <=)       \
  V(I32GtU, uint32_t, >)        \
  V(I32GeU, uint32_t, >=)       \
  V(I32LtS, int32_t, <)         \
  V(I32LeS, int32_t, <=)        \
  V(I32GtS, int32_t, >)         \
  V(I32GeS, int32_t, >=)        \
  V(I64Add, uint64_t, +)        \
  V(I64Sub, uint64_t, -)        \
  V(I64Mul, uint64_t, *)        \
  V(I64And, uint64_t, &)        \
  V(I64Ior, uint64_t, |)        \
  V(I64Xor, uint64_t, ^)        \
  V(I64Eq, uint64_t, ==)        \
  V(I64Ne, uint64_t, !=)        \
  V(I64LtU, uint64_t, <)        \
  V(I64LeU, uint64_t, <=)       \
  V(I64GtU, uint64_t, >)        \
  V(I64GeU, uint64_t, >=)       \
  V(I64LtS, int64_t, <)         \
  V(I64LeS, int64_t, <=)        \
  V(I64GtS, int64_t, >)         \
  V(I64GeS, int64_t, >=)        \
  V(F32Add, float, +)           \
  V(F32Sub, float, -)           \
  V(F32Eq, float, ==)           \
  V(F32Ne, float, !=)           \
  V(F32Lt, float, <)            \
  V(F32Le, float, <=)           \
  V(F32Gt, float, >)            \
  V(F32Ge, float, >=)           \
  V(F64Add, double, +)          \
  V(F64Sub, double, -)          \
  V(F64Eq, double, ==)          \
  V(F64Ne, double, !=)          \
  V(F64Lt, double, <)           \
  V(F64Le, double, <=)          \
  V(F64Gt, double, >)           \
  V(F64Ge, double, >=)          \
  V(F32Mul, float, *)           \
  V(F64Mul, double, *)          \
  V(F32Div, float, /)           \
  V(F64Div, double, /)
98
// Binary operators that need a dedicated Execute<Name> helper (trapping
// division, shifts, rotates, min/max, copysign): V(wasm opcode, C type).
#define FOREACH_OTHER_BINOP(V) \
  V(I32DivS, int32_t)          \
  V(I32DivU, uint32_t)         \
  V(I32RemS, int32_t)          \
  V(I32RemU, uint32_t)         \
  V(I32Shl, uint32_t)          \
  V(I32ShrU, uint32_t)         \
  V(I32ShrS, int32_t)          \
  V(I64DivS, int64_t)          \
  V(I64DivU, uint64_t)         \
  V(I64RemS, int64_t)          \
  V(I64RemU, uint64_t)         \
  V(I64Shl, uint64_t)          \
  V(I64ShrU, uint64_t)         \
  V(I64ShrS, int64_t)          \
  V(I32Ror, int32_t)           \
  V(I32Rol, int32_t)           \
  V(I64Ror, int64_t)           \
  V(I64Rol, int64_t)           \
  V(F32Min, float)             \
  V(F32Max, float)             \
  V(F64Min, double)            \
  V(F64Max, double)            \
  V(I32AsmjsDivS, int32_t)     \
  V(I32AsmjsDivU, uint32_t)    \
  V(I32AsmjsRemS, int32_t)     \
  V(I32AsmjsRemU, uint32_t)    \
  V(F32CopySign, Float32)      \
  V(F64CopySign, Float64)
128
// Float-to-i32 conversions handled by the generic ExecuteConvert template:
// V(wasm opcode, destination int type, source float type).
#define FOREACH_I32CONV_FLOATOP(V)  \
  V(I32SConvertF32, int32_t, float) \
  V(I32SConvertF64, int32_t, double) \
  V(I32UConvertF32, uint32_t, float) \
  V(I32UConvertF64, uint32_t, double)
134
// Unary operators with dedicated Execute<Name> helpers:
// V(wasm opcode, argument C type).
#define FOREACH_OTHER_UNOP(V)    \
  V(I32Clz, uint32_t)            \
  V(I32Ctz, uint32_t)            \
  V(I32Popcnt, uint32_t)         \
  V(I32Eqz, uint32_t)            \
  V(I64Clz, uint64_t)            \
  V(I64Ctz, uint64_t)            \
  V(I64Popcnt, uint64_t)         \
  V(I64Eqz, uint64_t)            \
  V(F32Abs, Float32)             \
  V(F32Neg, Float32)             \
  V(F32Ceil, float)              \
  V(F32Floor, float)             \
  V(F32Trunc, float)             \
  V(F32NearestInt, float)        \
  V(F64Abs, Float64)             \
  V(F64Neg, Float64)             \
  V(F64Ceil, double)             \
  V(F64Floor, double)            \
  V(F64Trunc, double)            \
  V(F64NearestInt, double)       \
  V(I32ConvertI64, int64_t)      \
  V(I64SConvertF32, float)       \
  V(I64SConvertF64, double)      \
  V(I64UConvertF32, float)       \
  V(I64UConvertF64, double)      \
  V(I64SConvertI32, int32_t)     \
  V(I64UConvertI32, uint32_t)    \
  V(F32SConvertI32, int32_t)     \
  V(F32UConvertI32, uint32_t)    \
  V(F32SConvertI64, int64_t)     \
  V(F32UConvertI64, uint64_t)    \
  V(F32ConvertF64, double)       \
  V(F32ReinterpretI32, int32_t)  \
  V(F64SConvertI32, int32_t)     \
  V(F64UConvertI32, uint32_t)    \
  V(F64SConvertI64, int64_t)     \
  V(F64UConvertI64, uint64_t)    \
  V(F64ConvertF32, float)       \
  V(F64ReinterpretI64, int64_t) \
  V(I32AsmjsSConvertF32, float)  \
  V(I32AsmjsUConvertF32, float)  \
  V(I32AsmjsSConvertF64, double) \
  V(I32AsmjsUConvertF64, double) \
  V(F32Sqrt, float)              \
  V(F64Sqrt, double)
181
182 namespace {
183
// IEEE-754 sign-bit masks for single and double precision bit patterns.
constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
186
ExecuteI32DivS(int32_t a,int32_t b,TrapReason * trap)187 inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
188 if (b == 0) {
189 *trap = kTrapDivByZero;
190 return 0;
191 }
192 if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
193 *trap = kTrapDivUnrepresentable;
194 return 0;
195 }
196 return a / b;
197 }
198
ExecuteI32DivU(uint32_t a,uint32_t b,TrapReason * trap)199 inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
200 if (b == 0) {
201 *trap = kTrapDivByZero;
202 return 0;
203 }
204 return a / b;
205 }
206
ExecuteI32RemS(int32_t a,int32_t b,TrapReason * trap)207 inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
208 if (b == 0) {
209 *trap = kTrapRemByZero;
210 return 0;
211 }
212 if (b == -1) return 0;
213 return a % b;
214 }
215
ExecuteI32RemU(uint32_t a,uint32_t b,TrapReason * trap)216 inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
217 if (b == 0) {
218 *trap = kTrapRemByZero;
219 return 0;
220 }
221 return a % b;
222 }
223
// wasm i32.shl: the shift count is taken modulo 32.
inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
  const uint32_t shift = b % 32;
  return a << shift;
}
227
// wasm i32.shr_u (logical): the shift count is taken modulo 32.
inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
  const uint32_t shift = b % 32;
  return a >> shift;
}
231
// wasm i32.shr_s (arithmetic): the shift count is taken modulo 32.
inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
  const int32_t shift = b & 0x1F;
  return a >> shift;
}
235
ExecuteI64DivS(int64_t a,int64_t b,TrapReason * trap)236 inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
237 if (b == 0) {
238 *trap = kTrapDivByZero;
239 return 0;
240 }
241 if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
242 *trap = kTrapDivUnrepresentable;
243 return 0;
244 }
245 return a / b;
246 }
247
ExecuteI64DivU(uint64_t a,uint64_t b,TrapReason * trap)248 inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
249 if (b == 0) {
250 *trap = kTrapDivByZero;
251 return 0;
252 }
253 return a / b;
254 }
255
ExecuteI64RemS(int64_t a,int64_t b,TrapReason * trap)256 inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
257 if (b == 0) {
258 *trap = kTrapRemByZero;
259 return 0;
260 }
261 if (b == -1) return 0;
262 return a % b;
263 }
264
ExecuteI64RemU(uint64_t a,uint64_t b,TrapReason * trap)265 inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
266 if (b == 0) {
267 *trap = kTrapRemByZero;
268 return 0;
269 }
270 return a % b;
271 }
272
// wasm i64.shl: the shift count is taken modulo 64.
inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
  const uint64_t shift = b % 64;
  return a << shift;
}
276
// wasm i64.shr_u (logical): the shift count is taken modulo 64.
inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
  const uint64_t shift = b % 64;
  return a >> shift;
}
280
// wasm i64.shr_s (arithmetic): the shift count is taken modulo 64.
inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
  const int64_t shift = b & 0x3F;
  return a >> shift;
}
284
// wasm i32.rotr: rotate right by {b} mod 32.
// The complementary shift is masked as well: without the mask, a rotation
// by 0 computes "a << 32", which is undefined behavior in C++ (shift count
// equal to the operand width).
inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1F);
  return (a >> shift) | (a << ((32 - shift) & 0x1F));
}
289
// wasm i32.rotl: rotate left by {b} mod 32.
// The complementary shift is masked as well: without the mask, a rotation
// by 0 computes "a >> 32", which is undefined behavior in C++ (shift count
// equal to the operand width).
inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1F);
  return (a << shift) | (a >> ((32 - shift) & 0x1F));
}
294
// wasm i64.rotr: rotate right by {b} mod 64.
// The complementary shift is masked as well: without the mask, a rotation
// by 0 computes "a << 64", which is undefined behavior in C++ (shift count
// equal to the operand width).
inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3F);
  return (a >> shift) | (a << ((64 - shift) & 0x3F));
}
299
// wasm i64.rotl: rotate left by {b} mod 64.
// The complementary shift is masked as well: without the mask, a rotation
// by 0 computes "a >> 64", which is undefined behavior in C++ (shift count
// equal to the operand width).
inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3F);
  return (a << shift) | (a >> ((64 - shift) & 0x3F));
}
304
ExecuteF32Min(float a,float b,TrapReason * trap)305 inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
306 return JSMin(a, b);
307 }
308
ExecuteF32Max(float a,float b,TrapReason * trap)309 inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
310 return JSMax(a, b);
311 }
312
ExecuteF32CopySign(Float32 a,Float32 b,TrapReason * trap)313 inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
314 return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
315 (b.get_bits() & kFloat32SignBitMask));
316 }
317
ExecuteF64Min(double a,double b,TrapReason * trap)318 inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
319 return JSMin(a, b);
320 }
321
ExecuteF64Max(double a,double b,TrapReason * trap)322 inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
323 return JSMax(a, b);
324 }
325
ExecuteF64CopySign(Float64 a,Float64 b,TrapReason * trap)326 inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
327 return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
328 (b.get_bits() & kFloat64SignBitMask));
329 }
330
// asm.js i32 signed division: never traps; returns 0 for division by zero
// and INT32_MIN for the INT32_MIN / -1 overflow case.
inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  const int32_t int32_min = std::numeric_limits<int32_t>::min();
  if (a == int32_min && b == -1) return int32_min;
  return a / b;
}
338
// asm.js i32 unsigned division: never traps; division by zero yields 0.
inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
  return b == 0 ? 0 : a / b;
}
343
// asm.js i32 signed remainder: never traps; divisors 0 and -1 yield 0
// (the -1 case avoids C++ overflow for INT32_MIN % -1).
inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0 || b == -1) return 0;
  return a % b;
}
349
// asm.js i32 unsigned remainder: never traps; zero divisor yields 0.
inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
  return b == 0 ? 0 : a % b;
}
354
ExecuteI32AsmjsSConvertF32(float a,TrapReason * trap)355 inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
356 return DoubleToInt32(a);
357 }
358
ExecuteI32AsmjsUConvertF32(float a,TrapReason * trap)359 inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
360 return DoubleToUint32(a);
361 }
362
ExecuteI32AsmjsSConvertF64(double a,TrapReason * trap)363 inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
364 return DoubleToInt32(a);
365 }
366
ExecuteI32AsmjsUConvertF64(double a,TrapReason * trap)367 inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
368 return DoubleToUint32(a);
369 }
370
ExecuteI32Clz(uint32_t val,TrapReason * trap)371 int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
372 return base::bits::CountLeadingZeros(val);
373 }
374
ExecuteI32Ctz(uint32_t val,TrapReason * trap)375 uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
376 return base::bits::CountTrailingZeros(val);
377 }
378
ExecuteI32Popcnt(uint32_t val,TrapReason * trap)379 uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
380 return base::bits::CountPopulation(val);
381 }
382
// wasm i32.eqz: 1 if the operand is zero, else 0.
inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val ? 0 : 1;
}
386
ExecuteI64Clz(uint64_t val,TrapReason * trap)387 int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
388 return base::bits::CountLeadingZeros(val);
389 }
390
ExecuteI64Ctz(uint64_t val,TrapReason * trap)391 inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
392 return base::bits::CountTrailingZeros(val);
393 }
394
ExecuteI64Popcnt(uint64_t val,TrapReason * trap)395 inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
396 return base::bits::CountPopulation(val);
397 }
398
// wasm i64.eqz: 1 if the operand is zero, else 0 (result is an i32).
inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val ? 0 : 1;
}
402
ExecuteF32Abs(Float32 a,TrapReason * trap)403 inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
404 return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
405 }
406
ExecuteF32Neg(Float32 a,TrapReason * trap)407 inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
408 return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
409 }
410
ExecuteF32Ceil(float a,TrapReason * trap)411 inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
412
ExecuteF32Floor(float a,TrapReason * trap)413 inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }
414
ExecuteF32Trunc(float a,TrapReason * trap)415 inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }
416
// wasm f32.nearest: round to nearest integer in the current rounding mode.
inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
  return nearbyintf(a);
}
420
// wasm f32.sqrt via the C library.
inline float ExecuteF32Sqrt(float a, TrapReason* trap) { return sqrtf(a); }
425
ExecuteF64Abs(Float64 a,TrapReason * trap)426 inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
427 return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
428 }
429
ExecuteF64Neg(Float64 a,TrapReason * trap)430 inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
431 return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
432 }
433
ExecuteF64Ceil(double a,TrapReason * trap)434 inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
435
ExecuteF64Floor(double a,TrapReason * trap)436 inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }
437
ExecuteF64Trunc(double a,TrapReason * trap)438 inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }
439
// wasm f64.nearest: round to nearest integer in the current rounding mode.
inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}
443
ExecuteF64Sqrt(double a,TrapReason * trap)444 inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
445
446 template <typename int_type, typename float_type>
447 int_type ExecuteConvert(float_type a, TrapReason* trap) {
448 if (is_inbounds<int_type>(a)) {
449 return static_cast<int_type>(a);
450 }
451 *trap = kTrapFloatUnrepresentable;
452 return 0;
453 }
454
455 template <typename int_type, typename float_type>
456 int_type ExecuteConvertSaturate(float_type a) {
457 TrapReason base_trap = kTrapCount;
458 int32_t val = ExecuteConvert<int_type>(a, &base_trap);
459 if (base_trap == kTrapCount) {
460 return val;
461 }
462 return std::isnan(a) ? 0
463 : (a < static_cast<float_type>(0.0)
464 ? std::numeric_limits<int_type>::min()
465 : std::numeric_limits<int_type>::max());
466 }
467
// Calls an out-of-line int->float conversion helper {fn} that communicates
// through memory: the input is written to a scratch buffer, {fn} converts
// it in place, and the result is read back.
template <typename dst_type, typename src_type, void (*fn)(Address)>
inline dst_type CallExternalIntToFloatFunction(src_type input) {
  // Scratch buffer large enough for either the input or the result.
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  fn(data_addr);
  return ReadUnalignedValue<dst_type>(data_addr);
}
476
// Like CallExternalIntToFloatFunction, but {fn} returns a success flag:
// a zero return means the value was unrepresentable, which is recorded as
// a kTrapFloatUnrepresentable trap. The (then meaningless) buffer contents
// are still read back and returned.
template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
inline dst_type CallExternalFloatToIntFunction(src_type input,
                                               TrapReason* trap) {
  // Scratch buffer large enough for either the input or the result.
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
  return ReadUnalignedValue<dst_type>(data_addr);
}
486
// wasm i32.wrap_i64: keep the low 32 bits.
inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<uint32_t>(a);
}
490
ExecuteI64SConvertF32(float a,TrapReason * trap)491 int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
492 return CallExternalFloatToIntFunction<int64_t, float,
493 float32_to_int64_wrapper>(a, trap);
494 }
495
ExecuteI64SConvertSatF32(float a)496 int64_t ExecuteI64SConvertSatF32(float a) {
497 TrapReason base_trap = kTrapCount;
498 int64_t val = ExecuteI64SConvertF32(a, &base_trap);
499 if (base_trap == kTrapCount) {
500 return val;
501 }
502 return std::isnan(a) ? 0
503 : (a < 0.0 ? std::numeric_limits<int64_t>::min()
504 : std::numeric_limits<int64_t>::max());
505 }
506
ExecuteI64SConvertF64(double a,TrapReason * trap)507 int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
508 return CallExternalFloatToIntFunction<int64_t, double,
509 float64_to_int64_wrapper>(a, trap);
510 }
511
ExecuteI64SConvertSatF64(double a)512 int64_t ExecuteI64SConvertSatF64(double a) {
513 TrapReason base_trap = kTrapCount;
514 int64_t val = ExecuteI64SConvertF64(a, &base_trap);
515 if (base_trap == kTrapCount) {
516 return val;
517 }
518 return std::isnan(a) ? 0
519 : (a < 0.0 ? std::numeric_limits<int64_t>::min()
520 : std::numeric_limits<int64_t>::max());
521 }
522
ExecuteI64UConvertF32(float a,TrapReason * trap)523 uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
524 return CallExternalFloatToIntFunction<uint64_t, float,
525 float32_to_uint64_wrapper>(a, trap);
526 }
527
ExecuteI64UConvertSatF32(float a)528 uint64_t ExecuteI64UConvertSatF32(float a) {
529 TrapReason base_trap = kTrapCount;
530 uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
531 if (base_trap == kTrapCount) {
532 return val;
533 }
534 return std::isnan(a) ? 0
535 : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
536 : std::numeric_limits<uint64_t>::max());
537 }
538
ExecuteI64UConvertF64(double a,TrapReason * trap)539 uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
540 return CallExternalFloatToIntFunction<uint64_t, double,
541 float64_to_uint64_wrapper>(a, trap);
542 }
543
ExecuteI64UConvertSatF64(double a)544 uint64_t ExecuteI64UConvertSatF64(double a) {
545 TrapReason base_trap = kTrapCount;
546 int64_t val = ExecuteI64UConvertF64(a, &base_trap);
547 if (base_trap == kTrapCount) {
548 return val;
549 }
550 return std::isnan(a) ? 0
551 : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
552 : std::numeric_limits<uint64_t>::max());
553 }
554
// wasm i64.extend_i32_s: sign-extend to 64 bits.
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<int64_t>(a);
}
558
// wasm i64.extend_i32_u: zero-extend to 64 bits.
inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<uint64_t>(a);
}
562
// wasm f32.convert_i32_s.
inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}
566
// wasm f32.convert_i32_u.
inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}
570
// wasm f32.convert_i64_s.
inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<float>(a);
}
574
ExecuteF32UConvertI64(uint64_t a,TrapReason * trap)575 inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
576 return CallExternalIntToFloatFunction<float, uint64_t,
577 uint64_to_float32_wrapper>(a);
578 }
579
// wasm f32.demote_f64.
inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}
583
ExecuteF32ReinterpretI32(int32_t a,TrapReason * trap)584 inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
585 return Float32::FromBits(a);
586 }
587
// wasm f64.convert_i32_s.
inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}
591
// wasm f64.convert_i32_u.
inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}
595
// wasm f64.convert_i64_s.
inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<double>(a);
}
599
ExecuteF64UConvertI64(uint64_t a,TrapReason * trap)600 inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
601 return CallExternalIntToFloatFunction<double, uint64_t,
602 uint64_to_float64_wrapper>(a);
603 }
604
// wasm f64.promote_f32.
inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}
608
ExecuteF64ReinterpretI64(int64_t a,TrapReason * trap)609 inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
610 return Float64::FromBits(a);
611 }
612
ExecuteI32ReinterpretF32(WasmValue a)613 inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
614 return a.to_f32_boxed().get_bits();
615 }
616
ExecuteI64ReinterpretF64(WasmValue a)617 inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
618 return a.to_f64_boxed().get_bits();
619 }
620
// Enum of interpreter-internal opcodes, generated from
// FOREACH_INTERNAL_OPCODE (currently only kInternalBreakpoint = 0xFF).
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};
626
// Returns a printable name for {val}: internal opcodes get an
// "Internal"-prefixed name, everything else defers to WasmOpcodes.
const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name:                 \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
637
638 class SideTable;
639
// Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  BodyLocalDecls locals;         // local declarations
  const byte* orig_start;        // start of original code
  const byte* orig_end;          // end of original code
  byte* start;                   // start of (maybe altered) code
  byte* end;                     // end of (maybe altered) code
  SideTable* side_table;         // precomputed side table for control flow.

  // Translates a pc offset within this function into a code pointer.
  const byte* at(pc_t pc) { return start + pc; }
};
652
653 // A helper class to compute the control transfers for each bytecode offset.
654 // Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
655 // be directly executed without the need to dynamically track blocks.
class SideTable : public ZoneObject {
 public:
  // Maps pc offsets of control-transfer opcodes to their transfer entries.
  ControlTransferMap map_;
  // Highest operand stack height observed while walking the function body.
  uint32_t max_stack_height_ = 0;

  // Walks the function body once and precomputes, for every branch-like
  // opcode, the pc delta, stack-pointer delta, and target arity.
  SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
      : map_(zone) {
    // Create a zone for all temporary objects.
    Zone control_transfer_zone(zone->allocator(), ZONE_NAME);

    // Represents a control flow label.
    class CLabel : public ZoneObject {
      explicit CLabel(Zone* zone, uint32_t target_stack_height, uint32_t arity)
          : target_stack_height(target_stack_height),
            arity(arity),
            refs(zone) {}

     public:
      // One pending reference (branch site) to this label.
      struct Ref {
        const byte* from_pc;
        const uint32_t stack_height;
      };
      const byte* target = nullptr;
      uint32_t target_stack_height;
      // Arity when branching to this label.
      const uint32_t arity;
      ZoneVector<Ref> refs;

      static CLabel* New(Zone* zone, uint32_t stack_height, uint32_t arity) {
        return new (zone) CLabel(zone, stack_height, arity);
      }

      // Bind this label to the given PC.
      void Bind(const byte* pc) {
        DCHECK_NULL(target);
        target = pc;
      }

      // Reference this label from the given location.
      void Ref(const byte* from_pc, uint32_t stack_height) {
        // Target being bound before a reference means this is a loop.
        DCHECK_IMPLIES(target, *target == kExprLoop);
        refs.push_back({from_pc, stack_height});
      }

      // Writes one ControlTransferEntry into {map} per recorded reference.
      void Finish(ControlTransferMap* map, const byte* start) {
        DCHECK_NOT_NULL(target);
        for (auto ref : refs) {
          size_t offset = static_cast<size_t>(ref.from_pc - start);
          auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
          DCHECK_GE(ref.stack_height, target_stack_height);
          spdiff_t spdiff =
              static_cast<spdiff_t>(ref.stack_height - target_stack_height);
          TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
                pcdiff, ref.stack_height, target_stack_height, spdiff);
          ControlTransferEntry& entry = (*map)[offset];
          entry.pc_diff = pcdiff;
          entry.sp_diff = spdiff;
          entry.target_arity = arity;
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;
      CLabel* end_label;
      CLabel* else_label;
      // Arity (number of values on the stack) when exiting this control
      // structure via |end|.
      uint32_t exit_arity;
      // Track whether this block was already left, i.e. all further
      // instructions are unreachable.
      bool unreachable = false;

      Control(const byte* pc, CLabel* end_label, CLabel* else_label,
              uint32_t exit_arity)
          : pc(pc),
            end_label(end_label),
            else_label(else_label),
            exit_arity(exit_arity) {}
      Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
          : Control(pc, end_label, nullptr, exit_arity) {}

      // Finishes the end label and, if still present, the else label.
      void Finish(ControlTransferMap* map, const byte* start) {
        end_label->Finish(map, start);
        if (else_label) else_label->Finish(map, start);
      }
    };

    // Compute the ControlTransfer map.
    // This algorithm maintains a stack of control constructs similar to the
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
    ZoneVector<Control> control_stack(&control_transfer_zone);
    uint32_t stack_height = 0;
    uint32_t func_arity =
        static_cast<uint32_t>(code->function->sig->return_count());
    // The function body itself acts as the outermost control construct.
    CLabel* func_label =
        CLabel::New(&control_transfer_zone, stack_height, func_arity);
    control_stack.emplace_back(code->orig_start, func_label, func_arity);
    auto control_parent = [&]() -> Control& {
      DCHECK_LE(2, control_stack.size());
      return control_stack[control_stack.size() - 2];
    };
    auto copy_unreachable = [&] {
      control_stack.back().unreachable = control_parent().unreachable;
    };
    for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
         i.has_next(); i.next()) {
      WasmOpcode opcode = i.current();
      if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
      bool unreachable = control_stack.back().unreachable;
      if (unreachable) {
        TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode));
      } else {
        // Track the simulated stack height; only reachable code contributes.
        auto stack_effect =
            StackEffect(module, code->function->sig, i.pc(), i.end());
        TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
              stack_effect.second);
        DCHECK_GE(stack_height, stack_effect.first);
        DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
                                  stack_effect.first + stack_effect.second);
        stack_height = stack_height - stack_effect.first + stack_effect.second;
        if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
      }
      switch (opcode) {
        case kExprBlock:
        case kExprLoop: {
          bool is_loop = opcode == kExprLoop;
          BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
          if (imm.type == kWasmVar) {
            imm.sig = module->signatures[imm.sig_index];
          }
          TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
                is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
          // A loop's label carries the input arity and is bound at the loop
          // header; a block's label carries the output arity.
          CLabel* label =
              CLabel::New(&control_transfer_zone, stack_height,
                          is_loop ? imm.in_arity() : imm.out_arity());
          control_stack.emplace_back(i.pc(), label, imm.out_arity());
          copy_unreachable();
          if (is_loop) label->Bind(i.pc());
          break;
        }
        case kExprIf: {
          BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
          if (imm.type == kWasmVar) {
            imm.sig = module->signatures[imm.sig_index];
          }
          TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
                imm.in_arity(), imm.out_arity());
          CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
                                          imm.out_arity());
          CLabel* else_label =
              CLabel::New(&control_transfer_zone, stack_height, 0);
          control_stack.emplace_back(i.pc(), end_label, else_label,
                                     imm.out_arity());
          copy_unreachable();
          // The implicit jump past the true block when the condition fails.
          if (!unreachable) else_label->Ref(i.pc(), stack_height);
          break;
        }
        case kExprElse: {
          Control* c = &control_stack.back();
          copy_unreachable();
          TRACE("control @%u: Else\n", i.pc_offset());
          // Falling off the true block jumps to the end of the if.
          if (!control_parent().unreachable) {
            c->end_label->Ref(i.pc(), stack_height);
          }
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(i.pc() + 1);
          c->else_label->Finish(&map_, code->orig_start);
          c->else_label = nullptr;
          DCHECK_GE(stack_height, c->end_label->target_stack_height);
          stack_height = c->end_label->target_stack_height;
          break;
        }
        case kExprEnd: {
          Control* c = &control_stack.back();
          TRACE("control @%u: End\n", i.pc_offset());
          // Only loops have bound labels.
          DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
          if (!c->end_label->target) {
            // An if without an else binds its (empty) else label here too.
            if (c->else_label) c->else_label->Bind(i.pc());
            c->end_label->Bind(i.pc() + 1);
          }
          c->Finish(&map_, code->orig_start);
          DCHECK_GE(stack_height, c->end_label->target_stack_height);
          stack_height = c->end_label->target_stack_height + c->exit_arity;
          control_stack.pop_back();
          break;
        }
        case kExprBr: {
          BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
          TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack[control_stack.size() - imm.depth - 1];
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          break;
        }
        case kExprBrIf: {
          BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
          TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack[control_stack.size() - imm.depth - 1];
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
          break;
        }
        case kExprBrTable: {
          BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc());
          BranchTableIterator<Decoder::kNoValidate> iterator(&i, imm);
          TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
                imm.table_count);
          if (!unreachable) {
            // Each table entry gets its own transfer entry, keyed by
            // i.pc() + entry index.
            while (iterator.has_next()) {
              uint32_t j = iterator.cur_index();
              uint32_t target = iterator.next();
              Control* c = &control_stack[control_stack.size() - target - 1];
              c->end_label->Ref(i.pc() + j, stack_height);
            }
          }
          break;
        }
        default:
          break;
      }
      if (WasmOpcodes::IsUnconditionalJump(opcode)) {
        control_stack.back().unreachable = true;
      }
    }
    DCHECK_EQ(0, control_stack.size());
    DCHECK_EQ(func_arity, stack_height);
  }

  // Looks up the control transfer entry for {from}; the entry must exist.
  ControlTransferEntry& Lookup(pc_t from) {
    auto result = map_.find(from);
    DCHECK(result != map_.end());
    return result->second;
  }
};
896
897 struct ExternalCallResult {
898 enum Type {
899 // The function should be executed inside this interpreter.
900 INTERNAL,
901 // For indirect calls: Table or function does not exist.
902 INVALID_FUNC,
903 // For indirect calls: Signature does not match expected signature.
904 SIGNATURE_MISMATCH,
905 // The function was executed and returned normally.
906 EXTERNAL_RETURNED,
907 // The function was executed, threw an exception, and the stack was unwound.
908 EXTERNAL_UNWOUND
909 };
910 Type type;
911 // If type is INTERNAL, this field holds the function to call internally.
912 InterpreterCode* interpreter_code;
913
ExternalCallResultv8::internal::wasm::__anon0754df630111::ExternalCallResult914 ExternalCallResult(Type type) : type(type) { // NOLINT
915 DCHECK_NE(INTERNAL, type);
916 }
ExternalCallResultv8::internal::wasm::__anon0754df630111::ExternalCallResult917 ExternalCallResult(Type type, InterpreterCode* code)
918 : type(type), interpreter_code(code) {
919 DCHECK_EQ(INTERNAL, type);
920 }
921 };
922
923 // The main storage for interpreter code. It maps {WasmFunction} to the
924 // metadata needed to execute each function.
class CodeMap {
  Zone* zone_;
  const WasmModule* module_;
  // One entry per function of {module_}, indexed by function index.
  ZoneVector<InterpreterCode> interpreter_code_;
  // TODO(wasm): Remove this testing wart. It is needed because interpreter
  // entry stubs are not generated in testing the interpreter in cctests.
  bool call_indirect_through_module_ = false;

 public:
  // Registers one InterpreterCode entry per function in {module}. Imported
  // functions get null code pointers (their bodies live outside the module
  // bytes). {isolate} is currently unused here.
  CodeMap(Isolate* isolate, const WasmModule* module,
          const uint8_t* module_start, Zone* zone)
      : zone_(zone), module_(module), interpreter_code_(zone) {
    if (module == nullptr) return;
    interpreter_code_.reserve(module->functions.size());
    for (const WasmFunction& function : module->functions) {
      if (function.imported) {
        DCHECK(!function.code.is_set());
        AddFunction(&function, nullptr, nullptr);
      } else {
        AddFunction(&function, module_start + function.code.offset(),
                    module_start + function.code.end_offset());
      }
    }
  }

  bool call_indirect_through_module() { return call_indirect_through_module_; }

  void set_call_indirect_through_module(bool val) {
    call_indirect_through_module_ = val;
  }

  const WasmModule* module() const { return module_; }

  // Returns the (preprocessed) code for {function}.
  InterpreterCode* GetCode(const WasmFunction* function) {
    InterpreterCode* code = GetCode(function->func_index);
    DCHECK_EQ(function, code->function);
    return code;
  }

  InterpreterCode* GetCode(uint32_t function_index) {
    DCHECK_LT(function_index, interpreter_code_.size());
    return Preprocess(&interpreter_code_[function_index]);
  }

  // Resolves an indirect call through {table_index}/{entry_index}; returns
  // nullptr if any index is out of bounds. After each bounds check, the index
  // is additionally masked (branchlessly forced to zero when out of range) as
  // a speculative side-channel (SSCA) mitigation; the DCHECKs verify the mask
  // is a no-op for in-bounds indices.
  InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
    uint32_t saved_index;
    USE(saved_index);
    if (table_index >= module_->function_tables.size()) return nullptr;
    // Mask table index for SSCA mitigation.
    saved_index = table_index;
    table_index &=
        static_cast<int32_t>((table_index - module_->function_tables.size()) &
                             ~static_cast<int32_t>(table_index)) >>
        31;
    DCHECK_EQ(table_index, saved_index);
    const WasmIndirectFunctionTable* table =
        &module_->function_tables[table_index];
    if (entry_index >= table->values.size()) return nullptr;
    // Mask entry_index for SSCA mitigation.
    saved_index = entry_index;
    entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
                                        ~static_cast<int32_t>(entry_index)) >>
                   31;
    DCHECK_EQ(entry_index, saved_index);
    uint32_t index = table->values[entry_index];
    if (index >= interpreter_code_.size()) return nullptr;
    // Mask index for SSCA mitigation.
    saved_index = index;
    index &= static_cast<int32_t>((index - interpreter_code_.size()) &
                                  ~static_cast<int32_t>(index)) >>
             31;
    DCHECK_EQ(index, saved_index);

    return GetCode(index);
  }

  // Lazily builds the side table (control-transfer map) for {code}. Imported
  // functions (null {start}) have no body to preprocess.
  InterpreterCode* Preprocess(InterpreterCode* code) {
    DCHECK_EQ(code->function->imported, code->start == nullptr);
    if (!code->side_table && code->start) {
      // Compute the control targets map and the local declarations.
      code->side_table = new (zone_) SideTable(zone_, module_, code);
    }
    return code;
  }

  // Appends the entry for {function}; must be called in ascending
  // function-index order (asserted below).
  void AddFunction(const WasmFunction* function, const byte* code_start,
                   const byte* code_end) {
    InterpreterCode code = {
        function, BodyLocalDecls(zone_),         code_start,
        code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
        nullptr};

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
  }

  // Replaces the code of an existing function and invalidates its side
  // table, which will be rebuilt by Preprocess below.
  void SetFunctionCode(const WasmFunction* function, const byte* start,
                       const byte* end) {
    DCHECK_LT(function->func_index, interpreter_code_.size());
    InterpreterCode* code = &interpreter_code_[function->func_index];
    DCHECK_EQ(function, code->function);
    code->orig_start = start;
    code->orig_end = end;
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
    code->side_table = nullptr;
    Preprocess(code);
  }
};
1034
// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
  dst operator()(src val) const { return static_cast<dst>(val); }
};
// The boxed-float specializations convert via the raw bit representation
// (Float32/Float64::FromBits / get_bits) rather than a numeric cast.
template <>
struct converter<Float64, uint64_t> {
  Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
};
template <>
struct converter<Float32, uint32_t> {
  Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
};
template <>
struct converter<uint64_t, Float64> {
  uint64_t operator()(Float64 val) const { return val.get_bits(); }
};
template <>
struct converter<uint32_t, Float32> {
  uint32_t operator()(Float32 val) const { return val.get_bits(); }
};
1056
1057 template <typename T>
has_nondeterminism(T val)1058 V8_INLINE bool has_nondeterminism(T val) {
1059 static_assert(!std::is_floating_point<T>::value, "missing specialization");
1060 return false;
1061 }
1062 template <>
has_nondeterminism(float val)1063 V8_INLINE bool has_nondeterminism<float>(float val) {
1064 return std::isnan(val);
1065 }
1066 template <>
has_nondeterminism(double val)1067 V8_INLINE bool has_nondeterminism<double>(double val) {
1068 return std::isnan(val);
1069 }
1070
1071 // Responsible for executing code directly.
1072 class ThreadImpl {
  // Marks the base of one activation: the frame count and value-stack
  // height at the time the activation was started.
  struct Activation {
    uint32_t fp;  // index into frames_ at activation start
    sp_t sp;      // value-stack height at activation start
    Activation(uint32_t fp, sp_t sp) : fp(fp), sp(sp) {}
  };
1078
1079 public:
  // Creates a thread executing code from {codemap} against the given
  // instance. The frame and activation vectors are zone-allocated.
  ThreadImpl(Zone* zone, CodeMap* codemap,
             Handle<WasmInstanceObject> instance_object)
      : codemap_(codemap),
        instance_object_(instance_object),
        zone_(zone),
        frames_(zone),
        activations_(zone) {}
1087
  //==========================================================================
  // Implementation of public interface for WasmInterpreter::Thread.
  //==========================================================================

  WasmInterpreter::State state() { return state_; }

  // Pushes {args} and a new frame for {function}. The current activation
  // must have no frames yet (asserted).
  void InitFrame(const WasmFunction* function, WasmValue* args) {
    DCHECK_EQ(current_activation().fp, frames_.size());
    InterpreterCode* code = codemap()->GetCode(function);
    size_t num_params = function->sig->parameter_count();
    EnsureStackSpace(num_params);
    Push(args, num_params);
    PushFrame(code);
  }

  // Runs the topmost frame. {num_steps} == -1 runs without a step limit;
  // otherwise at most {num_steps} instructions are executed.
  WasmInterpreter::State Run(int num_steps = -1) {
    DCHECK(state_ == WasmInterpreter::STOPPED ||
           state_ == WasmInterpreter::PAUSED);
    DCHECK(num_steps == -1 || num_steps > 0);
    if (num_steps == -1) {
      TRACE(" => Run()\n");
    } else if (num_steps == 1) {
      TRACE(" => Step()\n");
    } else {
      TRACE(" => Run(%d)\n", num_steps);
    }
    state_ = WasmInterpreter::RUNNING;
    Execute(frames_.back().code, frames_.back().pc, num_steps);
    // If state_ is STOPPED, the current activation must be fully unwound.
    DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
                   current_activation().fp == frames_.size());
    return state_;
  }
1121
  void Pause() { UNIMPLEMENTED(); }

  // Discards all frames and stack values and returns to the STOPPED state.
  void Reset() {
    TRACE("----- RESET -----\n");
    sp_ = stack_start_;
    frames_.clear();
    state_ = WasmInterpreter::STOPPED;
    trap_reason_ = kTrapCount;
    possible_nondeterminism_ = false;
  }

  int GetFrameCount() {
    DCHECK_GE(kMaxInt, frames_.size());
    return static_cast<int>(frames_.size());
  }

  // Returns the {index}-th return value of the finished activation; returns
  // a dummy value (0xDEADBEEF) if the thread trapped instead.
  WasmValue GetReturnValue(uint32_t index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
    DCHECK_EQ(WasmInterpreter::FINISHED, state_);
    Activation act = current_activation();
    // Current activation must be finished.
    DCHECK_EQ(act.fp, frames_.size());
    return GetStackValue(act.sp + index);
  }

  WasmValue GetStackValue(sp_t index) {
    DCHECK_GT(StackHeight(), index);
    return stack_start_[index];
  }

  void SetStackValue(sp_t index, WasmValue value) {
    DCHECK_GT(StackHeight(), index);
    stack_start_[index] = value;
  }

  TrapReason GetTrapReason() { return trap_reason_; }

  pc_t GetBreakpointPc() { return break_pc_; }

  bool PossibleNondeterminism() { return possible_nondeterminism_; }

  uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }

  void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }

  void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }

  uint32_t NumActivations() {
    return static_cast<uint32_t>(activations_.size());
  }
1172
  // Opens a new activation on top of the current frame and value stacks and
  // returns its id (its index in {activations_}).
  uint32_t StartActivation() {
    TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
    // If you use activations, use them consistently:
    DCHECK_IMPLIES(activations_.empty(), frames_.empty());
    DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
    uint32_t activation_id = static_cast<uint32_t>(activations_.size());
    activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
                              StackHeight());
    state_ = WasmInterpreter::STOPPED;
    return activation_id;
  }

  // Closes the topmost activation ({id} must match it) and drops any values
  // it left on the stack.
  void FinishActivation(uint32_t id) {
    TRACE("----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
    DCHECK_LT(0, activations_.size());
    DCHECK_EQ(activations_.size() - 1, id);
    // Stack height must match the start of this activation (otherwise unwind
    // first).
    DCHECK_EQ(activations_.back().fp, frames_.size());
    DCHECK_LE(activations_.back().sp, StackHeight());
    sp_ = stack_start_ + activations_.back().sp;
    activations_.pop_back();
  }

  // Returns the index of the first frame belonging to activation {id}.
  uint32_t ActivationFrameBase(uint32_t id) {
    DCHECK_GT(activations_.size(), id);
    return activations_[id].fp;
  }
1201
  // Handle a thrown exception. Returns whether the exception was handled inside
  // the current activation. Unwinds the interpreted stack accordingly.
  WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
      Isolate* isolate) {
    DCHECK(isolate->has_pending_exception());
    // TODO(wasm): Add wasm exception handling (would return HANDLED).
    USE(isolate->pending_exception());
    TRACE("----- UNWIND -----\n");
    DCHECK_LT(0, activations_.size());
    Activation& act = activations_.back();
    // Drop all frames and stack values belonging to the current activation.
    DCHECK_LE(act.fp, frames_.size());
    frames_.resize(act.fp);
    DCHECK_LE(act.sp, StackHeight());
    sp_ = stack_start_ + act.sp;
    state_ = WasmInterpreter::STOPPED;
    return WasmInterpreter::Thread::UNWOUND;
  }
1219
 private:
  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
    pc_t pc;  // current program counter within {code}
    sp_t sp;  // value-stack height at frame entry (start of the parameters)

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.type_list.size(); }
  };

  // Snapshot of a control construct: pc, stack height, frame index, arity.
  struct Block {
    pc_t pc;
    sp_t sp;
    size_t fp;
    unsigned arity;
  };
1239
  friend class InterpretedFrameImpl;

  CodeMap* codemap_;
  Handle<WasmInstanceObject> instance_object_;
  Zone* zone_;
  WasmValue* stack_start_ = nullptr;  // Start of allocated stack space.
  WasmValue* stack_limit_ = nullptr;  // End of allocated stack space.
  WasmValue* sp_ = nullptr;           // Current stack pointer.
  ZoneVector<Frame> frames_;          // Call stack of active frames.
  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
  pc_t break_pc_ = kInvalidPc;
  TrapReason trap_reason_ = kTrapCount;
  bool possible_nondeterminism_ = false;
  uint8_t break_flags_ = 0;  // a combination of WasmInterpreter::BreakFlag
  uint64_t num_interpreted_calls_ = 0;
  // Store the stack height of each activation (for unwind and frame
  // inspection).
  ZoneVector<Activation> activations_;

  CodeMap* codemap() const { return codemap_; }
  const WasmModule* module() const { return codemap_->module(); }
1261
  // Puts the thread into the TRAPPED state and records the trap reason and
  // the pc of the faulting instruction.
  void DoTrap(TrapReason trap, pc_t pc) {
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }

  // Push a frame with arguments already on the stack.
  void PushFrame(InterpreterCode* code) {
    DCHECK_NOT_NULL(code);
    DCHECK_NOT_NULL(code->side_table);
    // Reserve space for the locals plus the maximum value-stack usage of
    // this function up front.
    EnsureStackSpace(code->side_table->max_stack_height_ +
                     code->locals.type_list.size());

    ++num_interpreted_calls_;
    size_t arity = code->function->sig->parameter_count();
    // The parameters will overlap the arguments already on the stack.
    DCHECK_GE(StackHeight(), arity);
    frames_.push_back({code, 0, StackHeight() - arity});
    frames_.back().pc = InitLocals(code);
    TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
          code->function->func_index, frames_.back().pc);
  }

  // Pushes a zero-initialized value for every declared local and returns the
  // pc of the first instruction after the local declarations.
  pc_t InitLocals(InterpreterCode* code) {
    for (auto p : code->locals.type_list) {
      WasmValue val;
      switch (p) {
#define CASE_TYPE(wasm, ctype) \
  case kWasm##wasm:            \
    val = WasmValue(ctype{});  \
    break;
        WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
        default:
          UNREACHABLE();
          break;
      }
      Push(val);
    }
    return code->locals.encoded_size;
  }

  // Records {pc} as the resume point of the topmost frame.
  void CommitPc(pc_t pc) {
    DCHECK(!frames_.empty());
    frames_.back().pc = pc;
  }
1308
SkipBreakpoint(InterpreterCode * code,pc_t pc)1309 bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
1310 if (pc == break_pc_) {
1311 // Skip the previously hit breakpoint when resuming.
1312 break_pc_ = kInvalidPc;
1313 return true;
1314 }
1315 return false;
1316 }
1317
  // Returns the pc delta of the control transfer recorded at {pc}.
  int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
  }

  // Executes a branch at {pc}: drops stack values down to the recorded
  // target height while keeping the top {target_arity} results, and returns
  // the pc delta to the branch target. {depth} is unused here; the side
  // table already resolved the target.
  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
    ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
    DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
                    control_transfer_entry.target_arity);
    return control_transfer_entry.pc_diff;
  }
1328
  // Computes the pc of the instruction following the call at {pc} by
  // decoding the call's immediate operand (1 opcode byte + immediate).
  pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
    switch (code->orig_start[pc]) {
      case kExprCallFunction: {
        CallFunctionImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
        return pc + 1 + imm.length;
      }
      case kExprCallIndirect: {
        CallIndirectImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
        return pc + 1 + imm.length;
      }
      default:
        UNREACHABLE();
    }
  }
1343
  // Pops the topmost frame and moves its {arity} return values down to the
  // caller's stack position. Returns false when this was the last frame of
  // the current activation (execution finished); otherwise restores {code},
  // {pc} and {limit} to the caller's resume point and returns true.
  bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
                size_t arity) {
    DCHECK_GT(frames_.size(), 0);
    WasmValue* sp_dest = stack_start_ + frames_.back().sp;
    frames_.pop_back();
    if (frames_.size() == current_activation().fp) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(sp_dest, arity);
      TRACE(" => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      decoder->Reset((*code)->start, (*code)->end);
      *pc = ReturnPc(decoder, *code, top->pc);
      *limit = top->code->end - top->code->start;
      TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
            (*code)->function->func_index, *pc);
      DoStackTransfer(sp_dest, arity);
      return true;
    }
  }
1368
  // Returns true if the call was successful, false if the stack check failed
  // and the current activation was fully unwound.
  bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
              pc_t* limit) V8_WARN_UNUSED_RESULT {
    // Remember where to resume in the caller, then enter the callee frame.
    frames_.back().pc = *pc;
    PushFrame(target);
    if (!DoStackCheck()) return false;
    *pc = frames_.back().pc;
    *limit = target->end - target->start;
    decoder->Reset(target->start, target->end);
    return true;
  }
1381
  // Copies {arity} values on the top of the stack down the stack to {dest},
  // dropping the values in-between.
  void DoStackTransfer(WasmValue* dest, size_t arity) {
    // before: |---------------| pop_count | arity |
    //         ^ 0             ^ dest              ^ sp_
    //
    // after:  |---------------| arity |
    //         ^ 0                     ^ sp_
    DCHECK_LE(dest, sp_);
    DCHECK_LE(dest + arity, sp_);
    // memmove, not memcpy: source and destination ranges may overlap.
    if (arity) memmove(dest, sp_ - arity, arity * sizeof(*sp_));
    sp_ = dest + arity;
  }
1395
  // Returns the effective address for an {mtype}-sized access at
  // {offset}+{index}, or kNullAddress if the access would be out of bounds.
  // The three separate comparisons avoid overflow in the bounds arithmetic.
  template <typename mtype>
  inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
    size_t mem_size = instance_object_->memory_size();
    if (sizeof(mtype) > mem_size) return kNullAddress;
    if (offset > (mem_size - sizeof(mtype))) return kNullAddress;
    if (index > (mem_size - sizeof(mtype) - offset)) return kNullAddress;
    // Compute the effective address of the access, making sure to condition
    // the index even in the in-bounds case.
    return reinterpret_cast<Address>(instance_object_->memory_start()) +
           offset + (index & instance_object_->memory_mask());
  }
1407
  // Executes a load: pops the index, bounds-checks, reads a little-endian
  // {mtype} from memory and pushes it converted to {ctype}. Traps and
  // returns false on an out-of-bounds access. {len} receives the total
  // instruction length (opcode + immediate).
  template <typename ctype, typename mtype>
  bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
                   MachineRepresentation rep) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
                                                    sizeof(ctype));
    uint32_t index = Pop().to<uint32_t>();
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WasmValue result(
        converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));

    Push(result);
    len = 1 + imm.length;

    if (FLAG_wasm_trace_memory) {
      wasm::MemoryTracingInfo info(imm.offset + index, false, rep);
      TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
                           code->function->func_index, static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }
1434
  // Executes a store: pops the value and the index, bounds-checks, and
  // writes the value (converted from {ctype} to {mtype}) little-endian to
  // memory. Traps and returns false on an out-of-bounds access. {len}
  // receives the total instruction length (opcode + immediate).
  template <typename ctype, typename mtype>
  bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
                    MachineRepresentation rep) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
                                                    sizeof(ctype));
    ctype val = Pop().to<ctype>();

    uint32_t index = Pop().to<uint32_t>();
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
    len = 1 + imm.length;

    if (FLAG_wasm_trace_memory) {
      wasm::MemoryTracingInfo info(imm.offset + index, true, rep);
      TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
                           code->function->func_index, static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }
1460
  // Pops the operands of an atomic operation ({val2}, then {val}, then the
  // index — as present), bounds-checks the access and stores its effective
  // address in {address}. Traps and returns false if out of bounds. {len}
  // receives the total instruction length (prefix byte + opcode + immediate).
  // NOTE(review): operands are popped via to<uint32_t>() and implicitly
  // narrowed to {type}; this is fine for the i32 atomics used below —
  // confirm before adding wider (i64) atomic types.
  template <typename type>
  bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                             Address& address, pc_t pc, int& len,
                             type* val = nullptr, type* val2 = nullptr) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
                                                    sizeof(type));
    if (val2) *val2 = Pop().to<uint32_t>();
    if (val) *val = Pop().to<uint32_t>();
    uint32_t index = Pop().to<uint32_t>();
    address = BoundsCheckMem<type>(imm.offset, index);
    if (!address) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    len = 2 + imm.length;
    return true;
  }
1478
  // Executes a numeric-prefix opcode (the saturating float-to-int
  // conversions). Returns true on success; FATALs on unknown opcodes.
  bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
                        InterpreterCode* code, pc_t pc, int& len) {
    switch (opcode) {
      case kExprI32SConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
        return true;
      case kExprI32UConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
        return true;
      case kExprI32SConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
        return true;
      case kExprI32UConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
        return true;
      case kExprI64SConvertSatF32:
        Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64UConvertSatF32:
        Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64SConvertSatF64:
        Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
        return true;
      case kExprI64UConvertSatF64:
        Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
        return true;
      default:
        FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
              OpcodeName(code->start[pc]));
        UNREACHABLE();
    }
    // Not reached; all cases above return.
    return false;
  }
1513
  // Executes an atomics-prefix opcode via the std::atomic free functions on
  // the (bounds-checked) memory address. Traps and returns false on an
  // out-of-bounds access.
  bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
                       InterpreterCode* code, pc_t pc, int& len) {
    WasmValue result;
    switch (opcode) {
// Read-modify-write ops: pop value + index, push the old memory value.
#define ATOMIC_BINOP_CASE(name, type, operation)                            \
  case kExpr##name: {                                                       \
    type val;                                                               \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    result = WasmValue(                                                     \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr), val));   \
    Push(result);                                                           \
    break;                                                                  \
  }
      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
#undef ATOMIC_BINOP_CASE
// Compare-exchange: pop expected + replacement + index, push the old value.
#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type)                         \
  case kExpr##name: {                                                    \
    type val;                                                            \
    type val2;                                                           \
    Address addr;                                                        \
    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val, \
                                     &val2)) {                           \
      return false;                                                      \
    }                                                                    \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),             \
                  "Size mismatch for types std::atomic<" #type           \
                  ">, and " #type);                                      \
    std::atomic_compare_exchange_strong(                                 \
        reinterpret_cast<std::atomic<type>*>(addr), &val, val2);         \
    Push(WasmValue(val));                                                \
    break;                                                               \
  }
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
// Atomic load: pop index, push the loaded value.
#define ATOMIC_LOAD_CASE(name, type, operation)                           \
  case kExpr##name: {                                                     \
    Address addr;                                                         \
    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len)) {     \
      return false;                                                       \
    }                                                                     \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),              \
                  "Size mismatch for types std::atomic<" #type            \
                  ">, and " #type);                                       \
    result =                                                              \
        WasmValue(std::operation(reinterpret_cast<std::atomic<type>*>(addr))); \
    Push(result);                                                         \
    break;                                                                \
  }
      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, atomic_load);
#undef ATOMIC_LOAD_CASE
// Atomic store: pop value + index, push nothing.
#define ATOMIC_STORE_CASE(name, type, operation)                            \
  case kExpr##name: {                                                       \
    type val;                                                               \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type>(decoder, code, addr, pc, len, &val)) { \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    std::operation(reinterpret_cast<std::atomic<type>*>(addr), val);        \
    break;                                                                  \
  }
      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, atomic_store);
#undef ATOMIC_STORE_CASE
      default:
        UNREACHABLE();
        return false;
    }
    return true;
  }
1614
GetGlobalPtr(const WasmGlobal * global)1615 byte* GetGlobalPtr(const WasmGlobal* global) {
1616 if (global->mutability && global->imported) {
1617 DCHECK(FLAG_experimental_wasm_mut_global);
1618 return reinterpret_cast<byte*>(
1619 instance_object_->imported_mutable_globals()[global->index]);
1620 } else {
1621 return instance_object_->globals_start() + global->offset;
1622 }
1623 }
1624
  // Executes a SIMD-prefix opcode (splat and extract-lane only). Returns
  // false for SIMD opcodes not handled here.
  bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
                     pc_t pc, int& len) {
    switch (opcode) {
// Splat: replicate one scalar into all {num} lanes of a new s128 value.
#define SPLAT_CASE(format, sType, valType, num) \
  case kExpr##format##Splat: {                  \
    WasmValue val = Pop();                      \
    valType v = val.to<valType>();              \
    sType s;                                    \
    for (int i = 0; i < num; i++) s.val[i] = v; \
    Push(WasmValue(Simd128(s)));                \
    return true;                                \
  }
      SPLAT_CASE(I32x4, int4, int32_t, 4)
      SPLAT_CASE(F32x4, float4, float, 4)
      SPLAT_CASE(I16x8, int8, int32_t, 8)
      SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
// Extract-lane: push the scalar at the lane given by the immediate.
#define EXTRACT_LANE_CASE(format, name)                                 \
  case kExpr##format##ExtractLane: {                                    \
    SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
    ++len;                                                              \
    WasmValue val = Pop();                                              \
    Simd128 s = val.to_s128();                                          \
    Push(WasmValue(s.to_##name().val[imm.lane]));                       \
    return true;                                                        \
  }
      EXTRACT_LANE_CASE(I32x4, i32x4)
      EXTRACT_LANE_CASE(F32x4, f32x4)
      EXTRACT_LANE_CASE(I16x8, i16x8)
      EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
      default:
        return false;
    }
  }
1660
  // Check if our control stack (frames_) exceeds the limit. Trigger stack
  // overflow if it does, and unwinding the current frame.
  // Returns true if execution can continue, false if the current activation was
  // fully unwound.
  // Do call this function immediately *after* pushing a new frame. The pc of
  // the top frame will be reset to 0 if the stack check fails.
  bool DoStackCheck() V8_WARN_UNUSED_RESULT {
    // The goal of this stack check is not to prevent actual stack overflows,
    // but to simulate stack overflows during the execution of compiled code.
    // That is why this function uses FLAG_stack_size, even though the value
    // stack actually lies in zone memory.
    const size_t stack_size_limit = FLAG_stack_size * KB;
    // Sum up the value stack size and the control stack size.
    const size_t current_stack_size =
        (sp_ - stack_start_) + frames_.size() * sizeof(Frame);
    if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
      return true;
    }
    // The pc of the top frame is initialized to the first instruction. We reset
    // it to 0 here such that we report the same position as in compiled code.
    frames_.back().pc = 0;
    Isolate* isolate = instance_object_->GetIsolate();
    HandleScope handle_scope(isolate);
    isolate->StackOverflow();
    // HandleException unwinds the activation; currently it always returns
    // UNWOUND, so the overflow always aborts this activation.
    return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
  }
1687
Execute(InterpreterCode * code,pc_t pc,int max)1688 void Execute(InterpreterCode* code, pc_t pc, int max) {
1689 DCHECK_NOT_NULL(code->side_table);
1690 DCHECK(!frames_.empty());
1691 // There must be enough space on the stack to hold the arguments, locals,
1692 // and the value stack.
1693 DCHECK_LE(code->function->sig->parameter_count() +
1694 code->locals.type_list.size() +
1695 code->side_table->max_stack_height_,
1696 stack_limit_ - stack_start_ - frames_.back().sp);
1697
1698 Decoder decoder(code->start, code->end);
1699 pc_t limit = code->end - code->start;
1700 bool hit_break = false;
1701
1702 while (true) {
1703 #define PAUSE_IF_BREAK_FLAG(flag) \
1704 if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \
1705 hit_break = true; \
1706 max = 0; \
1707 }
1708
1709 DCHECK_GT(limit, pc);
1710 DCHECK_NOT_NULL(code->start);
1711
1712 // Do first check for a breakpoint, in order to set hit_break correctly.
1713 const char* skip = " ";
1714 int len = 1;
1715 byte orig = code->start[pc];
1716 WasmOpcode opcode = static_cast<WasmOpcode>(orig);
1717 if (WasmOpcodes::IsPrefixOpcode(opcode)) {
1718 opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
1719 }
1720 if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
1721 orig = code->orig_start[pc];
1722 if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
1723 opcode =
1724 static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
1725 }
1726 if (SkipBreakpoint(code, pc)) {
1727 // skip breakpoint by switching on original code.
1728 skip = "[skip] ";
1729 } else {
1730 TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
1731 TraceValueStack();
1732 TRACE("\n");
1733 hit_break = true;
1734 break;
1735 }
1736 }
1737
1738 // If max is 0, break. If max is positive (a limit is set), decrement it.
1739 if (max == 0) break;
1740 if (max > 0) --max;
1741
1742 USE(skip);
1743 TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
1744 TraceValueStack();
1745 TRACE("\n");
1746
1747 #ifdef DEBUG
1748 // Compute the stack effect of this opcode, and verify later that the
1749 // stack was modified accordingly.
1750 std::pair<uint32_t, uint32_t> stack_effect = wasm::StackEffect(
1751 codemap_->module(), frames_.back().code->function->sig,
1752 code->orig_start + pc, code->orig_end);
1753 sp_t expected_new_stack_height =
1754 StackHeight() - stack_effect.first + stack_effect.second;
1755 #endif
1756
1757 switch (orig) {
1758 case kExprNop:
1759 break;
1760 case kExprBlock: {
1761 BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1762 len = 1 + imm.length;
1763 break;
1764 }
1765 case kExprLoop: {
1766 BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1767 len = 1 + imm.length;
1768 break;
1769 }
1770 case kExprIf: {
1771 BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1772 WasmValue cond = Pop();
1773 bool is_true = cond.to<uint32_t>() != 0;
1774 if (is_true) {
1775 // fall through to the true block.
1776 len = 1 + imm.length;
1777 TRACE(" true => fallthrough\n");
1778 } else {
1779 len = LookupTargetDelta(code, pc);
1780 TRACE(" false => @%zu\n", pc + len);
1781 }
1782 break;
1783 }
1784 case kExprElse: {
1785 len = LookupTargetDelta(code, pc);
1786 TRACE(" end => @%zu\n", pc + len);
1787 break;
1788 }
1789 case kExprSelect: {
1790 WasmValue cond = Pop();
1791 WasmValue fval = Pop();
1792 WasmValue tval = Pop();
1793 Push(cond.to<int32_t>() != 0 ? tval : fval);
1794 break;
1795 }
1796 case kExprBr: {
1797 BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1798 len = DoBreak(code, pc, imm.depth);
1799 TRACE(" br => @%zu\n", pc + len);
1800 break;
1801 }
1802 case kExprBrIf: {
1803 BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1804 WasmValue cond = Pop();
1805 bool is_true = cond.to<uint32_t>() != 0;
1806 if (is_true) {
1807 len = DoBreak(code, pc, imm.depth);
1808 TRACE(" br_if => @%zu\n", pc + len);
1809 } else {
1810 TRACE(" false => fallthrough\n");
1811 len = 1 + imm.length;
1812 }
1813 break;
1814 }
1815 case kExprBrTable: {
1816 BranchTableImmediate<Decoder::kNoValidate> imm(&decoder,
1817 code->at(pc));
1818 BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
1819 uint32_t key = Pop().to<uint32_t>();
1820 uint32_t depth = 0;
1821 if (key >= imm.table_count) key = imm.table_count;
1822 for (uint32_t i = 0; i <= key; i++) {
1823 DCHECK(iterator.has_next());
1824 depth = iterator.next();
1825 }
1826 len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
1827 TRACE(" br[%u] => @%zu\n", key, pc + key + len);
1828 break;
1829 }
1830 case kExprReturn: {
1831 size_t arity = code->function->sig->return_count();
1832 if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
1833 PAUSE_IF_BREAK_FLAG(AfterReturn);
1834 continue;
1835 }
1836 case kExprUnreachable: {
1837 return DoTrap(kTrapUnreachable, pc);
1838 }
1839 case kExprEnd: {
1840 break;
1841 }
1842 case kExprI32Const: {
1843 ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1844 Push(WasmValue(imm.value));
1845 len = 1 + imm.length;
1846 break;
1847 }
1848 case kExprI64Const: {
1849 ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1850 Push(WasmValue(imm.value));
1851 len = 1 + imm.length;
1852 break;
1853 }
1854 case kExprF32Const: {
1855 ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1856 Push(WasmValue(imm.value));
1857 len = 1 + imm.length;
1858 break;
1859 }
1860 case kExprF64Const: {
1861 ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1862 Push(WasmValue(imm.value));
1863 len = 1 + imm.length;
1864 break;
1865 }
1866 case kExprGetLocal: {
1867 LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1868 Push(GetStackValue(frames_.back().sp + imm.index));
1869 len = 1 + imm.length;
1870 break;
1871 }
1872 case kExprSetLocal: {
1873 LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1874 WasmValue val = Pop();
1875 SetStackValue(frames_.back().sp + imm.index, val);
1876 len = 1 + imm.length;
1877 break;
1878 }
1879 case kExprTeeLocal: {
1880 LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
1881 WasmValue val = Pop();
1882 SetStackValue(frames_.back().sp + imm.index, val);
1883 Push(val);
1884 len = 1 + imm.length;
1885 break;
1886 }
1887 case kExprDrop: {
1888 Pop();
1889 break;
1890 }
1891 case kExprCallFunction: {
1892 CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
1893 code->at(pc));
1894 InterpreterCode* target = codemap()->GetCode(imm.index);
1895 if (target->function->imported) {
1896 CommitPc(pc);
1897 ExternalCallResult result =
1898 CallImportedFunction(target->function->func_index);
1899 switch (result.type) {
1900 case ExternalCallResult::INTERNAL:
1901 // The import is a function of this instance. Call it directly.
1902 target = result.interpreter_code;
1903 DCHECK(!target->function->imported);
1904 break;
1905 case ExternalCallResult::INVALID_FUNC:
1906 case ExternalCallResult::SIGNATURE_MISMATCH:
1907 // Direct calls are checked statically.
1908 UNREACHABLE();
1909 case ExternalCallResult::EXTERNAL_RETURNED:
1910 PAUSE_IF_BREAK_FLAG(AfterCall);
1911 len = 1 + imm.length;
1912 break;
1913 case ExternalCallResult::EXTERNAL_UNWOUND:
1914 return;
1915 }
1916 if (result.type != ExternalCallResult::INTERNAL) break;
1917 }
1918 // Execute an internal call.
1919 if (!DoCall(&decoder, target, &pc, &limit)) return;
1920 code = target;
1921 PAUSE_IF_BREAK_FLAG(AfterCall);
1922 continue; // don't bump pc
1923 } break;
1924 case kExprCallIndirect: {
1925 CallIndirectImmediate<Decoder::kNoValidate> imm(&decoder,
1926 code->at(pc));
1927 uint32_t entry_index = Pop().to<uint32_t>();
1928 // Assume only one table for now.
1929 DCHECK_LE(module()->function_tables.size(), 1u);
1930 ExternalCallResult result =
1931 CallIndirectFunction(0, entry_index, imm.sig_index);
1932 switch (result.type) {
1933 case ExternalCallResult::INTERNAL:
1934 // The import is a function of this instance. Call it directly.
1935 if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
1936 return;
1937 code = result.interpreter_code;
1938 PAUSE_IF_BREAK_FLAG(AfterCall);
1939 continue; // don't bump pc
1940 case ExternalCallResult::INVALID_FUNC:
1941 return DoTrap(kTrapFuncInvalid, pc);
1942 case ExternalCallResult::SIGNATURE_MISMATCH:
1943 return DoTrap(kTrapFuncSigMismatch, pc);
1944 case ExternalCallResult::EXTERNAL_RETURNED:
1945 PAUSE_IF_BREAK_FLAG(AfterCall);
1946 len = 1 + imm.length;
1947 break;
1948 case ExternalCallResult::EXTERNAL_UNWOUND:
1949 return;
1950 }
1951 } break;
1952 case kExprGetGlobal: {
1953 GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
1954 code->at(pc));
1955 const WasmGlobal* global = &module()->globals[imm.index];
1956 byte* ptr = GetGlobalPtr(global);
1957 WasmValue val;
1958 switch (global->type) {
1959 #define CASE_TYPE(wasm, ctype) \
1960 case kWasm##wasm: \
1961 val = WasmValue(*reinterpret_cast<ctype*>(ptr)); \
1962 break;
1963 WASM_CTYPES(CASE_TYPE)
1964 #undef CASE_TYPE
1965 default:
1966 UNREACHABLE();
1967 }
1968 Push(val);
1969 len = 1 + imm.length;
1970 break;
1971 }
1972 case kExprSetGlobal: {
1973 GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
1974 code->at(pc));
1975 const WasmGlobal* global = &module()->globals[imm.index];
1976 byte* ptr = GetGlobalPtr(global);
1977 WasmValue val = Pop();
1978 switch (global->type) {
1979 #define CASE_TYPE(wasm, ctype) \
1980 case kWasm##wasm: \
1981 *reinterpret_cast<ctype*>(ptr) = val.to<ctype>(); \
1982 break;
1983 WASM_CTYPES(CASE_TYPE)
1984 #undef CASE_TYPE
1985 default:
1986 UNREACHABLE();
1987 }
1988 len = 1 + imm.length;
1989 break;
1990 }
1991
1992 #define LOAD_CASE(name, ctype, mtype, rep) \
1993 case kExpr##name: { \
1994 if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
1995 MachineRepresentation::rep)) \
1996 return; \
1997 break; \
1998 }
1999
2000 LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
2001 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
2002 LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
2003 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
2004 LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
2005 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord16);
2006 LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
2007 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
2008 LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
2009 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
2010 LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
2011 LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
2012 LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
2013 LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
2014 #undef LOAD_CASE
2015
2016 #define STORE_CASE(name, ctype, mtype, rep) \
2017 case kExpr##name: { \
2018 if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
2019 MachineRepresentation::rep)) \
2020 return; \
2021 break; \
2022 }
2023
2024 STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
2025 STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
2026 STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
2027 STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
2028 STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
2029 STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
2030 STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
2031 STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
2032 STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
2033 #undef STORE_CASE
2034
2035 #define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
2036 case kExpr##name: { \
2037 uint32_t index = Pop().to<uint32_t>(); \
2038 ctype result; \
2039 Address addr = BoundsCheckMem<mtype>(0, index); \
2040 if (!addr) { \
2041 result = defval; \
2042 } else { \
2043 /* TODO(titzer): alignment for asmjs load mem? */ \
2044 result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
2045 } \
2046 Push(WasmValue(result)); \
2047 break; \
2048 }
2049 ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
2050 ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
2051 ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
2052 ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
2053 ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
2054 ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
2055 std::numeric_limits<float>::quiet_NaN());
2056 ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
2057 std::numeric_limits<double>::quiet_NaN());
2058 #undef ASMJS_LOAD_CASE
2059
2060 #define ASMJS_STORE_CASE(name, ctype, mtype) \
2061 case kExpr##name: { \
2062 WasmValue val = Pop(); \
2063 uint32_t index = Pop().to<uint32_t>(); \
2064 Address addr = BoundsCheckMem<mtype>(0, index); \
2065 if (addr) { \
2066 *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
2067 } \
2068 Push(val); \
2069 break; \
2070 }
2071
2072 ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
2073 ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
2074 ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
2075 ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
2076 ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
2077 #undef ASMJS_STORE_CASE
2078 case kExprGrowMemory: {
2079 MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
2080 code->at(pc));
2081 uint32_t delta_pages = Pop().to<uint32_t>();
2082 Handle<WasmMemoryObject> memory(instance_object_->memory_object());
2083 Isolate* isolate = memory->GetIsolate();
2084 int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
2085 Push(WasmValue(result));
2086 len = 1 + imm.length;
2087 // Treat one grow_memory instruction like 1000 other instructions,
2088 // because it is a really expensive operation.
2089 if (max > 0) max = std::max(0, max - 1000);
2090 break;
2091 }
2092 case kExprMemorySize: {
2093 MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
2094 code->at(pc));
2095 Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
2096 kWasmPageSize)));
2097 len = 1 + imm.length;
2098 break;
2099 }
2100 // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
2101 // specially to guarantee that the quiet bit of a NaN is preserved on
2102 // ia32 by the reinterpret casts.
2103 case kExprI32ReinterpretF32: {
2104 WasmValue val = Pop();
2105 Push(WasmValue(ExecuteI32ReinterpretF32(val)));
2106 break;
2107 }
2108 case kExprI64ReinterpretF64: {
2109 WasmValue val = Pop();
2110 Push(WasmValue(ExecuteI64ReinterpretF64(val)));
2111 break;
2112 }
2113 case kNumericPrefix: {
2114 ++len;
2115 if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
2116 break;
2117 }
2118 case kAtomicPrefix: {
2119 if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
2120 break;
2121 }
2122 case kSimdPrefix: {
2123 ++len;
2124 if (!ExecuteSimdOp(opcode, &decoder, code, pc, len)) return;
2125 break;
2126 }
2127
2128 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
2129 case kExpr##name: { \
2130 WasmValue rval = Pop(); \
2131 WasmValue lval = Pop(); \
2132 auto result = lval.to<ctype>() op rval.to<ctype>(); \
2133 possible_nondeterminism_ |= has_nondeterminism(result); \
2134 Push(WasmValue(result)); \
2135 break; \
2136 }
2137 FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
2138 #undef EXECUTE_SIMPLE_BINOP
2139
2140 #define EXECUTE_OTHER_BINOP(name, ctype) \
2141 case kExpr##name: { \
2142 TrapReason trap = kTrapCount; \
2143 ctype rval = Pop().to<ctype>(); \
2144 ctype lval = Pop().to<ctype>(); \
2145 auto result = Execute##name(lval, rval, &trap); \
2146 possible_nondeterminism_ |= has_nondeterminism(result); \
2147 if (trap != kTrapCount) return DoTrap(trap, pc); \
2148 Push(WasmValue(result)); \
2149 break; \
2150 }
2151 FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
2152 #undef EXECUTE_OTHER_BINOP
2153
2154 #define EXECUTE_UNOP(name, ctype, exec_fn) \
2155 case kExpr##name: { \
2156 TrapReason trap = kTrapCount; \
2157 ctype val = Pop().to<ctype>(); \
2158 auto result = exec_fn(val, &trap); \
2159 possible_nondeterminism_ |= has_nondeterminism(result); \
2160 if (trap != kTrapCount) return DoTrap(trap, pc); \
2161 Push(WasmValue(result)); \
2162 break; \
2163 }
2164
2165 #define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
2166 FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
2167 #undef EXECUTE_OTHER_UNOP
2168
2169 #define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
2170 EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
2171 FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
2172 #undef EXECUTE_I32CONV_FLOATOP
2173 #undef EXECUTE_UNOP
2174
2175 default:
2176 FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
2177 OpcodeName(code->start[pc]));
2178 UNREACHABLE();
2179 }
2180
2181 #ifdef DEBUG
2182 if (!WasmOpcodes::IsControlOpcode(opcode)) {
2183 DCHECK_EQ(expected_new_stack_height, StackHeight());
2184 }
2185 #endif
2186
2187 pc += len;
2188 if (pc == limit) {
2189 // Fell off end of code; do an implicit return.
2190 TRACE("@%-3zu: ImplicitReturn\n", pc);
2191 if (!DoReturn(&decoder, &code, &pc, &limit,
2192 code->function->sig->return_count()))
2193 return;
2194 PAUSE_IF_BREAK_FLAG(AfterReturn);
2195 }
2196 #undef PAUSE_IF_BREAK_FLAG
2197 }
2198
2199 state_ = WasmInterpreter::PAUSED;
2200 break_pc_ = hit_break ? pc : kInvalidPc;
2201 CommitPc(pc);
2202 }
2203
Pop()2204 WasmValue Pop() {
2205 DCHECK_GT(frames_.size(), 0);
2206 DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
2207 return *--sp_;
2208 }
2209
PopN(int n)2210 void PopN(int n) {
2211 DCHECK_GE(StackHeight(), n);
2212 DCHECK_GT(frames_.size(), 0);
2213 // Check that we don't pop into locals.
2214 DCHECK_GE(StackHeight() - n, frames_.back().llimit());
2215 sp_ -= n;
2216 }
2217
PopArity(size_t arity)2218 WasmValue PopArity(size_t arity) {
2219 if (arity == 0) return WasmValue();
2220 CHECK_EQ(1, arity);
2221 return Pop();
2222 }
2223
Push(WasmValue val)2224 void Push(WasmValue val) {
2225 DCHECK_NE(kWasmStmt, val.type());
2226 DCHECK_LE(1, stack_limit_ - sp_);
2227 *sp_++ = val;
2228 }
2229
Push(WasmValue * vals,size_t arity)2230 void Push(WasmValue* vals, size_t arity) {
2231 DCHECK_LE(arity, stack_limit_ - sp_);
2232 for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
2233 DCHECK_NE(kWasmStmt, val->type());
2234 }
2235 memcpy(sp_, vals, arity * sizeof(*sp_));
2236 sp_ += arity;
2237 }
2238
EnsureStackSpace(size_t size)2239 void EnsureStackSpace(size_t size) {
2240 if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
2241 size_t old_size = stack_limit_ - stack_start_;
2242 size_t requested_size =
2243 base::bits::RoundUpToPowerOfTwo64((sp_ - stack_start_) + size);
2244 size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
2245 WasmValue* new_stack = zone_->NewArray<WasmValue>(new_size);
2246 memcpy(new_stack, stack_start_, old_size * sizeof(*sp_));
2247 sp_ = new_stack + (sp_ - stack_start_);
2248 stack_start_ = new_stack;
2249 stack_limit_ = new_stack + new_size;
2250 }
2251
StackHeight()2252 sp_t StackHeight() { return sp_ - stack_start_; }
2253
TraceValueStack()2254 void TraceValueStack() {
2255 #ifdef DEBUG
2256 if (!FLAG_trace_wasm_interpreter) return;
2257 Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
2258 sp_t sp = top ? top->sp : 0;
2259 sp_t plimit = top ? top->plimit() : 0;
2260 sp_t llimit = top ? top->llimit() : 0;
2261 for (size_t i = sp; i < StackHeight(); ++i) {
2262 if (i < plimit)
2263 PrintF(" p%zu:", i);
2264 else if (i < llimit)
2265 PrintF(" l%zu:", i);
2266 else
2267 PrintF(" s%zu:", i);
2268 WasmValue val = GetStackValue(i);
2269 switch (val.type()) {
2270 case kWasmI32:
2271 PrintF("i32:%d", val.to<int32_t>());
2272 break;
2273 case kWasmI64:
2274 PrintF("i64:%" PRId64 "", val.to<int64_t>());
2275 break;
2276 case kWasmF32:
2277 PrintF("f32:%f", val.to<float>());
2278 break;
2279 case kWasmF64:
2280 PrintF("f64:%lf", val.to<double>());
2281 break;
2282 case kWasmStmt:
2283 PrintF("void");
2284 break;
2285 default:
2286 UNREACHABLE();
2287 break;
2288 }
2289 }
2290 #endif // DEBUG
2291 }
2292
TryHandleException(Isolate * isolate)2293 ExternalCallResult TryHandleException(Isolate* isolate) {
2294 if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
2295 return {ExternalCallResult::EXTERNAL_UNWOUND};
2296 }
2297 return {ExternalCallResult::EXTERNAL_RETURNED};
2298 }
2299
  // Calls a wasm function that is external to this interpreter activation
  // (a JS import wrapper or a function of another instance) through the
  // C-wasm entry stub. Arguments are taken from the interpreter value stack
  // and marshalled into a byte buffer; the (single) return value is pushed
  // back onto the stack on success.
  ExternalCallResult CallExternalWasmFunction(
      Isolate* isolate, Handle<WasmInstanceObject> instance,
      const wasm::WasmCode* code, FunctionSig* sig) {
    // Calling a JS import with a signature JS cannot represent (e.g. i64)
    // is a type error per the JS API spec.
    if (code->kind() == wasm::WasmCode::kWasmToJsWrapper &&
        !IsJSCompatibleSignature(sig)) {
      isolate->Throw(*isolate->factory()->NewTypeError(
          MessageTemplate::kWasmTrapTypeError));
      return TryHandleException(isolate);
    }

    Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
    Handle<JSFunction> wasm_entry =
        WasmDebugInfo::GetCWasmEntry(debug_info, sig);

    TRACE(" => Calling external wasm function\n");

    // Copy the arguments to one buffer.
    // TODO(clemensh): Introduce a helper for all argument buffer
    // con-/destruction.
    int num_args = static_cast<int>(sig->parameter_count());
    std::vector<uint8_t> arg_buffer(num_args * 8);
    size_t offset = 0;
    // Arguments are the topmost |num_args| values on the interpreter stack.
    WasmValue* wasm_args = sp_ - num_args;
    for (int i = 0; i < num_args; ++i) {
      int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
      // Grow the buffer if the initial 8-bytes-per-arg estimate was too small.
      if (arg_buffer.size() < offset + param_size) {
        arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
      }
      Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
      switch (sig->GetParam(i)) {
        case kWasmI32:
          WriteUnalignedValue(address, wasm_args[i].to<uint32_t>());
          break;
        case kWasmI64:
          WriteUnalignedValue(address, wasm_args[i].to<uint64_t>());
          break;
        case kWasmF32:
          WriteUnalignedValue(address, wasm_args[i].to<float>());
          break;
        case kWasmF64:
          WriteUnalignedValue(address, wasm_args[i].to<double>());
          break;
        default:
          UNIMPLEMENTED();
      }
      offset += param_size;
    }

    // Ensure that there is enough space in the arg_buffer to hold the return
    // value(s).
    size_t return_size = 0;
    for (ValueType t : sig->returns()) {
      return_size += ValueTypes::ElementSizeInBytes(t);
    }
    if (arg_buffer.size() < return_size) {
      arg_buffer.resize(return_size);
    }

    // Wrap the arg_buffer data pointer in a handle. As
    // this is an aligned pointer, to the GC it will look like a Smi.
    Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
                                  isolate);
    DCHECK(!arg_buffer_obj->IsHeapObject());

    static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
                  "code below needs adaption");
    Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
    args[compiler::CWasmEntryParameters::kCodeObject] = Handle<Object>::cast(
        isolate->factory()->NewForeign(code->instruction_start(), TENURED));
    args[compiler::CWasmEntryParameters::kWasmInstance] = instance;
    args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;

    Handle<Object> receiver = isolate->factory()->undefined_value();
    // We leave the interpreter and re-enter wasm/JS code; set the
    // thread-in-wasm flag for the trap handler accordingly.
    trap_handler::SetThreadInWasm();
    MaybeHandle<Object> maybe_retval =
        Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
    TRACE(" => External wasm function returned%s\n",
          maybe_retval.is_null() ? " with exception" : "");

    if (maybe_retval.is_null()) {
      // JSEntryStub may throw a stack overflow before we actually get to wasm
      // code or back to the interpreter, meaning the thread-in-wasm flag won't
      // be cleared.
      if (trap_handler::IsThreadInWasm()) {
        trap_handler::ClearThreadInWasm();
      }
      return TryHandleException(isolate);
    }

    trap_handler::ClearThreadInWasm();

    // Pop arguments off the stack.
    sp_ -= num_args;
    // Push return values.
    if (sig->return_count() > 0) {
      // TODO(wasm): Handle multiple returns.
      DCHECK_EQ(1, sig->return_count());
      // The return value was written at the start of the arg_buffer.
      Address address = reinterpret_cast<Address>(arg_buffer.data());
      switch (sig->GetReturn()) {
        case kWasmI32:
          Push(WasmValue(ReadUnalignedValue<uint32_t>(address)));
          break;
        case kWasmI64:
          Push(WasmValue(ReadUnalignedValue<uint64_t>(address)));
          break;
        case kWasmF32:
          Push(WasmValue(ReadUnalignedValue<float>(address)));
          break;
        case kWasmF64:
          Push(WasmValue(ReadUnalignedValue<double>(address)));
          break;
        default:
          UNIMPLEMENTED();
      }
    }
    return {ExternalCallResult::EXTERNAL_RETURNED};
  }
2417
  // Calls the imported function at |function_index|. Resolves the import
  // entry to its target instance and code, then dispatches through
  // CallExternalWasmFunction.
  ExternalCallResult CallImportedFunction(uint32_t function_index) {
    // Use a new HandleScope to avoid leaking / accumulating handles in the
    // outer scope.
    Isolate* isolate = instance_object_->GetIsolate();
    HandleScope handle_scope(isolate);

    DCHECK_GT(module()->num_imported_functions, function_index);
    Handle<WasmInstanceObject> instance;
    WasmCode* code;
    {
      // Resolve the import to the instance/code it currently points at.
      ImportedFunctionEntry entry(instance_object_, function_index);
      instance = handle(entry.instance(), isolate);
      code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
          entry.target());
    }
    FunctionSig* sig = codemap()->module()->functions[function_index].sig;
    return CallExternalWasmFunction(isolate, instance, code, sig);
  }
2436
  // Resolves and calls an indirect (call_indirect) target: bounds-checks the
  // table entry, performs the canonical signature check, and either returns
  // the internal interpreter code or dispatches to an external wasm function.
  ExternalCallResult CallIndirectFunction(uint32_t table_index,
                                          uint32_t entry_index,
                                          uint32_t sig_index) {
    if (codemap()->call_indirect_through_module()) {
      // Rely on the information stored in the WasmModule.
      InterpreterCode* code =
          codemap()->GetIndirectCode(table_index, entry_index);
      if (!code) return {ExternalCallResult::INVALID_FUNC};
      if (code->function->sig_index != sig_index) {
        // If not an exact match, we have to do a canonical check.
        int function_canonical_id =
            module()->signature_ids[code->function->sig_index];
        int expected_canonical_id = module()->signature_ids[sig_index];
        DCHECK_EQ(function_canonical_id,
                  module()->signature_map.Find(code->function->sig));
        if (function_canonical_id != expected_canonical_id) {
          return {ExternalCallResult::SIGNATURE_MISMATCH};
        }
      }
      return {ExternalCallResult::INTERNAL, code};
    }

    Isolate* isolate = instance_object_->GetIsolate();
    uint32_t expected_sig_id = module()->signature_ids[sig_index];
    DCHECK_EQ(expected_sig_id,
              module()->signature_map.Find(module()->signatures[sig_index]));

    // The function table is stored in the instance.
    // TODO(wasm): the wasm interpreter currently supports only one table.
    CHECK_EQ(0, table_index);
    // Bounds check against table size.
    if (entry_index >= instance_object_->indirect_function_table_size()) {
      return {ExternalCallResult::INVALID_FUNC};
    }

    WasmCode* code;
    Handle<WasmInstanceObject> instance;
    {
      IndirectFunctionTableEntry entry(instance_object_, entry_index);
      // Signature check.
      if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
        return {ExternalCallResult::SIGNATURE_MISMATCH};
      }

      instance = handle(entry.instance(), isolate);
      code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
          entry.target());
    }

    // Call either an internal or external WASM function.
    HandleScope scope(isolate);
    FunctionSig* signature = module()->signatures[sig_index];

    if (code->kind() == wasm::WasmCode::kFunction) {
      if (!instance_object_.is_identical_to(instance)) {
        // Cross instance call.
        return CallExternalWasmFunction(isolate, instance, code, signature);
      }
      // Same-instance wasm function: execute it inside this interpreter.
      return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
    }

    // Call to external function.
    if (code->kind() == wasm::WasmCode::kInterpreterEntry ||
        code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
      return CallExternalWasmFunction(isolate, instance, code, signature);
    }
    return {ExternalCallResult::INVALID_FUNC};
  }
2505
current_activation()2506 inline Activation current_activation() {
2507 return activations_.empty() ? Activation(0, 0) : activations_.back();
2508 }
2509 };
2510
2511 class InterpretedFrameImpl {
2512 public:
InterpretedFrameImpl(ThreadImpl * thread,int index)2513 InterpretedFrameImpl(ThreadImpl* thread, int index)
2514 : thread_(thread), index_(index) {
2515 DCHECK_LE(0, index);
2516 }
2517
function() const2518 const WasmFunction* function() const { return frame()->code->function; }
2519
pc() const2520 int pc() const {
2521 DCHECK_LE(0, frame()->pc);
2522 DCHECK_GE(kMaxInt, frame()->pc);
2523 return static_cast<int>(frame()->pc);
2524 }
2525
GetParameterCount() const2526 int GetParameterCount() const {
2527 DCHECK_GE(kMaxInt, function()->sig->parameter_count());
2528 return static_cast<int>(function()->sig->parameter_count());
2529 }
2530
GetLocalCount() const2531 int GetLocalCount() const {
2532 size_t num_locals = function()->sig->parameter_count() +
2533 frame()->code->locals.type_list.size();
2534 DCHECK_GE(kMaxInt, num_locals);
2535 return static_cast<int>(num_locals);
2536 }
2537
GetStackHeight() const2538 int GetStackHeight() const {
2539 bool is_top_frame =
2540 static_cast<size_t>(index_) + 1 == thread_->frames_.size();
2541 size_t stack_limit =
2542 is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
2543 DCHECK_LE(frame()->sp, stack_limit);
2544 size_t frame_size = stack_limit - frame()->sp;
2545 DCHECK_LE(GetLocalCount(), frame_size);
2546 return static_cast<int>(frame_size) - GetLocalCount();
2547 }
2548
GetLocalValue(int index) const2549 WasmValue GetLocalValue(int index) const {
2550 DCHECK_LE(0, index);
2551 DCHECK_GT(GetLocalCount(), index);
2552 return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
2553 }
2554
GetStackValue(int index) const2555 WasmValue GetStackValue(int index) const {
2556 DCHECK_LE(0, index);
2557 // Index must be within the number of stack values of this frame.
2558 DCHECK_GT(GetStackHeight(), index);
2559 return thread_->GetStackValue(static_cast<int>(frame()->sp) +
2560 GetLocalCount() + index);
2561 }
2562
2563 private:
2564 ThreadImpl* thread_;
2565 int index_;
2566
frame() const2567 ThreadImpl::Frame* frame() const {
2568 DCHECK_GT(thread_->frames_.size(), index_);
2569 return &thread_->frames_[index_];
2570 }
2571 };
2572
2573 // Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
2574 // Thread* is the public interface, without knowledge of the object layout.
2575 // This cast is potentially risky, but as long as we always cast it back before
2576 // accessing any data, it should be fine. UBSan is not complaining.
// Converts an implementation pointer into the opaque public handle type.
WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
  return reinterpret_cast<WasmInterpreter::Thread*>(impl);
}
// Converts the opaque public handle back to the implementation pointer.
ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
  return reinterpret_cast<ThreadImpl*>(thread);
}
2583
2584 // Same conversion for InterpretedFrame and InterpretedFrameImpl.
// Converts a frame implementation pointer into the opaque public handle type.
InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
  return reinterpret_cast<InterpretedFrame*>(impl);
}
ToImpl(const InterpretedFrame * frame)2588 const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
2589 return reinterpret_cast<const InterpretedFrameImpl*>(frame);
2590 }
2591
2592 } // namespace
2593
2594 //============================================================================
2595 // Implementation of the pimpl idiom for WasmInterpreter::Thread.
2596 // Instead of placing a pointer to the ThreadImpl inside of the Thread object,
2597 // we just reinterpret_cast them. ThreadImpls are only allocated inside this
2598 // translation unit anyway.
2599 //============================================================================
// Each Thread method forwards to the identically-named ThreadImpl method via
// the ToImpl() cast; no logic lives at this layer.
WasmInterpreter::State WasmInterpreter::Thread::state() {
  return ToImpl(this)->state();
}
void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
                                        WasmValue* args) {
  ToImpl(this)->InitFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
  return ToImpl(this)->Run(num_steps);
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
WasmInterpreter::Thread::ExceptionHandlingResult
WasmInterpreter::Thread::HandleException(Isolate* isolate) {
  return ToImpl(this)->HandleException(isolate);
}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
  return ToImpl(this)->GetBreakpointPc();
}
int WasmInterpreter::Thread::GetFrameCount() {
  return ToImpl(this)->GetFrameCount();
}
// Allocates a fresh InterpretedFrameImpl; ownership passes to the returned
// FramePtr, which deletes it via InterpretedFrameDeleter.
WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
  DCHECK_LE(0, index);
  DCHECK_GT(GetFrameCount(), index);
  return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
  return ToImpl(this)->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
  return ToImpl(this)->GetTrapReason();
}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
  return ToImpl(this)->PossibleNondeterminism();
}
uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
  return ToImpl(this)->NumInterpretedCalls();
}
void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
  ToImpl(this)->AddBreakFlags(flags);
}
void WasmInterpreter::Thread::ClearBreakFlags() {
  ToImpl(this)->ClearBreakFlags();
}
uint32_t WasmInterpreter::Thread::NumActivations() {
  return ToImpl(this)->NumActivations();
}
uint32_t WasmInterpreter::Thread::StartActivation() {
  return ToImpl(this)->StartActivation();
}
void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
  ToImpl(this)->FinishActivation(id);
}
uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
  return ToImpl(this)->ActivationFrameBase(id);
}
2657
2658 //============================================================================
2659 // The implementation details of the interpreter.
2660 //============================================================================
// Zone-allocated container for everything the interpreter owns: the copied
// module bytes, the code map, and the (currently single) interpreter thread.
class WasmInterpreterInternals : public ZoneObject {
 public:
  // Create a copy of the module bytes for the interpreter, since the passed
  // pointer might be invalidated after constructing the interpreter.
  // NOTE: member order matters — codemap_ is initialized from
  // module_bytes_.data(), so module_bytes_ must be declared (and thus
  // initialized) first.
  const ZoneVector<uint8_t> module_bytes_;
  CodeMap codemap_;
  ZoneVector<ThreadImpl> threads_;

  WasmInterpreterInternals(Isolate* isolate, Zone* zone,
                           const WasmModule* module,
                           const ModuleWireBytes& wire_bytes,
                           Handle<WasmInstanceObject> instance_object)
      : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
        codemap_(isolate, module, module_bytes_.data(), zone),
        threads_(zone) {
    // Only one thread for now; see WasmInterpreter::GetThreadCount().
    threads_.emplace_back(zone, &codemap_, instance_object);
  }
};
2679
2680 namespace {
2681 // TODO(wasm): a finalizer is only required to delete the global handle.
// Weak callback: the parameter is the global handle's own location, so this
// simply destroys the global handle when the instance object dies.
void GlobalHandleDeleter(const v8::WeakCallbackInfo<void>& data) {
  GlobalHandles::Destroy(reinterpret_cast<Object**>(
      reinterpret_cast<JSObject**>(data.GetParameter())));
}
2686
MakeWeak(Isolate * isolate,Handle<WasmInstanceObject> instance_object)2687 Handle<WasmInstanceObject> MakeWeak(
2688 Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
2689 Handle<Object> handle = isolate->global_handles()->Create(*instance_object);
2690 // TODO(wasm): use a phantom handle in the WasmInterpreter.
2691 GlobalHandles::MakeWeak(handle.location(), handle.location(),
2692 &GlobalHandleDeleter,
2693 v8::WeakCallbackType::kFinalizer);
2694 return Handle<WasmInstanceObject>::cast(handle);
2695 }
2696 } // namespace
2697
2698 //============================================================================
2699 // Implementation of the public interface of the interpreter.
2700 //============================================================================
// The internals are placement-new'ed into zone_, so they share the zone's
// lifetime; the instance handle is weakened first so the interpreter does not
// root the instance (see MakeWeak above).
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      internals_(new (&zone_) WasmInterpreterInternals(
          isolate, &zone_, module, wire_bytes,
          MakeWeak(isolate, instance_object))) {}
2708
~WasmInterpreter()2709 WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
2710
// Run/Pause operate on the single interpreter thread (thread 0).
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
2714
SetBreakpoint(const WasmFunction * function,pc_t pc,bool enabled)2715 bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
2716 bool enabled) {
2717 InterpreterCode* code = internals_->codemap_.GetCode(function);
2718 size_t size = static_cast<size_t>(code->end - code->start);
2719 // Check bounds for {pc}.
2720 if (pc < code->locals.encoded_size || pc >= size) return false;
2721 // Make a copy of the code before enabling a breakpoint.
2722 if (enabled && code->orig_start == code->start) {
2723 code->start = reinterpret_cast<byte*>(zone_.New(size));
2724 memcpy(code->start, code->orig_start, size);
2725 code->end = code->start + size;
2726 }
2727 bool prev = code->start[pc] == kInternalBreakpoint;
2728 if (enabled) {
2729 code->start[pc] = kInternalBreakpoint;
2730 } else {
2731 code->start[pc] = code->orig_start[pc];
2732 }
2733 return prev;
2734 }
2735
GetBreakpoint(const WasmFunction * function,pc_t pc)2736 bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
2737 InterpreterCode* code = internals_->codemap_.GetCode(function);
2738 size_t size = static_cast<size_t>(code->end - code->start);
2739 // Check bounds for {pc}.
2740 if (pc < code->locals.encoded_size || pc >= size) return false;
2741 // Check if a breakpoint is present at that place in the code.
2742 return code->start[pc] == kInternalBreakpoint;
2743 }
2744
// Per-function tracing is not implemented; this always aborts.
bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
  UNIMPLEMENTED();
  return false;
}
2749
int WasmInterpreter::GetThreadCount() {
  return 1;  // only one thread for now.
}
2753
// Returns the public handle for thread {id}; only id 0 exists today.
WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
  CHECK_EQ(0, id);  // only one thread for now.
  return ToThread(&internals_->threads_[id]);
}
2758
// Test-only hooks; they forward straight into the code map.
// Registers {function} without any code attached yet.
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  internals_->codemap_.AddFunction(function, nullptr, nullptr);
}

// Overrides the bytecode of {function} with the range [start, end).
void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  internals_->codemap_.SetFunctionCode(function, start, end);
}

// Makes indirect calls resolve through the module's tables (test mode).
void WasmInterpreter::SetCallIndirectTestMode() {
  internals_->codemap_.set_call_indirect_through_module(true);
}
2772
// Computes the control-transfer side table for the code in [start, end)
// without requiring a full interpreter instance (test-only).
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
  // Create some dummy structures, to avoid special-casing the implementation
  // just for testing.
  // NOTE: these are positional aggregate initializations; the field order
  // must match the WasmFunction and InterpreterCode declarations.
  FunctionSig sig(0, 0, nullptr);
  WasmFunction function{&sig, 0, 0, {0, 0}, false, false};
  InterpreterCode code{
      &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};

  // Now compute and return the control transfers.
  SideTable side_table(zone, module, &code);
  return side_table.map_;
}
2786
2787 //============================================================================
2788 // Implementation of the frame inspection interface.
2789 //============================================================================
// InterpretedFrame methods forward to InterpretedFrameImpl via ToImpl().
const WasmFunction* InterpretedFrame::function() const {
  return ToImpl(this)->function();
}
int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
int InterpretedFrame::GetParameterCount() const {
  return ToImpl(this)->GetParameterCount();
}
int InterpretedFrame::GetLocalCount() const {
  return ToImpl(this)->GetLocalCount();
}
int InterpretedFrame::GetStackHeight() const {
  return ToImpl(this)->GetStackHeight();
}
WasmValue InterpretedFrame::GetLocalValue(int index) const {
  return ToImpl(this)->GetLocalValue(index);
}
WasmValue InterpretedFrame::GetStackValue(int index) const {
  return ToImpl(this)->GetStackValue(index);
}
// Deletes the impl object allocated in Thread::GetFrame(); the cast back to
// InterpretedFrameImpl* ensures the correct destructor runs.
void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
  delete ToImpl(ptr);
}
2812
2813 #undef TRACE
2814 #undef FOREACH_INTERNAL_OPCODE
2815 #undef WASM_CTYPES
2816 #undef FOREACH_SIMPLE_BINOP
2817 #undef FOREACH_OTHER_BINOP
2818 #undef FOREACH_I32CONV_FLOATOP
2819 #undef FOREACH_OTHER_UNOP
2820
2821 } // namespace wasm
2822 } // namespace internal
2823 } // namespace v8
2824