/*
 * Copyright 2015 WebAssembly Community Group participants
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//
// Simple WebAssembly interpreter. This operates directly on the AST,
// for simplicity and clarity. A goal is for it to be possible for
// people to read this code and understand WebAssembly semantics.
//

#ifndef wasm_wasm_interpreter_h
#define wasm_wasm_interpreter_h

#include <cmath>
#include <limits.h>
#include <sstream>

#include "ir/module-utils.h"
#include "support/bits.h"
#include "support/safe_integer.h"
#include "wasm-builder.h"
#include "wasm-traversal.h"
#include "wasm.h"

#ifdef WASM_INTERPRETER_DEBUG
#include "wasm-printing.h"
#endif

namespace wasm {

// An exception thrown while executing wasm code, carrying the thrown value.
struct WasmException {
  WasmException(Literal exn) : exn(exn) {}
  Literal exn;
};

using namespace cashew;

// Utilities

// Sentinel Names used as special break targets, defined in the corresponding
// .cpp file: RETURN_FLOW marks a function return (see visitReturn), and
// NONCONSTANT_FLOW marks evaluation that gave up (see visitLoop's
// iteration limit).
extern Name WASM, RETURN_FLOW, NONCONSTANT_FLOW;

// Stuff that flows around during executing expressions: a literal, or a change
// in control flow.
56 class Flow { 57 public: Flow()58 Flow() : values() {} Flow(Literal value)59 Flow(Literal value) : values{value} { assert(value.type.isConcrete()); } Flow(Literals & values)60 Flow(Literals& values) : values(values) {} Flow(Literals && values)61 Flow(Literals&& values) : values(std::move(values)) {} Flow(Name breakTo)62 Flow(Name breakTo) : values(), breakTo(breakTo) {} 63 64 Literals values; 65 Name breakTo; // if non-null, a break is going on 66 67 // A helper function for the common case where there is only one value getSingleValue()68 const Literal& getSingleValue() { 69 assert(values.size() == 1); 70 return values[0]; 71 } 72 getType()73 Type getType() { return values.getType(); } 74 getConstExpression(Module & module)75 Expression* getConstExpression(Module& module) { 76 assert(values.size() > 0); 77 Builder builder(module); 78 return builder.makeConstantExpression(values); 79 } 80 breaking()81 bool breaking() { return breakTo.is(); } 82 clearIf(Name target)83 void clearIf(Name target) { 84 if (breakTo == target) { 85 breakTo.clear(); 86 } 87 } 88 89 friend std::ostream& operator<<(std::ostream& o, const Flow& flow) { 90 o << "(flow " << (flow.breakTo.is() ? 
flow.breakTo.str : "-") << " : {"; 91 for (size_t i = 0; i < flow.values.size(); ++i) { 92 if (i > 0) { 93 o << ", "; 94 } 95 o << flow.values[i]; 96 } 97 o << "})"; 98 return o; 99 } 100 }; 101 102 // A list of literals, for function calls 103 typedef std::vector<Literal> LiteralList; 104 105 // Debugging helpers 106 #ifdef WASM_INTERPRETER_DEBUG 107 class Indenter { 108 static int indentLevel; 109 110 const char* entryName; 111 112 public: 113 Indenter(const char* entry); 114 ~Indenter(); 115 116 static void print(); 117 }; 118 119 #define NOTE_ENTER(x) \ 120 Indenter _int_blah(x); \ 121 { \ 122 Indenter::print(); \ 123 std::cout << "visit " << x << " : " << curr << "\n"; \ 124 } 125 #define NOTE_ENTER_(x) \ 126 Indenter _int_blah(x); \ 127 { \ 128 Indenter::print(); \ 129 std::cout << "visit " << x << "\n"; \ 130 } 131 #define NOTE_NAME(p0) \ 132 { \ 133 Indenter::print(); \ 134 std::cout << "name " << '(' << Name(p0) << ")\n"; \ 135 } 136 #define NOTE_EVAL1(p0) \ 137 { \ 138 Indenter::print(); \ 139 std::cout << "eval " #p0 " (" << p0 << ")\n"; \ 140 } 141 #define NOTE_EVAL2(p0, p1) \ 142 { \ 143 Indenter::print(); \ 144 std::cout << "eval " #p0 " (" << p0 << "), " #p1 " (" << p1 << ")\n"; \ 145 } 146 #else // WASM_INTERPRETER_DEBUG 147 #define NOTE_ENTER(x) 148 #define NOTE_ENTER_(x) 149 #define NOTE_NAME(p0) 150 #define NOTE_EVAL1(p0) 151 #define NOTE_EVAL2(p0, p1) 152 #endif // WASM_INTERPRETER_DEBUG 153 154 // Execute an expression 155 template<typename SubType> 156 class ExpressionRunner : public OverriddenVisitor<SubType, Flow> { 157 protected: 158 // Maximum depth before giving up. 159 Index maxDepth; 160 Index depth = 0; 161 162 // Maximum iterations before giving up on a loop. 
  Index maxLoopIterations;

  // Evaluates each operand in order into |arguments|. Returns early with a
  // breaking Flow if any operand breaks; each operand must yield one value.
  Flow generateArguments(const ExpressionList& operands,
                         LiteralList& arguments) {
    NOTE_ENTER_("generateArguments");
    arguments.reserve(operands.size());
    for (auto expression : operands) {
      Flow flow = this->visit(expression);
      if (flow.breaking()) {
        return flow;
      }
      NOTE_EVAL1(flow.values);
      arguments.push_back(flow.getSingleValue());
    }
    return Flow();
  }

public:
  // Indicates no limit of maxDepth or maxLoopIterations.
  static const Index NO_LIMIT = 0;

  ExpressionRunner(Index maxDepth = NO_LIMIT,
                   Index maxLoopIterations = NO_LIMIT)
    : maxDepth(maxDepth), maxLoopIterations(maxLoopIterations) {}

  // Central dispatch: tracks recursion depth (trapping past maxDepth) and
  // checks that a non-breaking result's type is a subtype of the
  // expression's static type.
  Flow visit(Expression* curr) {
    depth++;
    if (maxDepth != NO_LIMIT && depth > maxDepth) {
      trap("interpreter recursion limit");
    }
    auto ret = OverriddenVisitor<SubType, Flow>::visit(curr);
    if (!ret.breaking()) {
      Type type = ret.getType();
      if (type.isConcrete() || curr->type.isConcrete()) {
#if 1 // def WASM_INTERPRETER_DEBUG
        // NOTE(review): this diagnostic is compiled in unconditionally
        // (the #if 1 looks like a debug toggle left enabled); it prints
        // context before the assert below fires.
        if (!Type::isSubType(type, curr->type)) {
          std::cerr << "expected " << curr->type << ", seeing " << type
                    << " from\n"
                    << curr << '\n';
        }
#endif
        assert(Type::isSubType(type, curr->type));
      }
    }
    depth--;
    return ret;
  }

  Flow visitBlock(Block* curr) {
    NOTE_ENTER("Block");
    // special-case Block, because Block nesting (in their first element) can be
    // incredibly deep
    // Collect the chain of blocks that nest in first-child position, then
    // execute them from innermost outwards, avoiding native recursion.
    std::vector<Block*> stack;
    stack.push_back(curr);
    while (curr->list.size() > 0 && curr->list[0]->is<Block>()) {
      curr = curr->list[0]->cast<Block>();
      stack.push_back(curr);
    }
    Flow flow;
    auto* top = stack.back();
    while (stack.size() > 0) {
      curr = stack.back();
      stack.pop_back();
      if (flow.breaking()) {
        // A pending break may target this block's label, in which case it
        // stops propagating here.
        flow.clearIf(curr->name);
        continue;
      }
      auto& list = curr->list;
      for (size_t i = 0; i < list.size(); i++) {
        if (curr != top && i == 0) {
          // one of the block recursions we already handled
          continue;
        }
        flow = visit(list[i]);
        if (flow.breaking()) {
          flow.clearIf(curr->name);
          break;
        }
      }
    }
    return flow;
  }
  Flow visitIf(If* curr) {
    NOTE_ENTER("If");
    Flow flow = visit(curr->condition);
    if (flow.breaking()) {
      return flow;
    }
    NOTE_EVAL1(flow.values);
    if (flow.getSingleValue().geti32()) {
      Flow flow = visit(curr->ifTrue);
      if (!flow.breaking() && !curr->ifFalse) {
        flow = Flow(); // if_else returns a value, but if does not
      }
      return flow;
    }
    if (curr->ifFalse) {
      return visit(curr->ifFalse);
    }
    return Flow();
  }
  Flow visitLoop(Loop* curr) {
    NOTE_ENTER("Loop");
    Index loopCount = 0;
    while (1) {
      Flow flow = visit(curr->body);
      if (flow.breaking()) {
        if (flow.breakTo == curr->name) {
          // A break to the loop's own label continues the loop, unless we
          // hit the configured iteration limit, in which case we give up
          // and report a non-constant result.
          if (maxLoopIterations != NO_LIMIT &&
              ++loopCount >= maxLoopIterations) {
            return Flow(NONCONSTANT_FLOW);
          }
          continue; // lol
        }
      }
      // loop does not loop automatically, only continue achieves that
      return flow;
    }
  }
  Flow visitBreak(Break* curr) {
    NOTE_ENTER("Break");
    bool condition = true;
    Flow flow;
    if (curr->value) {
      flow = visit(curr->value);
      if (flow.breaking()) {
        return flow;
      }
    }
    if (curr->condition) {
      Flow conditionFlow = visit(curr->condition);
      if (conditionFlow.breaking()) {
        return conditionFlow;
      }
      condition = conditionFlow.getSingleValue().getInteger() != 0;
      if (!condition) {
        // br_if with a false condition just yields the value (if any).
        return flow;
      }
    }
    flow.breakTo = curr->name;
    return flow;
  }
  Flow visitSwitch(Switch* curr) {
    NOTE_ENTER("Switch");
    Flow flow;
    Literals values;
    if (curr->value) {
      flow = visit(curr->value);
      if (flow.breaking()) {
        return flow;
      }
      values = flow.values;
    }
    flow = visit(curr->condition);
    if (flow.breaking()) {
      return flow;
    }
    // An out-of-range index selects the default target.
    int64_t index = flow.getSingleValue().getInteger();
    Name target = curr->default_;
    if (index >= 0 && (size_t)index < curr->targets.size()) {
      target = curr->targets[(size_t)index];
    }
    flow.breakTo = target;
    flow.values = values;
    return flow;
  }
  Flow visitConst(Const* curr) {
    NOTE_ENTER("Const");
    NOTE_EVAL1(curr->value);
    return Flow(curr->value); // heh
  }

  // Unary and Binary nodes, the core math computations. We mostly just
  // delegate to the Literal::* methods, except we handle traps here.

  Flow visitUnary(Unary* curr) {
    NOTE_ENTER("Unary");
    Flow flow = visit(curr->value);
    if (flow.breaking()) {
      return flow;
    }
    Literal value = flow.getSingleValue();
    NOTE_EVAL1(value);
    switch (curr->op) {
      // Scalar integer ops.
      case ClzInt32:
      case ClzInt64:
        return value.countLeadingZeroes();
      case CtzInt32:
      case CtzInt64:
        return value.countTrailingZeroes();
      case PopcntInt32:
      case PopcntInt64:
        return value.popCount();
      case EqZInt32:
      case EqZInt64:
        return value.eqz();
      case ReinterpretInt32:
        return value.castToF32();
      case ReinterpretInt64:
        return value.castToF64();
      case ExtendSInt32:
        return value.extendToSI64();
      case ExtendUInt32:
        return value.extendToUI64();
      case WrapInt64:
        return value.wrapToI32();
      case ConvertUInt32ToFloat32:
      case ConvertUInt64ToFloat32:
        return value.convertUIToF32();
      case ConvertUInt32ToFloat64:
      case ConvertUInt64ToFloat64:
        return value.convertUIToF64();
      case ConvertSInt32ToFloat32:
      case ConvertSInt64ToFloat32:
        return value.convertSIToF32();
      case ConvertSInt32ToFloat64:
      case ConvertSInt64ToFloat64:
        return value.convertSIToF64();
      case ExtendS8Int32:
      case ExtendS8Int64:
        return value.extendS8();
      case ExtendS16Int32:
      case ExtendS16Int64:
        return value.extendS16();
      case ExtendS32Int64:
        return value.extendS32();

      // Scalar float ops.
      case NegFloat32:
      case NegFloat64:
        return value.neg();
      case AbsFloat32:
      case AbsFloat64:
        return value.abs();
      case CeilFloat32:
      case CeilFloat64:
        return value.ceil();
      case FloorFloat32:
      case FloorFloat64:
        return value.floor();
      case TruncFloat32:
      case TruncFloat64:
        return value.trunc();
      case NearestFloat32:
      case NearestFloat64:
        return value.nearbyint();
      case SqrtFloat32:
      case SqrtFloat64:
        return value.sqrt();
      // Non-saturating float->int truncation traps on NaN/overflow; the
      // range checks and traps live in truncSFloat/truncUFloat below.
      case TruncSFloat32ToInt32:
      case TruncSFloat64ToInt32:
      case TruncSFloat32ToInt64:
      case TruncSFloat64ToInt64:
        return truncSFloat(curr, value);
      case TruncUFloat32ToInt32:
      case TruncUFloat64ToInt32:
      case TruncUFloat32ToInt64:
      case TruncUFloat64ToInt64:
        return truncUFloat(curr, value);
      case TruncSatSFloat32ToInt32:
      case TruncSatSFloat64ToInt32:
        return value.truncSatToSI32();
      case TruncSatSFloat32ToInt64:
      case TruncSatSFloat64ToInt64:
        return value.truncSatToSI64();
      case TruncSatUFloat32ToInt32:
      case TruncSatUFloat64ToInt32:
        return value.truncSatToUI32();
      case TruncSatUFloat32ToInt64:
      case TruncSatUFloat64ToInt64:
        return value.truncSatToUI64();
      case ReinterpretFloat32:
        return value.castToI32();
      case PromoteFloat32:
        return value.extendToF64();
      case ReinterpretFloat64:
        return value.castToI64();
      case DemoteFloat64:
        return value.demote();

      // SIMD ops.
      case SplatVecI8x16:
        return value.splatI8x16();
      case SplatVecI16x8:
        return value.splatI16x8();
      case SplatVecI32x4:
        return value.splatI32x4();
      case SplatVecI64x2:
        return value.splatI64x2();
      case SplatVecF32x4:
        return value.splatF32x4();
      case SplatVecF64x2:
        return value.splatF64x2();
      case NotVec128:
        return value.notV128();
      case AbsVecI8x16:
        return value.absI8x16();
      case NegVecI8x16:
        return value.negI8x16();
      case AnyTrueVecI8x16:
        return value.anyTrueI8x16();
      case AllTrueVecI8x16:
        return value.allTrueI8x16();
      case BitmaskVecI8x16:
        return value.bitmaskI8x16();
      case AbsVecI16x8:
        return value.absI16x8();
      case NegVecI16x8:
        return value.negI16x8();
      case AnyTrueVecI16x8:
        return value.anyTrueI16x8();
      case AllTrueVecI16x8:
        return value.allTrueI16x8();
      case BitmaskVecI16x8:
        return value.bitmaskI16x8();
      case AbsVecI32x4:
        return value.absI32x4();
      case NegVecI32x4:
        return value.negI32x4();
      case AnyTrueVecI32x4:
        return value.anyTrueI32x4();
      case AllTrueVecI32x4:
        return value.allTrueI32x4();
      case BitmaskVecI32x4:
        return value.bitmaskI32x4();
      case NegVecI64x2:
        return value.negI64x2();
      case AnyTrueVecI64x2:
        return value.anyTrueI64x2();
      case AllTrueVecI64x2:
        return value.allTrueI64x2();
      case AbsVecF32x4:
        return value.absF32x4();
      case NegVecF32x4:
        return value.negF32x4();
      case SqrtVecF32x4:
        return value.sqrtF32x4();
      case CeilVecF32x4:
        return value.ceilF32x4();
      case FloorVecF32x4:
        return value.floorF32x4();
      case TruncVecF32x4:
        return value.truncF32x4();
      case NearestVecF32x4:
        return value.nearestF32x4();
      case AbsVecF64x2:
        return value.absF64x2();
      case NegVecF64x2:
        return value.negF64x2();
      case SqrtVecF64x2:
        return value.sqrtF64x2();
      case CeilVecF64x2:
        return value.ceilF64x2();
      case FloorVecF64x2:
        return value.floorF64x2();
      case TruncVecF64x2:
        return value.truncF64x2();
      case NearestVecF64x2:
        return value.nearestF64x2();
      case TruncSatSVecF32x4ToVecI32x4:
        return value.truncSatToSI32x4();
      case TruncSatUVecF32x4ToVecI32x4:
        return value.truncSatToUI32x4();
      case TruncSatSVecF64x2ToVecI64x2:
        return value.truncSatToSI64x2();
      case TruncSatUVecF64x2ToVecI64x2:
        return value.truncSatToUI64x2();
      case ConvertSVecI32x4ToVecF32x4:
        return value.convertSToF32x4();
      case ConvertUVecI32x4ToVecF32x4:
        return value.convertUToF32x4();
      case ConvertSVecI64x2ToVecF64x2:
        return value.convertSToF64x2();
      case ConvertUVecI64x2ToVecF64x2:
        return value.convertUToF64x2();
      case WidenLowSVecI8x16ToVecI16x8:
        return value.widenLowSToVecI16x8();
      case WidenHighSVecI8x16ToVecI16x8:
        return value.widenHighSToVecI16x8();
      case WidenLowUVecI8x16ToVecI16x8:
        return value.widenLowUToVecI16x8();
      case WidenHighUVecI8x16ToVecI16x8:
        return value.widenHighUToVecI16x8();
      case WidenLowSVecI16x8ToVecI32x4:
        return value.widenLowSToVecI32x4();
      case WidenHighSVecI16x8ToVecI32x4:
        return value.widenHighSToVecI32x4();
      case WidenLowUVecI16x8ToVecI32x4:
        return value.widenLowUToVecI32x4();
      case WidenHighUVecI16x8ToVecI32x4:
        return value.widenHighUToVecI32x4();
      case InvalidUnary:
        WASM_UNREACHABLE("invalid unary op");
    }
    WASM_UNREACHABLE("invalid op");
  }
  Flow visitBinary(Binary* curr) {
    NOTE_ENTER("Binary");
    Flow flow = visit(curr->left);
    if (flow.breaking()) {
      return flow;
    }
    Literal left = flow.getSingleValue();
    flow = visit(curr->right);
    if (flow.breaking()) {
      return flow;
    }
    Literal right = flow.getSingleValue();
    NOTE_EVAL2(left, right);
    // Sanity-check that evaluated operand types match the static types.
    assert(curr->left->type.isConcrete() ? left.type == curr->left->type
                                         : true);
    assert(curr->right->type.isConcrete() ? right.type == curr->right->type
                                          : true);
    switch (curr->op) {
      case AddInt32:
      case AddInt64:
      case AddFloat32:
      case AddFloat64:
        return left.add(right);
      case SubInt32:
      case SubInt64:
      case SubFloat32:
      case SubFloat64:
        return left.sub(right);
      case MulInt32:
      case MulInt64:
      case MulFloat32:
      case MulFloat64:
        return left.mul(right);
      // Integer division and remainder trap on division by zero, and signed
      // division also traps on INT_MIN / -1 (the one overflowing case);
      // INT_MIN % -1 is defined to be 0.
      case DivSInt32: {
        if (right.getInteger() == 0) {
          trap("i32.div_s by 0");
        }
        if (left.getInteger() == std::numeric_limits<int32_t>::min() &&
            right.getInteger() == -1) {
          trap("i32.div_s overflow"); // signed division overflow
        }
        return left.divS(right);
      }
      case DivUInt32: {
        if (right.getInteger() == 0) {
          trap("i32.div_u by 0");
        }
        return left.divU(right);
      }
      case RemSInt32: {
        if (right.getInteger() == 0) {
          trap("i32.rem_s by 0");
        }
        if (left.getInteger() == std::numeric_limits<int32_t>::min() &&
            right.getInteger() == -1) {
          return Literal(int32_t(0));
        }
        return left.remS(right);
      }
      case RemUInt32: {
        if (right.getInteger() == 0) {
          trap("i32.rem_u by 0");
        }
        return left.remU(right);
      }
      case DivSInt64: {
        if (right.getInteger() == 0) {
          trap("i64.div_s by 0");
        }
        if (left.getInteger() == LLONG_MIN && right.getInteger() == -1LL) {
          trap("i64.div_s overflow"); // signed division overflow
        }
        return left.divS(right);
      }
      case DivUInt64: {
        if (right.getInteger() == 0) {
          trap("i64.div_u by 0");
        }
        return left.divU(right);
      }
      case RemSInt64: {
        if (right.getInteger() == 0) {
          trap("i64.rem_s by 0");
        }
        if (left.getInteger() == LLONG_MIN && right.getInteger() == -1LL) {
          return Literal(int64_t(0));
        }
        return left.remS(right);
      }
      case RemUInt64: {
        if (right.getInteger() == 0) {
          trap("i64.rem_u by 0");
        }
        return left.remU(right);
      }
      case DivFloat32:
      case DivFloat64:
        return left.div(right);
      case AndInt32:
      case AndInt64:
        return left.and_(right);
      case OrInt32:
      case OrInt64:
        return left.or_(right);
      case XorInt32:
      case XorInt64:
        return left.xor_(right);
      case ShlInt32:
      case ShlInt64:
        return left.shl(right);
      case ShrUInt32:
      case ShrUInt64:
        return left.shrU(right);
      case ShrSInt32:
      case ShrSInt64:
        return left.shrS(right);
      case RotLInt32:
      case RotLInt64:
        return left.rotL(right);
      case RotRInt32:
      case RotRInt64:
        return left.rotR(right);

      case EqInt32:
      case EqInt64:
      case EqFloat32:
      case EqFloat64:
        return left.eq(right);
      case NeInt32:
      case NeInt64:
      case NeFloat32:
      case NeFloat64:
        return left.ne(right);
      case LtSInt32:
      case LtSInt64:
        return left.ltS(right);
      case LtUInt32:
      case LtUInt64:
        return left.ltU(right);
      case LeSInt32:
      case LeSInt64:
        return left.leS(right);
      case LeUInt32:
      case LeUInt64:
        return left.leU(right);
      case GtSInt32:
      case GtSInt64:
        return left.gtS(right);
      case GtUInt32:
      case GtUInt64:
        return left.gtU(right);
      case GeSInt32:
      case GeSInt64:
        return left.geS(right);
      case GeUInt32:
      case GeUInt64:
        return left.geU(right);
      case LtFloat32:
      case LtFloat64:
        return left.lt(right);
      case LeFloat32:
      case LeFloat64:
        return left.le(right);
      case GtFloat32:
      case GtFloat64:
        return left.gt(right);
      case GeFloat32:
      case GeFloat64:
        return left.ge(right);

      case CopySignFloat32:
      case CopySignFloat64:
        return left.copysign(right);
      case MinFloat32:
      case MinFloat64:
        return left.min(right);
      case MaxFloat32:
      case MaxFloat64:
        return left.max(right);

      // SIMD comparisons.
      case EqVecI8x16:
        return left.eqI8x16(right);
      case NeVecI8x16:
        return left.neI8x16(right);
      case LtSVecI8x16:
        return left.ltSI8x16(right);
      case LtUVecI8x16:
        return left.ltUI8x16(right);
      case GtSVecI8x16:
        return left.gtSI8x16(right);
      case GtUVecI8x16:
        return left.gtUI8x16(right);
      case LeSVecI8x16:
        return left.leSI8x16(right);
      case LeUVecI8x16:
        return left.leUI8x16(right);
      case GeSVecI8x16:
        return left.geSI8x16(right);
      case GeUVecI8x16:
        return left.geUI8x16(right);
      case EqVecI16x8:
        return left.eqI16x8(right);
      case NeVecI16x8:
        return left.neI16x8(right);
      case LtSVecI16x8:
        return left.ltSI16x8(right);
      case LtUVecI16x8:
        return left.ltUI16x8(right);
      case GtSVecI16x8:
        return left.gtSI16x8(right);
      case GtUVecI16x8:
        return left.gtUI16x8(right);
      case LeSVecI16x8:
        return left.leSI16x8(right);
      case LeUVecI16x8:
        return left.leUI16x8(right);
      case GeSVecI16x8:
        return left.geSI16x8(right);
      case GeUVecI16x8:
        return left.geUI16x8(right);
      case EqVecI32x4:
        return left.eqI32x4(right);
      case NeVecI32x4:
        return left.neI32x4(right);
      case LtSVecI32x4:
        return left.ltSI32x4(right);
      case LtUVecI32x4:
        return left.ltUI32x4(right);
      case GtSVecI32x4:
        return left.gtSI32x4(right);
      case GtUVecI32x4:
        return left.gtUI32x4(right);
      case LeSVecI32x4:
        return left.leSI32x4(right);
      case LeUVecI32x4:
        return left.leUI32x4(right);
      case GeSVecI32x4:
        return left.geSI32x4(right);
      case GeUVecI32x4:
        return left.geUI32x4(right);
      case EqVecF32x4:
        return left.eqF32x4(right);
      case NeVecF32x4:
        return left.neF32x4(right);
      case LtVecF32x4:
        return left.ltF32x4(right);
      case GtVecF32x4:
        return left.gtF32x4(right);
      case LeVecF32x4:
        return left.leF32x4(right);
      case GeVecF32x4:
        return left.geF32x4(right);
      case EqVecF64x2:
        return left.eqF64x2(right);
      case NeVecF64x2:
        return left.neF64x2(right);
      case LtVecF64x2:
        return left.ltF64x2(right);
      case GtVecF64x2:
        return left.gtF64x2(right);
      case LeVecF64x2:
        return left.leF64x2(right);
      case GeVecF64x2:
        return left.geF64x2(right);

      case AndVec128:
        return left.andV128(right);
      case OrVec128:
        return left.orV128(right);
      case XorVec128:
        return left.xorV128(right);
      // andnot(a, b) == a & ~b.
      case AndNotVec128:
        return left.andV128(right.notV128());

      // SIMD integer arithmetic.
      case AddVecI8x16:
        return left.addI8x16(right);
      case AddSatSVecI8x16:
        return left.addSaturateSI8x16(right);
      case AddSatUVecI8x16:
        return left.addSaturateUI8x16(right);
      case SubVecI8x16:
        return left.subI8x16(right);
      case SubSatSVecI8x16:
        return left.subSaturateSI8x16(right);
      case SubSatUVecI8x16:
        return left.subSaturateUI8x16(right);
      case MulVecI8x16:
        return left.mulI8x16(right);
      case MinSVecI8x16:
        return left.minSI8x16(right);
      case MinUVecI8x16:
        return left.minUI8x16(right);
      case MaxSVecI8x16:
        return left.maxSI8x16(right);
      case MaxUVecI8x16:
        return left.maxUI8x16(right);
      case AvgrUVecI8x16:
        return left.avgrUI8x16(right);
      case AddVecI16x8:
        return left.addI16x8(right);
      case AddSatSVecI16x8:
        return left.addSaturateSI16x8(right);
      case AddSatUVecI16x8:
        return left.addSaturateUI16x8(right);
      case SubVecI16x8:
        return left.subI16x8(right);
      case SubSatSVecI16x8:
        return left.subSaturateSI16x8(right);
      case SubSatUVecI16x8:
        return left.subSaturateUI16x8(right);
      case MulVecI16x8:
        return left.mulI16x8(right);
      case MinSVecI16x8:
        return left.minSI16x8(right);
      case MinUVecI16x8:
        return left.minUI16x8(right);
      case MaxSVecI16x8:
        return left.maxSI16x8(right);
      case MaxUVecI16x8:
        return left.maxUI16x8(right);
      case AvgrUVecI16x8:
        return left.avgrUI16x8(right);
      case AddVecI32x4:
        return left.addI32x4(right);
      case SubVecI32x4:
        return left.subI32x4(right);
      case MulVecI32x4:
        return left.mulI32x4(right);
      case MinSVecI32x4:
        return left.minSI32x4(right);
      case MinUVecI32x4:
        return left.minUI32x4(right);
      case MaxSVecI32x4:
        return left.maxSI32x4(right);
      case MaxUVecI32x4:
        return left.maxUI32x4(right);
      case DotSVecI16x8ToVecI32x4:
        return left.dotSI16x8toI32x4(right);
      case AddVecI64x2:
        return left.addI64x2(right);
      case SubVecI64x2:
        return left.subI64x2(right);
      case MulVecI64x2:
        return left.mulI64x2(right);

      // SIMD float arithmetic.
      case AddVecF32x4:
        return left.addF32x4(right);
      case SubVecF32x4:
        return left.subF32x4(right);
      case MulVecF32x4:
        return left.mulF32x4(right);
      case DivVecF32x4:
        return left.divF32x4(right);
      case MinVecF32x4:
        return left.minF32x4(right);
      case MaxVecF32x4:
        return left.maxF32x4(right);
      case PMinVecF32x4:
        return left.pminF32x4(right);
      case PMaxVecF32x4:
        return left.pmaxF32x4(right);
      case AddVecF64x2:
        return left.addF64x2(right);
      case SubVecF64x2:
        return left.subF64x2(right);
      case MulVecF64x2:
        return left.mulF64x2(right);
      case DivVecF64x2:
        return left.divF64x2(right);
      case MinVecF64x2:
        return left.minF64x2(right);
      case MaxVecF64x2:
        return left.maxF64x2(right);
      case PMinVecF64x2:
        return left.pminF64x2(right);
      case PMaxVecF64x2:
        return left.pmaxF64x2(right);

      case NarrowSVecI16x8ToVecI8x16:
        return left.narrowSToVecI8x16(right);
      case NarrowUVecI16x8ToVecI8x16:
        return left.narrowUToVecI8x16(right);
      case NarrowSVecI32x4ToVecI16x8:
        return left.narrowSToVecI16x8(right);
      case NarrowUVecI32x4ToVecI16x8:
        return left.narrowUToVecI16x8(right);

      case SwizzleVec8x16:
        return left.swizzleVec8x16(right);

      case InvalidBinary:
        WASM_UNREACHABLE("invalid binary op");
    }
    WASM_UNREACHABLE("invalid op");
  }
MinUVecI32x4: 888 return left.minUI32x4(right); 889 case MaxSVecI32x4: 890 return left.maxSI32x4(right); 891 case MaxUVecI32x4: 892 return left.maxUI32x4(right); 893 case DotSVecI16x8ToVecI32x4: 894 return left.dotSI16x8toI32x4(right); 895 case AddVecI64x2: 896 return left.addI64x2(right); 897 case SubVecI64x2: 898 return left.subI64x2(right); 899 case MulVecI64x2: 900 return left.mulI64x2(right); 901 902 case AddVecF32x4: 903 return left.addF32x4(right); 904 case SubVecF32x4: 905 return left.subF32x4(right); 906 case MulVecF32x4: 907 return left.mulF32x4(right); 908 case DivVecF32x4: 909 return left.divF32x4(right); 910 case MinVecF32x4: 911 return left.minF32x4(right); 912 case MaxVecF32x4: 913 return left.maxF32x4(right); 914 case PMinVecF32x4: 915 return left.pminF32x4(right); 916 case PMaxVecF32x4: 917 return left.pmaxF32x4(right); 918 case AddVecF64x2: 919 return left.addF64x2(right); 920 case SubVecF64x2: 921 return left.subF64x2(right); 922 case MulVecF64x2: 923 return left.mulF64x2(right); 924 case DivVecF64x2: 925 return left.divF64x2(right); 926 case MinVecF64x2: 927 return left.minF64x2(right); 928 case MaxVecF64x2: 929 return left.maxF64x2(right); 930 case PMinVecF64x2: 931 return left.pminF64x2(right); 932 case PMaxVecF64x2: 933 return left.pmaxF64x2(right); 934 935 case NarrowSVecI16x8ToVecI8x16: 936 return left.narrowSToVecI8x16(right); 937 case NarrowUVecI16x8ToVecI8x16: 938 return left.narrowUToVecI8x16(right); 939 case NarrowSVecI32x4ToVecI16x8: 940 return left.narrowSToVecI16x8(right); 941 case NarrowUVecI32x4ToVecI16x8: 942 return left.narrowUToVecI16x8(right); 943 944 case SwizzleVec8x16: 945 return left.swizzleVec8x16(right); 946 947 case InvalidBinary: 948 WASM_UNREACHABLE("invalid binary op"); 949 } 950 WASM_UNREACHABLE("invalid op"); 951 } visitSIMDExtract(SIMDExtract * curr)952 Flow visitSIMDExtract(SIMDExtract* curr) { 953 NOTE_ENTER("SIMDExtract"); 954 Flow flow = this->visit(curr->vec); 955 if (flow.breaking()) { 956 return flow; 957 } 
958 Literal vec = flow.getSingleValue(); 959 switch (curr->op) { 960 case ExtractLaneSVecI8x16: 961 return vec.extractLaneSI8x16(curr->index); 962 case ExtractLaneUVecI8x16: 963 return vec.extractLaneUI8x16(curr->index); 964 case ExtractLaneSVecI16x8: 965 return vec.extractLaneSI16x8(curr->index); 966 case ExtractLaneUVecI16x8: 967 return vec.extractLaneUI16x8(curr->index); 968 case ExtractLaneVecI32x4: 969 return vec.extractLaneI32x4(curr->index); 970 case ExtractLaneVecI64x2: 971 return vec.extractLaneI64x2(curr->index); 972 case ExtractLaneVecF32x4: 973 return vec.extractLaneF32x4(curr->index); 974 case ExtractLaneVecF64x2: 975 return vec.extractLaneF64x2(curr->index); 976 } 977 WASM_UNREACHABLE("invalid op"); 978 } visitSIMDReplace(SIMDReplace * curr)979 Flow visitSIMDReplace(SIMDReplace* curr) { 980 NOTE_ENTER("SIMDReplace"); 981 Flow flow = this->visit(curr->vec); 982 if (flow.breaking()) { 983 return flow; 984 } 985 Literal vec = flow.getSingleValue(); 986 flow = this->visit(curr->value); 987 if (flow.breaking()) { 988 return flow; 989 } 990 Literal value = flow.getSingleValue(); 991 switch (curr->op) { 992 case ReplaceLaneVecI8x16: 993 return vec.replaceLaneI8x16(value, curr->index); 994 case ReplaceLaneVecI16x8: 995 return vec.replaceLaneI16x8(value, curr->index); 996 case ReplaceLaneVecI32x4: 997 return vec.replaceLaneI32x4(value, curr->index); 998 case ReplaceLaneVecI64x2: 999 return vec.replaceLaneI64x2(value, curr->index); 1000 case ReplaceLaneVecF32x4: 1001 return vec.replaceLaneF32x4(value, curr->index); 1002 case ReplaceLaneVecF64x2: 1003 return vec.replaceLaneF64x2(value, curr->index); 1004 } 1005 WASM_UNREACHABLE("invalid op"); 1006 } visitSIMDShuffle(SIMDShuffle * curr)1007 Flow visitSIMDShuffle(SIMDShuffle* curr) { 1008 NOTE_ENTER("SIMDShuffle"); 1009 Flow flow = this->visit(curr->left); 1010 if (flow.breaking()) { 1011 return flow; 1012 } 1013 Literal left = flow.getSingleValue(); 1014 flow = this->visit(curr->right); 1015 if (flow.breaking()) 
{ 1016 return flow; 1017 } 1018 Literal right = flow.getSingleValue(); 1019 return left.shuffleV8x16(right, curr->mask); 1020 } visitSIMDTernary(SIMDTernary * curr)1021 Flow visitSIMDTernary(SIMDTernary* curr) { 1022 NOTE_ENTER("SIMDBitselect"); 1023 Flow flow = this->visit(curr->a); 1024 if (flow.breaking()) { 1025 return flow; 1026 } 1027 Literal a = flow.getSingleValue(); 1028 flow = this->visit(curr->b); 1029 if (flow.breaking()) { 1030 return flow; 1031 } 1032 Literal b = flow.getSingleValue(); 1033 flow = this->visit(curr->c); 1034 if (flow.breaking()) { 1035 return flow; 1036 } 1037 Literal c = flow.getSingleValue(); 1038 switch (curr->op) { 1039 case Bitselect: 1040 return c.bitselectV128(a, b); 1041 default: 1042 // TODO: implement qfma/qfms 1043 WASM_UNREACHABLE("not implemented"); 1044 } 1045 } visitSIMDShift(SIMDShift * curr)1046 Flow visitSIMDShift(SIMDShift* curr) { 1047 NOTE_ENTER("SIMDShift"); 1048 Flow flow = this->visit(curr->vec); 1049 if (flow.breaking()) { 1050 return flow; 1051 } 1052 Literal vec = flow.getSingleValue(); 1053 flow = this->visit(curr->shift); 1054 if (flow.breaking()) { 1055 return flow; 1056 } 1057 Literal shift = flow.getSingleValue(); 1058 switch (curr->op) { 1059 case ShlVecI8x16: 1060 return vec.shlI8x16(shift); 1061 case ShrSVecI8x16: 1062 return vec.shrSI8x16(shift); 1063 case ShrUVecI8x16: 1064 return vec.shrUI8x16(shift); 1065 case ShlVecI16x8: 1066 return vec.shlI16x8(shift); 1067 case ShrSVecI16x8: 1068 return vec.shrSI16x8(shift); 1069 case ShrUVecI16x8: 1070 return vec.shrUI16x8(shift); 1071 case ShlVecI32x4: 1072 return vec.shlI32x4(shift); 1073 case ShrSVecI32x4: 1074 return vec.shrSI32x4(shift); 1075 case ShrUVecI32x4: 1076 return vec.shrUI32x4(shift); 1077 case ShlVecI64x2: 1078 return vec.shlI64x2(shift); 1079 case ShrSVecI64x2: 1080 return vec.shrSI64x2(shift); 1081 case ShrUVecI64x2: 1082 return vec.shrUI64x2(shift); 1083 } 1084 WASM_UNREACHABLE("invalid op"); 1085 } visitSelect(Select * curr)1086 Flow 
  Flow visitSelect(Select* curr) {
    NOTE_ENTER("Select");
    // Note: all three operands are evaluated unconditionally, before the
    // condition chooses which result to return.
    Flow ifTrue = visit(curr->ifTrue);
    if (ifTrue.breaking()) {
      return ifTrue;
    }
    Flow ifFalse = visit(curr->ifFalse);
    if (ifFalse.breaking()) {
      return ifFalse;
    }
    Flow condition = visit(curr->condition);
    if (condition.breaking()) {
      return condition;
    }
    NOTE_EVAL1(condition.getSingleValue());
    return condition.getSingleValue().geti32() ? ifTrue : ifFalse; // ;-)
  }
  Flow visitDrop(Drop* curr) {
    NOTE_ENTER("Drop");
    // Evaluate for side effects / breaks, then discard the value.
    Flow value = visit(curr->value);
    if (value.breaking()) {
      return value;
    }
    return Flow();
  }
  Flow visitReturn(Return* curr) {
    NOTE_ENTER("Return");
    Flow flow;
    if (curr->value) {
      flow = visit(curr->value);
      if (flow.breaking()) {
        return flow;
      }
      NOTE_EVAL1(flow.getSingleValue());
    }
    // A return is modeled as a break to the special RETURN_FLOW label.
    flow.breakTo = RETURN_FLOW;
    return flow;
  }
  Flow visitNop(Nop* curr) {
    NOTE_ENTER("Nop");
    return Flow();
  }
  Flow visitUnreachable(Unreachable* curr) {
    NOTE_ENTER("Unreachable");
    trap("unreachable");
    WASM_UNREACHABLE("unreachable");
  }

  // Truncates a float to a signed integer, trapping on NaN and on values
  // outside the target type's range. The range checks operate on the raw
  // float bits (reinterpret*) via the isInRange* helpers from
  // support/safe_integer.h.
  Literal truncSFloat(Unary* curr, Literal value) {
    double val = value.getFloat();
    if (std::isnan(val)) {
      trap("truncSFloat of nan");
    }
    if (curr->type == Type::i32) {
      if (value.type == Type::f32) {
        if (!isInRangeI32TruncS(value.reinterpreti32())) {
          trap("i32.truncSFloat overflow");
        }
      } else {
        if (!isInRangeI32TruncS(value.reinterpreti64())) {
          trap("i32.truncSFloat overflow");
        }
      }
      return Literal(int32_t(val));
    } else {
      if (value.type == Type::f32) {
        if (!isInRangeI64TruncS(value.reinterpreti32())) {
          trap("i64.truncSFloat overflow");
        }
      } else {
        if (!isInRangeI64TruncS(value.reinterpreti64())) {
          trap("i64.truncSFloat overflow");
        }
      }
      return Literal(int64_t(val));
    }
  }

  // Unsigned counterpart of truncSFloat; same NaN and range trapping.
  Literal truncUFloat(Unary* curr, Literal value) {
    double val = value.getFloat();
    if (std::isnan(val)) {
      trap("truncUFloat of nan");
    }
    if (curr->type == Type::i32) {
      if (value.type == Type::f32) {
        if (!isInRangeI32TruncU(value.reinterpreti32())) {
          trap("i32.truncUFloat overflow");
        }
      } else {
        if (!isInRangeI32TruncU(value.reinterpreti64())) {
          trap("i32.truncUFloat overflow");
        }
      }
      return Literal(uint32_t(val));
    } else {
      if (value.type == Type::f32) {
        if (!isInRangeI64TruncU(value.reinterpreti32())) {
          trap("i64.truncUFloat overflow");
        }
      } else {
        if (!isInRangeI64TruncU(value.reinterpreti64())) {
          trap("i64.truncUFloat overflow");
        }
      }
      return Literal(uint64_t(val));
    }
  }
  Flow visitAtomicFence(AtomicFence* curr) {
    // Wasm currently supports only sequentially consistent atomics, in which
    // case atomic_fence can be lowered to nothing.
    NOTE_ENTER("AtomicFence");
    return Flow();
  }
// Remainder of visitAtomicFence: nothing to do, return an empty flow.
1196 NOTE_ENTER("AtomicFence"); 1197 return Flow(); 1198 }
// tuple.make: evaluate all operands via generateArguments, then pack the
// (asserted-concrete) values into one multi-value Flow.
visitTupleMake(TupleMake * curr)1199 Flow visitTupleMake(TupleMake* curr) { 1200 NOTE_ENTER("tuple.make"); 1201 LiteralList arguments; 1202 Flow flow = generateArguments(curr->operands, arguments); 1203 if (flow.breaking()) { 1204 return flow; 1205 } 1206 for (auto arg : arguments) { 1207 assert(arg.type.isConcrete()); 1208 flow.values.push_back(arg); 1209 } 1210 return flow; 1211 }
// tuple.extract: evaluate the tuple, then select the element at the static
// index (asserted in range).
visitTupleExtract(TupleExtract * curr)1212 Flow visitTupleExtract(TupleExtract* curr) { 1213 NOTE_ENTER("tuple.extract"); 1214 Flow flow = visit(curr->tuple); 1215 if (flow.breaking()) { 1216 return flow; 1217 } 1218 assert(flow.values.size() > curr->index); 1219 return Flow(flow.values[curr->index]); 1220 }
// The operations below need runtime/module state (locals, globals, memory,
// tables) that this expression runner does not carry; subclasses that do
// have such state override them.
visitLocalGet(LocalGet * curr)1221 Flow visitLocalGet(LocalGet* curr) { WASM_UNREACHABLE("unimp"); }
visitLocalSet(LocalSet * curr)1222 Flow visitLocalSet(LocalSet* curr) { WASM_UNREACHABLE("unimp"); }
visitGlobalGet(GlobalGet * curr)1223 Flow visitGlobalGet(GlobalGet* curr) { WASM_UNREACHABLE("unimp"); }
visitGlobalSet(GlobalSet * curr)1224 Flow visitGlobalSet(GlobalSet* curr) { WASM_UNREACHABLE("unimp"); }
visitCall(Call * curr)1225 Flow visitCall(Call* curr) { WASM_UNREACHABLE("unimp"); }
visitCallIndirect(CallIndirect * curr)1226 Flow visitCallIndirect(CallIndirect* curr) { WASM_UNREACHABLE("unimp"); }
visitLoad(Load * curr)1227 Flow visitLoad(Load* curr) { WASM_UNREACHABLE("unimp"); }
visitStore(Store * curr)1228 Flow visitStore(Store* curr) { WASM_UNREACHABLE("unimp"); }
visitMemorySize(MemorySize * curr)1229 Flow visitMemorySize(MemorySize* curr) { WASM_UNREACHABLE("unimp"); }
visitMemoryGrow(MemoryGrow * curr)1230 Flow visitMemoryGrow(MemoryGrow* curr) { WASM_UNREACHABLE("unimp"); }
visitMemoryInit(MemoryInit * curr)1231 Flow visitMemoryInit(MemoryInit* curr) { WASM_UNREACHABLE("unimp"); }
visitDataDrop(DataDrop * curr)1232 Flow visitDataDrop(DataDrop* curr) { WASM_UNREACHABLE("unimp"); }
visitMemoryCopy(MemoryCopy * curr)1233 Flow visitMemoryCopy(MemoryCopy* curr) { WASM_UNREACHABLE("unimp"); }
visitMemoryFill(MemoryFill * curr)1234 Flow visitMemoryFill(MemoryFill* curr) { WASM_UNREACHABLE("unimp"); }
visitAtomicRMW(AtomicRMW * curr)1235 Flow visitAtomicRMW(AtomicRMW* curr) { WASM_UNREACHABLE("unimp"); }
visitAtomicCmpxchg(AtomicCmpxchg * curr)1236 Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) { WASM_UNREACHABLE("unimp"); }
visitAtomicWait(AtomicWait * curr)1237 Flow visitAtomicWait(AtomicWait* curr) { WASM_UNREACHABLE("unimp"); }
visitAtomicNotify(AtomicNotify * curr)1238 Flow visitAtomicNotify(AtomicNotify* curr) { WASM_UNREACHABLE("unimp"); }
visitSIMDLoad(SIMDLoad * curr)1239 Flow visitSIMDLoad(SIMDLoad* curr) { WASM_UNREACHABLE("unimp"); }
visitSIMDLoadSplat(SIMDLoad * curr)1240 Flow visitSIMDLoadSplat(SIMDLoad* curr) { WASM_UNREACHABLE("unimp"); }
visitSIMDLoadExtend(SIMDLoad * curr)1241 Flow visitSIMDLoadExtend(SIMDLoad* curr) { WASM_UNREACHABLE("unimp"); }
visitSIMDLoadZero(SIMDLoad * curr)1242 Flow visitSIMDLoadZero(SIMDLoad* curr) { WASM_UNREACHABLE("unimp"); }
visitPop(Pop * curr)1243 Flow visitPop(Pop* curr) { WASM_UNREACHABLE("unimp"); }
// ref.null: produce the null literal of the expression's type.
visitRefNull(RefNull * curr)1244 Flow visitRefNull(RefNull* curr) { 1245 NOTE_ENTER("RefNull"); 1246 return Literal::makeNull(curr->type); 1247 }
// ref.is_null: evaluate the reference and test it for null, yielding an i32.
visitRefIsNull(RefIsNull * curr)1248 Flow visitRefIsNull(RefIsNull* curr) { 1249 NOTE_ENTER("RefIsNull"); 1250 Flow flow = visit(curr->value); 1251 if (flow.breaking()) { 1252 return flow; 1253 } 1254 const auto& value = flow.getSingleValue(); 1255 NOTE_EVAL1(value); 1256 return Literal(value.isNull()); 1257 }
// ref.func: produce a function-reference literal for the named function.
visitRefFunc(RefFunc * curr)1258 Flow visitRefFunc(RefFunc* curr) { 1259 NOTE_ENTER("RefFunc"); 1260 NOTE_NAME(curr->func); 1261 return Literal::makeFunc(curr->func); 1262 }
// ref.eq (body continues below): evaluate left, then right, and compare.
visitRefEq(RefEq * curr)1263 Flow visitRefEq(RefEq* curr) { 1264 NOTE_ENTER("RefEq"); 1265 Flow flow = visit(curr->left); 1266 if (flow.breaking()) { 1267 return flow; 1268 } 1269 auto left =
// Tail of ref.eq: compare the two evaluated references for equality (i32).
flow.getSingleValue(); 1270 flow = visit(curr->right); 1271 if (flow.breaking()) { 1272 return flow; 1273 } 1274 auto right = flow.getSingleValue(); 1275 NOTE_EVAL2(left, right); 1276 return Literal(int32_t(left == right)); 1277 }
// try: needs call/exception machinery not present at this level.
visitTry(Try * curr)1278 Flow visitTry(Try* curr) { WASM_UNREACHABLE("unimp"); }
// throw: evaluate the operands, package them with the event into an
// ExceptionPackage, and raise it via throwException (which must not return).
visitThrow(Throw * curr)1279 Flow visitThrow(Throw* curr) { 1280 NOTE_ENTER("Throw"); 1281 LiteralList arguments; 1282 Flow flow = generateArguments(curr->operands, arguments); 1283 if (flow.breaking()) { 1284 return flow; 1285 } 1286 NOTE_EVAL1(curr->event); 1287 auto exn = std::make_unique<ExceptionPackage>(); 1288 exn->event = curr->event; 1289 for (auto item : arguments) { 1290 exn->values.push_back(item); 1291 } 1292 throwException(Literal::makeExn(std::move(exn))); 1293 WASM_UNREACHABLE("throw"); 1294 }
// rethrow: evaluate the exnref operand; a null operand traps, otherwise the
// exception value is re-raised.
visitRethrow(Rethrow * curr)1295 Flow visitRethrow(Rethrow* curr) { 1296 NOTE_ENTER("Rethrow"); 1297 Flow flow = visit(curr->exnref); 1298 if (flow.breaking()) { 1299 return flow; 1300 } 1301 const auto& value = flow.getSingleValue(); 1302 if (value.isNull()) { 1303 trap("rethrow: argument is null"); 1304 } 1305 throwException(value); 1306 WASM_UNREACHABLE("rethrow"); 1307 }
// br_on_exn: trap on null; if the package's event matches curr->event, take
// the branch carrying the extracted values, else fall through with the
// original exnref flow.
visitBrOnExn(BrOnExn * curr)1308 Flow visitBrOnExn(BrOnExn* curr) { 1309 NOTE_ENTER("BrOnExn"); 1310 Flow flow = this->visit(curr->exnref); 1311 if (flow.breaking()) { 1312 return flow; 1313 } 1314 const auto& value = flow.getSingleValue(); 1315 if (value.isNull()) { 1316 trap("br_on_exn: argument is null"); 1317 } 1318 auto ex = value.getExceptionPackage(); 1319 if (curr->event != ex.event) { // Not taken 1320 return flow; 1321 } 1322 // Taken 1323 flow.values = ex.values; 1324 flow.breakTo = curr->name; 1325 return flow; 1326 }
// i31.new: wrap the evaluated i32 into an i31 reference literal.
visitI31New(I31New * curr)1327 Flow visitI31New(I31New* curr) { 1328 NOTE_ENTER("I31New"); 1329 Flow flow = visit(curr->value); 1330 if (flow.breaking()) { 1331 return flow; 1332 } 1333 const auto& value = flow.getSingleValue(); 1334 NOTE_EVAL1(value); 1335
return Literal::makeI31(value.geti32()); 1336 }
// i31.get_s / i31.get_u: extract the payload with the requested signedness.
visitI31Get(I31Get * curr)1337 Flow visitI31Get(I31Get* curr) { 1338 NOTE_ENTER("I31Get"); 1339 Flow flow = visit(curr->i31); 1340 if (flow.breaking()) { 1341 return flow; 1342 } 1343 const auto& value = flow.getSingleValue(); 1344 NOTE_EVAL1(value); 1345 return Literal(value.geti31(curr->signed_)); 1346 }
// The GC (garbage collection proposal) operations below are not implemented
// yet; each aborts with a TODO marker.
visitRefTest(RefTest * curr)1347 Flow visitRefTest(RefTest* curr) { 1348 NOTE_ENTER("RefTest"); 1349 WASM_UNREACHABLE("TODO (gc): ref.test"); 1350 }
visitRefCast(RefCast * curr)1351 Flow visitRefCast(RefCast* curr) { 1352 NOTE_ENTER("RefCast"); 1353 WASM_UNREACHABLE("TODO (gc): ref.cast"); 1354 }
visitBrOnCast(BrOnCast * curr)1355 Flow visitBrOnCast(BrOnCast* curr) { 1356 NOTE_ENTER("BrOnCast"); 1357 WASM_UNREACHABLE("TODO (gc): br_on_cast"); 1358 }
visitRttCanon(RttCanon * curr)1359 Flow visitRttCanon(RttCanon* curr) { 1360 NOTE_ENTER("RttCanon"); 1361 WASM_UNREACHABLE("TODO (gc): rtt.canon"); 1362 }
visitRttSub(RttSub * curr)1363 Flow visitRttSub(RttSub* curr) { 1364 NOTE_ENTER("RttSub"); 1365 WASM_UNREACHABLE("TODO (gc): rtt.sub"); 1366 }
visitStructNew(StructNew * curr)1367 Flow visitStructNew(StructNew* curr) { 1368 NOTE_ENTER("StructNew"); 1369 WASM_UNREACHABLE("TODO (gc): struct.new"); 1370 }
visitStructGet(StructGet * curr)1371 Flow visitStructGet(StructGet* curr) { 1372 NOTE_ENTER("StructGet"); 1373 WASM_UNREACHABLE("TODO (gc): struct.get"); 1374 }
visitStructSet(StructSet * curr)1375 Flow visitStructSet(StructSet* curr) { 1376 NOTE_ENTER("StructSet"); 1377 WASM_UNREACHABLE("TODO (gc): struct.set"); 1378 }
visitArrayNew(ArrayNew * curr)1379 Flow visitArrayNew(ArrayNew* curr) { 1380 NOTE_ENTER("ArrayNew"); 1381 WASM_UNREACHABLE("TODO (gc): array.new"); 1382 }
visitArrayGet(ArrayGet * curr)1383 Flow visitArrayGet(ArrayGet* curr) { 1384 NOTE_ENTER("ArrayGet"); 1385 WASM_UNREACHABLE("TODO (gc): array.get"); 1386 }
visitArraySet(ArraySet * curr)1387 Flow visitArraySet(ArraySet* curr) { 1388
// Tail of visitArraySet, plus visitArrayLen: GC TODOs like the ones above.
NOTE_ENTER("ArraySet"); 1389 WASM_UNREACHABLE("TODO (gc): array.set"); 1390 }
visitArrayLen(ArrayLen * curr)1391 Flow visitArrayLen(ArrayLen* curr) { 1392 NOTE_ENTER("ArrayLen"); 1393 WASM_UNREACHABLE("TODO (gc): array.len"); 1394 } 1395
// Hooks for subclasses: how to trap and how to raise a wasm exception. The
// base versions abort, since this runner has no runtime environment.
trap(const char * why)1396 virtual void trap(const char* why) { WASM_UNREACHABLE("unimp"); } 1397
throwException(Literal exn)1398 virtual void throwException(Literal exn) { WASM_UNREACHABLE("unimp"); } 1399 }; 1400 1401
// ConstantExpressionRunner: evaluates expressions that are suspected to be
// constant (used by precompute and the C API). FlagValues below tune how
// conservatively it treats side effects and calls.
// Execute a suspected constant expression (precompute and C-API). 1402 template<typename SubType> 1403 class ConstantExpressionRunner : public ExpressionRunner<SubType> { 1404 public: 1405 enum FlagValues { 1406 // By default, just evaluate the expression, i.e. all we want to know is 1407 // whether it computes down to a concrete value, where it is not necessary 1408 // to preserve side effects like those of a `local.tee`. 1409 DEFAULT = 0, 1410 // Be very careful to preserve any side effects. For example, if we are 1411 // intending to replace the expression with a constant afterwards, even if 1412 // we can technically evaluate down to a constant, we still cannot replace 1413 // the expression if it also sets a local, which must be preserved in this 1414 // scenario so subsequent code keeps functioning. 1415 PRESERVE_SIDEEFFECTS = 1 << 0, 1416 // Traverse through function calls, attempting to compute their concrete 1417 // value. Must not be used in function-parallel scenarios, where the called 1418 // function might be concurrently modified, leading to undefined behavior. 1419 TRAVERSE_CALLS = 1 << 1 1420 }; 1421 1422 // Flags indicating special requirements, for example whether we are just 1423 // evaluating (default), also going to replace the expression afterwards or 1424 // executing in a function-parallel scenario. See FlagValues. 1425 typedef uint32_t Flags; 1426 1427 // Indicates no limit of maxDepth or maxLoopIterations.
1428 static const Index NO_LIMIT = 0; 1429 1430 protected: 1431 // Optional module context to search for globals and called functions. NULL if 1432 // we are not interested in any context. 1433 Module* module = nullptr; 1434 1435 // Flags indicating special requirements. See FlagValues. 1436 Flags flags = FlagValues::DEFAULT; 1437 1438 // Map remembering concrete local values set in the context of this flow. 1439 std::unordered_map<Index, Literals> localValues; 1440 // Map remembering concrete global values set in the context of this flow. 1441 std::unordered_map<Name, Literals> globalValues; 1442 1443 public: 1444 struct NonconstantException { 1445 }; // TODO: use a flow with a special name, as this is likely very slow 1446
// Constructor: forwards depth/loop limits to the base runner and stores the
// (optional) module context and evaluation flags.
ConstantExpressionRunner(Module * module,Flags flags,Index maxDepth,Index maxLoopIterations)1447 ConstantExpressionRunner(Module* module, 1448 Flags flags, 1449 Index maxDepth, 1450 Index maxLoopIterations) 1451 : ExpressionRunner<SubType>(maxDepth, maxLoopIterations), module(module), 1452 flags(flags) {} 1453 1454
// Gets the module this runner is operating on. getModule()1455 Module* getModule() { return module; } 1456 1457
// Sets a known local value to use. setLocalValue(Index index,Literals & values)1458 void setLocalValue(Index index, Literals& values) { 1459 assert(values.isConcrete()); 1460 localValues[index] = values; 1461 } 1462 1463
// Sets a known global value to use. setGlobalValue(Name name,Literals & values)1464 void setGlobalValue(Name name, Literals& values) { 1465 assert(values.isConcrete()); 1466 globalValues[name] = values; 1467 } 1468
// local.get (body continues below): succeeds only if a concrete value was
// previously recorded for this index; otherwise the flow is nonconstant.
visitLocalGet(LocalGet * curr)1469 Flow visitLocalGet(LocalGet* curr) { 1470 NOTE_ENTER("LocalGet"); 1471 NOTE_EVAL1(curr->index); 1472 // Check if a constant value has been set in the context of this runner.
// Tail of local.get: look up a recorded concrete value for this local.
1473 auto iter = localValues.find(curr->index); 1474 if (iter != localValues.end()) { 1475 return Flow(iter->second); 1476 } 1477 return Flow(NONCONSTANT_FLOW); 1478 }
// local.set/tee: only evaluated when side effects need not be preserved;
// records the computed value for later gets and, for a tee, flows it onward.
visitLocalSet(LocalSet * curr)1479 Flow visitLocalSet(LocalSet* curr) { 1480 NOTE_ENTER("LocalSet"); 1481 NOTE_EVAL1(curr->index); 1482 if (!(flags & FlagValues::PRESERVE_SIDEEFFECTS)) { 1483 // If we are evaluating and not replacing the expression, remember the 1484 // constant value set, if any, and see if there is a value flowing through 1485 // a tee. 1486 auto setFlow = ExpressionRunner<SubType>::visit(curr->value); 1487 if (!setFlow.breaking()) { 1488 setLocalValue(curr->index, setFlow.values); 1489 if (curr->type.isConcrete()) { 1490 assert(curr->isTee()); 1491 return setFlow; 1492 } 1493 return Flow(); 1494 } 1495 } 1496 return Flow(NONCONSTANT_FLOW); 1497 }
// global.get: immutable non-imported globals evaluate via their init
// expression; otherwise fall back to any value recorded by a prior set.
visitGlobalGet(GlobalGet * curr)1498 Flow visitGlobalGet(GlobalGet* curr) { 1499 NOTE_ENTER("GlobalGet"); 1500 NOTE_NAME(curr->name); 1501 if (module != nullptr) { 1502 auto* global = module->getGlobal(curr->name); 1503 // Check if the global has an immutable value anyway 1504 if (!global->imported() && !global->mutable_) { 1505 return ExpressionRunner<SubType>::visit(global->init); 1506 } 1507 } 1508 // Check if a constant value has been set in the context of this runner. 1509 auto iter = globalValues.find(curr->name); 1510 if (iter != globalValues.end()) { 1511 return Flow(iter->second); 1512 } 1513 return Flow(NONCONSTANT_FLOW); 1514 }
// global.set (body continues below): like local.set, only evaluated when
// side effects need not be preserved; records the value for later gets.
visitGlobalSet(GlobalSet * curr)1515 Flow visitGlobalSet(GlobalSet* curr) { 1516 NOTE_ENTER("GlobalSet"); 1517 NOTE_NAME(curr->name); 1518 if (!(flags & FlagValues::PRESERVE_SIDEEFFECTS) && module != nullptr) { 1519 // If we are evaluating and not replacing the expression, remember the 1520 // constant value set, if any, for subsequent gets.
1521 auto* global = module->getGlobal(curr->name); 1522 assert(global->mutable_); 1523 auto setFlow = ExpressionRunner<SubType>::visit(curr->value); 1524 if (!setFlow.breaking()) { 1525 setGlobalValue(curr->name, setFlow.values); 1526 return Flow(); 1527 } 1528 } 1529 return Flow(NONCONSTANT_FLOW); 1530 }
// call: with TRAVERSE_CALLS, evaluates the callee's body using the call's
// (partially) constant arguments as locals, saving/restoring localValues
// around the inlined evaluation; a RETURN_FLOW result becomes the value.
visitCall(Call * curr)1531 Flow visitCall(Call* curr) { 1532 NOTE_ENTER("Call"); 1533 NOTE_NAME(curr->target); 1534 // Traverse into functions using the same mode, which we can also do 1535 // when replacing as long as the function does not have any side effects. 1536 // Might yield something useful for simple functions like `clamp`, sometimes 1537 // even if arguments are only partially constant or not constant at all. 1538 if ((flags & FlagValues::TRAVERSE_CALLS) != 0 && module != nullptr) { 1539 auto* func = module->getFunction(curr->target); 1540 if (!func->imported()) { 1541 if (func->sig.results.isConcrete()) { 1542 auto numOperands = curr->operands.size(); 1543 assert(numOperands == func->getNumParams()); 1544 auto prevLocalValues = localValues; 1545 localValues.clear(); 1546 for (Index i = 0; i < numOperands; ++i) { 1547 auto argFlow = ExpressionRunner<SubType>::visit(curr->operands[i]); 1548 if (!argFlow.breaking()) { 1549 assert(argFlow.values.isConcrete()); 1550 localValues[i] = argFlow.values; 1551 } 1552 } 1553 auto retFlow = ExpressionRunner<SubType>::visit(func->body); 1554 localValues = prevLocalValues; 1555 if (retFlow.breakTo == RETURN_FLOW) { 1556 return Flow(retFlow.values); 1557 } else if (!retFlow.breaking()) { 1558 return retFlow; 1559 } 1560 } 1561 } 1562 } 1563 return Flow(NONCONSTANT_FLOW); 1564 } 1565
// The operations below can never be evaluated at compile time (they depend
// on runtime memory/table state), so each yields NONCONSTANT_FLOW.
visitCallIndirect(CallIndirect * curr)1566 Flow visitCallIndirect(CallIndirect* curr) { 1567 NOTE_ENTER("CallIndirect"); 1568 return Flow(NONCONSTANT_FLOW); 1569 }
visitLoad(Load * curr)1570 Flow visitLoad(Load* curr) { 1571 NOTE_ENTER("Load"); 1572 return Flow(NONCONSTANT_FLOW); 1573 }
visitStore(Store * curr)1574 Flow visitStore(Store*
// Continuation of the nonconstant cases: memory, atomic and SIMD operations
// all depend on runtime state, so each returns NONCONSTANT_FLOW.
curr) { 1575 NOTE_ENTER("Store"); 1576 return Flow(NONCONSTANT_FLOW); 1577 }
visitMemorySize(MemorySize * curr)1578 Flow visitMemorySize(MemorySize* curr) { 1579 NOTE_ENTER("MemorySize"); 1580 return Flow(NONCONSTANT_FLOW); 1581 }
visitMemoryGrow(MemoryGrow * curr)1582 Flow visitMemoryGrow(MemoryGrow* curr) { 1583 NOTE_ENTER("MemoryGrow"); 1584 return Flow(NONCONSTANT_FLOW); 1585 }
visitMemoryInit(MemoryInit * curr)1586 Flow visitMemoryInit(MemoryInit* curr) { 1587 NOTE_ENTER("MemoryInit"); 1588 return Flow(NONCONSTANT_FLOW); 1589 }
visitDataDrop(DataDrop * curr)1590 Flow visitDataDrop(DataDrop* curr) { 1591 NOTE_ENTER("DataDrop"); 1592 return Flow(NONCONSTANT_FLOW); 1593 }
visitMemoryCopy(MemoryCopy * curr)1594 Flow visitMemoryCopy(MemoryCopy* curr) { 1595 NOTE_ENTER("MemoryCopy"); 1596 return Flow(NONCONSTANT_FLOW); 1597 }
visitMemoryFill(MemoryFill * curr)1598 Flow visitMemoryFill(MemoryFill* curr) { 1599 NOTE_ENTER("MemoryFill"); 1600 return Flow(NONCONSTANT_FLOW); 1601 }
visitAtomicRMW(AtomicRMW * curr)1602 Flow visitAtomicRMW(AtomicRMW* curr) { 1603 NOTE_ENTER("AtomicRMW"); 1604 return Flow(NONCONSTANT_FLOW); 1605 }
visitAtomicCmpxchg(AtomicCmpxchg * curr)1606 Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) { 1607 NOTE_ENTER("AtomicCmpxchg"); 1608 return Flow(NONCONSTANT_FLOW); 1609 }
visitAtomicWait(AtomicWait * curr)1610 Flow visitAtomicWait(AtomicWait* curr) { 1611 NOTE_ENTER("AtomicWait"); 1612 return Flow(NONCONSTANT_FLOW); 1613 }
visitAtomicNotify(AtomicNotify * curr)1614 Flow visitAtomicNotify(AtomicNotify* curr) { 1615 NOTE_ENTER("AtomicNotify"); 1616 return Flow(NONCONSTANT_FLOW); 1617 }
visitSIMDLoad(SIMDLoad * curr)1618 Flow visitSIMDLoad(SIMDLoad* curr) { 1619 NOTE_ENTER("SIMDLoad"); 1620 return Flow(NONCONSTANT_FLOW); 1621 }
visitSIMDLoadSplat(SIMDLoad * curr)1622 Flow visitSIMDLoadSplat(SIMDLoad* curr) { 1623 NOTE_ENTER("SIMDLoadSplat"); 1624 return Flow(NONCONSTANT_FLOW); 1625 }
visitSIMDLoadExtend(SIMDLoad * curr)1626 Flow visitSIMDLoadExtend(SIMDLoad* curr) { 1627 NOTE_ENTER("SIMDLoadExtend"); 1628 return Flow(NONCONSTANT_FLOW); 1629 }
visitPop(Pop * curr)1630 Flow visitPop(Pop* curr) { 1631 NOTE_ENTER("Pop"); 1632 return Flow(NONCONSTANT_FLOW); 1633 }
visitTry(Try * curr)1634 Flow visitTry(Try* curr) { 1635 NOTE_ENTER("Try"); 1636 return Flow(NONCONSTANT_FLOW); 1637 } 1638
// In constant evaluation, a trap or thrown exception simply means "not
// constant": abort the evaluation via NonconstantException.
trap(const char * why)1639 void trap(const char* why) override { throw NonconstantException(); } 1640
throwException(Literal exn)1641 virtual void throwException(Literal exn) override { 1642 throw NonconstantException(); 1643 } 1644 }; 1645 1646
// InitializerExpressionRunner: runs segment/global initializer expressions;
// the only extra capability it needs over the base runner is reading
// already-computed globals from the GlobalManager.
// Execute an initializer expression of a global, data or element segment. 1647 // see: https://webassembly.org/docs/modules/#initializer-expression 1648 template<typename GlobalManager> 1649 class InitializerExpressionRunner 1650 : public ExpressionRunner<InitializerExpressionRunner<GlobalManager>> { 1651 GlobalManager& globals; 1652 1653 public: InitializerExpressionRunner(GlobalManager & globals,Index maxDepth)1654 InitializerExpressionRunner(GlobalManager& globals, Index maxDepth) 1655 : ExpressionRunner<InitializerExpressionRunner<GlobalManager>>(maxDepth), 1656 globals(globals) {} 1657 visitGlobalGet(GlobalGet * curr)1658 Flow visitGlobalGet(GlobalGet* curr) { return Flow(globals[curr->name]); } 1659 }; 1660 1661
// ModuleInstanceBase: an instantiated module (declaration continues below).
// 1662 // An instance of a WebAssembly module, which can execute it via AST 1663 // interpretation. 1664 // 1665 // To embed this interpreter, you need to provide an ExternalInterface instance 1666 // (see below) which provides the embedding-specific details, that is, how to 1667 // connect to the embedding implementation. 1668 // 1669 // To call into the interpreter, use callExport. 1670 // 1671 1672 template<typename GlobalManager, typename SubType> class ModuleInstanceBase { 1673 public: 1674 // 1675 // You need to implement one of these to create a concrete interpreter.
// ExternalInterface: the embedder-implemented surface. Pure virtuals cover
// imports, indirect calls, memory growth, trapping and exceptions; load and
// store have default implementations that dispatch on type and byte width
// to the per-size hooks further below.
The 1676 // ExternalInterface provides embedding-specific functionality like calling 1677 // an imported function or accessing memory. 1678 // 1679 struct ExternalInterface { initExternalInterface1680 virtual void init(Module& wasm, SubType& instance) {} 1681 virtual void importGlobals(GlobalManager& globals, Module& wasm) = 0; 1682 virtual Literals callImport(Function* import, LiteralList& arguments) = 0; 1683 virtual Literals callTable(Index index, 1684 Signature sig, 1685 LiteralList& arguments, 1686 Type result, 1687 SubType& instance) = 0; 1688 virtual bool growMemory(Address oldSize, Address newSize) = 0; 1689 virtual void trap(const char* why) = 0; 1690 virtual void throwException(Literal exnref) = 0; 1691 1692
// Default load: switch on result type, then on access width; sub-byte
// integer loads honor the instruction's signedness via load8s/u etc.
// the default impls for load and store switch on the sizes. you can either 1693 // customize load/store, or the sub-functions which they call loadExternalInterface1694 virtual Literal load(Load* load, Address addr) { 1695 switch (load->type.getBasic()) { 1696 case Type::i32: { 1697 switch (load->bytes) { 1698 case 1: 1699 return load->signed_ ? Literal((int32_t)load8s(addr)) 1700 : Literal((int32_t)load8u(addr)); 1701 case 2: 1702 return load->signed_ ? Literal((int32_t)load16s(addr)) 1703 : Literal((int32_t)load16u(addr)); 1704 case 4: 1705 return Literal((int32_t)load32s(addr)); 1706 default: 1707 WASM_UNREACHABLE("invalid size"); 1708 } 1709 break; 1710 } 1711 case Type::i64: { 1712 switch (load->bytes) { 1713 case 1: 1714 return load->signed_ ? Literal((int64_t)load8s(addr)) 1715 : Literal((int64_t)load8u(addr)); 1716 case 2: 1717 return load->signed_ ? Literal((int64_t)load16s(addr)) 1718 : Literal((int64_t)load16u(addr)); 1719 case 4: 1720 return load->signed_ ? Literal((int64_t)load32s(addr)) 1721 : Literal((int64_t)load32u(addr)); 1722 case 8: 1723 return Literal((int64_t)load64s(addr)); 1724 default: 1725 WASM_UNREACHABLE("invalid size"); 1726 } 1727 break; 1728 } 1729 case Type::f32: 1730 return Literal(load32u(addr)).castToF32(); 1731 case Type::f64: 1732 return Literal(load64u(addr)).castToF64(); 1733 case Type::v128: 1734 return Literal(load128(addr).data()); 1735 case Type::funcref: 1736 case Type::externref: 1737 case Type::exnref: 1738 case Type::anyref: 1739 case Type::eqref: 1740 case Type::i31ref: 1741 case Type::none: 1742 case Type::unreachable: 1743 WASM_UNREACHABLE("unexpected type"); 1744 } 1745 WASM_UNREACHABLE("invalid type"); 1746 }
// Default store: mirror of load; floats are written via their raw bit
// pattern (reinterpreti32/64) so every bit reaches memory exactly.
storeExternalInterface1747 virtual void store(Store* store, Address addr, Literal value) { 1748 switch (store->valueType.getBasic()) { 1749 case Type::i32: { 1750 switch (store->bytes) { 1751 case 1: 1752 store8(addr, value.geti32()); 1753 break; 1754 case 2: 1755 store16(addr, value.geti32()); 1756 break; 1757 case 4: 1758 store32(addr, value.geti32()); 1759 break; 1760 default: 1761 WASM_UNREACHABLE("invalid store size"); 1762 } 1763 break; 1764 } 1765 case Type::i64: { 1766 switch (store->bytes) { 1767 case 1: 1768 store8(addr, value.geti64()); 1769 break; 1770 case 2: 1771 store16(addr, value.geti64()); 1772 break; 1773 case 4: 1774 store32(addr, value.geti64()); 1775 break; 1776 case 8: 1777 store64(addr, value.geti64()); 1778 break; 1779 default: 1780 WASM_UNREACHABLE("invalid store size"); 1781 } 1782 break; 1783 } 1784 // write floats carefully, ensuring all bits reach memory 1785 case Type::f32: 1786 store32(addr, value.reinterpreti32()); 1787 break; 1788 case Type::f64: 1789 store64(addr, value.reinterpreti64()); 1790 break; 1791 case Type::v128: 1792 store128(addr, value.getv128()); 1793 break; 1794 case Type::funcref: 1795 case Type::externref: 1796 case Type::exnref: 1797 case Type::anyref: 1798 case Type::eqref: 1799 case Type::i31ref: 1800 case Type::none:
1801 case Type::unreachable: 1802 WASM_UNREACHABLE("unexpected type"); 1803 } 1804 } 1805
// Per-size raw memory hooks; embedders that do not override load/store
// above must override these instead. Defaults abort.
load8sExternalInterface1806 virtual int8_t load8s(Address addr) { WASM_UNREACHABLE("unimp"); }
load8uExternalInterface1807 virtual uint8_t load8u(Address addr) { WASM_UNREACHABLE("unimp"); }
load16sExternalInterface1808 virtual int16_t load16s(Address addr) { WASM_UNREACHABLE("unimp"); }
load16uExternalInterface1809 virtual uint16_t load16u(Address addr) { WASM_UNREACHABLE("unimp"); }
load32sExternalInterface1810 virtual int32_t load32s(Address addr) { WASM_UNREACHABLE("unimp"); }
load32uExternalInterface1811 virtual uint32_t load32u(Address addr) { WASM_UNREACHABLE("unimp"); }
load64sExternalInterface1812 virtual int64_t load64s(Address addr) { WASM_UNREACHABLE("unimp"); }
load64uExternalInterface1813 virtual uint64_t load64u(Address addr) { WASM_UNREACHABLE("unimp"); }
load128ExternalInterface1814 virtual std::array<uint8_t, 16> load128(Address addr) { 1815 WASM_UNREACHABLE("unimp"); 1816 } 1817
store8ExternalInterface1818 virtual void store8(Address addr, int8_t value) { 1819 WASM_UNREACHABLE("unimp"); 1820 }
store16ExternalInterface1821 virtual void store16(Address addr, int16_t value) { 1822 WASM_UNREACHABLE("unimp"); 1823 }
store32ExternalInterface1824 virtual void store32(Address addr, int32_t value) { 1825 WASM_UNREACHABLE("unimp"); 1826 }
store64ExternalInterface1827 virtual void store64(Address addr, int64_t value) { 1828 WASM_UNREACHABLE("unimp"); 1829 }
store128ExternalInterface1830 virtual void store128(Address addr, const std::array<uint8_t, 16>&) { 1831 WASM_UNREACHABLE("unimp"); 1832 } 1833
// Write a function name into a table slot (used for table initialization).
tableStoreExternalInterface1834 virtual void tableStore(Address addr, Name entry) { 1835 WASM_UNREACHABLE("unimp"); 1836 } 1837 }; 1838
// CRTP downcast helper: this object as its concrete subclass type.
self()1839 SubType* self() { return static_cast<SubType*>(this); } 1840 1841 Module& wasm; 1842 1843 // Values of globals 1844 GlobalManager globals; 1845 1846 // Multivalue ABI support (see push/pop).
1847 std::vector<Literal> multiValues; 1848
// Constructor = instantiation: import globals, size memory, evaluate
// defined globals' init expressions, let the embedder finish init, fill
// tables and memory from segments, then run the start function if any.
ModuleInstanceBase(Module & wasm,ExternalInterface * externalInterface)1849 ModuleInstanceBase(Module& wasm, ExternalInterface* externalInterface) 1850 : wasm(wasm), externalInterface(externalInterface) { 1851 // import globals from the outside 1852 externalInterface->importGlobals(globals, wasm); 1853 // prepare memory 1854 memorySize = wasm.memory.initial; 1855 // generate internal (non-imported) globals 1856 ModuleUtils::iterDefinedGlobals(wasm, [&](Global* global) { 1857 globals[global->name] = 1858 InitializerExpressionRunner<GlobalManager>(globals, maxDepth) 1859 .visit(global->init) 1860 .values; 1861 }); 1862 1863 // initialize the rest of the external interface 1864 externalInterface->init(wasm, *self()); 1865 1866 initializeTableContents(); 1867 initializeMemoryContents(); 1868 1869 // run start, if present 1870 if (wasm.start.is()) { 1871 LiteralList arguments; 1872 callFunction(wasm.start, arguments); 1873 } 1874 } 1875 1876
// Invoke an export by name; traps (via the embedder) if no such export.
// call an exported function callExport(Name name,const LiteralList & arguments)1877 Literals callExport(Name name, const LiteralList& arguments) { 1878 Export* export_ = wasm.getExportOrNull(name); 1879 if (!export_) { 1880 externalInterface->trap("callExport not found"); 1881 } 1882 return callFunction(export_->value, arguments); 1883 } 1884
// Zero-argument convenience overload.
callExport(Name name)1885 Literals callExport(Name name) { return callExport(name, LiteralList()); } 1886 1887
// Read the current value of an exported global; traps if the export or the
// underlying global cannot be found.
// get an exported global getExport(Name name)1888 Literals getExport(Name name) { 1889 Export* export_ = wasm.getExportOrNull(name); 1890 if (!export_) { 1891 externalInterface->trap("getExport external not found"); 1892 } 1893 Name internalName = export_->value; 1894 auto iter = globals.find(internalName); 1895 if (iter == globals.end()) { 1896 externalInterface->trap("getExport internal not found"); 1897 } 1898 return iter->second; 1899 } 1900
// Render the interpreter's current function-call stack as readable text.
printFunctionStack()1901 std::string printFunctionStack() { 1902 std::string ret = "/== (binaryen interpreter stack trace)\n"; 1903 for (int i = int(functionStack.size()) - 1; i >= 0; i--) { 1904 ret += std::string("|: ") + functionStack[i].str + "\n"; 1905 } 1906 ret += std::string("\\==\n"); 1907 return ret; 1908 } 1909 1910 private: 1911 // Keep a record of call depth, to guard against excessive recursion. 1912 size_t callDepth; 1913 1914 // Function name stack. We maintain this explicitly to allow printing of 1915 // stack traces. 1916 std::vector<Name> functionStack; 1917 1918 std::unordered_set<size_t> droppedSegments; 1919
// Evaluate each table segment's offset expression and copy its function
// names into the table, trapping on out-of-bounds placement.
initializeTableContents()1920 void initializeTableContents() { 1921 for (auto& segment : wasm.table.segments) { 1922 Address offset = 1923 (uint32_t)InitializerExpressionRunner<GlobalManager>(globals, maxDepth) 1924 .visit(segment.offset) 1925 .getSingleValue() 1926 .geti32(); 1927 if (offset + segment.data.size() > wasm.table.initial) { 1928 externalInterface->trap("invalid offset when initializing table"); 1929 } 1930 for (size_t i = 0; i != segment.data.size(); ++i) { 1931 externalInterface->tableStore(offset + i, segment.data[i]); 1932 } 1933 } 1934 }
// Apply each active data segment by synthesizing memory.init + data.drop
// instructions and running them through the runtime expression runner, so
// segment application shares the bulk-memory trapping semantics.
initializeMemoryContents()1936 void initializeMemoryContents() { 1937 Const offset; 1938 offset.value = Literal(uint32_t(0)); 1939 offset.finalize(); 1940 1941 // apply active memory segments 1942 for (size_t i = 0, e = wasm.memory.segments.size(); i < e; ++i) { 1943 Memory::Segment& segment = wasm.memory.segments[i]; 1944 if (segment.isPassive) { 1945 continue; 1946 } 1947 1948 Const size; 1949 size.value = Literal(uint32_t(segment.data.size())); 1950 size.finalize(); 1951 1952 MemoryInit init; 1953 init.segment = i; 1954 init.dest = segment.offset; 1955 init.offset = &offset; 1956 init.size = &size; 1957 init.finalize(); 1958 1959 DataDrop drop; 1960 drop.segment = i; 1961 drop.finalize(); 1962 1963 // we don't actually have a function, but we need one in order to visit 1964 // the memory.init and data.drop instructions.
1965 Function dummyFunc; 1966 FunctionScope dummyScope(&dummyFunc, {}); 1967 RuntimeExpressionRunner runner(*this, dummyScope, maxDepth); 1968 runner.visit(&init); 1969 runner.visit(&drop); 1970 } 1971 } 1972 1973
// FunctionScope: the locals of one invocation; the constructor validates
// argument count and (subtype-aware) types, copies arguments into the
// parameter locals and zero-initializes the var locals.
class FunctionScope { 1974 public: 1975 std::vector<Literals> locals; 1976 Function* function; 1977 FunctionScope(Function * function,const LiteralList & arguments)1978 FunctionScope(Function* function, const LiteralList& arguments) 1979 : function(function) { 1980 if (function->sig.params.size() != arguments.size()) { 1981 std::cerr << "Function `" << function->name << "` expects " 1982 << function->sig.params.size() << " parameters, got " 1983 << arguments.size() << " arguments." << std::endl; 1984 WASM_UNREACHABLE("invalid param count"); 1985 } 1986 locals.resize(function->getNumLocals()); 1987 for (size_t i = 0; i < function->getNumLocals(); i++) { 1988 if (i < arguments.size()) { 1989 if (!Type::isSubType(arguments[i].type, function->sig.params[i])) { 1990 std::cerr << "Function `" << function->name << "` expects type " 1991 << function->sig.params[i] << " for parameter " << i 1992 << ", got " << arguments[i].type << "."
// Tail of the FunctionScope constructor (type-mismatch abort, parameter
// copy, zero-init of var locals), then the runtime expression runner.
<< std::endl; 1993 WASM_UNREACHABLE("invalid param count"); 1994 } 1995 locals[i] = {arguments[i]}; 1996 } else { 1997 assert(function->isVar(i)); 1998 locals[i] = Literal::makeZeros(function->getLocalType(i)); 1999 } 2000 } 2001 } 2002 }; 2003 2004
// RuntimeExpressionRunner: executes expressions with full runtime context —
// the module instance (globals, memory, external interface) plus the
// current function's scope (locals).
// Executes expressions with concrete runtime info, the function and module at 2005 // runtime 2006 class RuntimeExpressionRunner 2007 : public ExpressionRunner<RuntimeExpressionRunner> { 2008 ModuleInstanceBase& instance; 2009 FunctionScope& scope; 2010 2011 public: RuntimeExpressionRunner(ModuleInstanceBase & instance,FunctionScope & scope,Index maxDepth)2012 RuntimeExpressionRunner(ModuleInstanceBase& instance, 2013 FunctionScope& scope, 2014 Index maxDepth) 2015 : ExpressionRunner<RuntimeExpressionRunner>(maxDepth), instance(instance), 2016 scope(scope) {} 2017
// call: evaluate arguments, then dispatch to the embedder for imports or
// to callFunctionInternal for defined functions; return_call is modeled by
// tagging the result with RETURN_FLOW (not yet a true tail call).
visitCall(Call * curr)2018 Flow visitCall(Call* curr) { 2019 NOTE_ENTER("Call"); 2020 NOTE_NAME(curr->target); 2021 LiteralList arguments; 2022 Flow flow = this->generateArguments(curr->operands, arguments); 2023 if (flow.breaking()) { 2024 return flow; 2025 } 2026 auto* func = instance.wasm.getFunction(curr->target); 2027 Flow ret; 2028 if (func->imported()) { 2029 ret.values = instance.externalInterface->callImport(func, arguments); 2030 } else { 2031 ret.values = instance.callFunctionInternal(curr->target, arguments); 2032 } 2033 #ifdef WASM_INTERPRETER_DEBUG 2034 std::cout << "(returned to " << scope.function->name << ")\n"; 2035 #endif 2036 // TODO: make this a proper tail call (return first) 2037 if (curr->isReturn) { 2038 ret.breakTo = RETURN_FLOW; 2039 } 2040 return ret; 2041 }
// call_indirect: evaluate arguments and the table index, then let the
// embedder's callTable do the signature check and dispatch.
visitCallIndirect(CallIndirect * curr)2042 Flow visitCallIndirect(CallIndirect* curr) { 2043 NOTE_ENTER("CallIndirect"); 2044 LiteralList arguments; 2045 Flow flow = this->generateArguments(curr->operands, arguments); 2046 if (flow.breaking()) { 2047 return flow; 2048 } 2049 Flow target = this->visit(curr->target); 2050 if (target.breaking()) { 2051 return target; 2052 } 2053 Index index = target.getSingleValue().geti32(); 2054 Type type = curr->isReturn ? scope.function->sig.results : curr->type; 2055 Flow ret = instance.externalInterface->callTable( 2056 index, curr->sig, arguments, type, *instance.self()); 2057 // TODO: make this a proper tail call (return first) 2058 if (curr->isReturn) { 2059 ret.breakTo = RETURN_FLOW; 2060 } 2061 return ret; 2062 } 2063
// local.get: read from the function scope's locals.
visitLocalGet(LocalGet * curr)2064 Flow visitLocalGet(LocalGet* curr) { 2065 NOTE_ENTER("LocalGet"); 2066 auto index = curr->index; 2067 NOTE_EVAL1(index); 2068 NOTE_EVAL1(scope.locals[index]); 2069 return scope.locals[index]; 2070 }
// local.set/tee: evaluate the value, store it into the local; a tee also
// flows the value onward (type compatibility asserted).
visitLocalSet(LocalSet * curr)2071 Flow visitLocalSet(LocalSet* curr) { 2072 NOTE_ENTER("LocalSet"); 2073 auto index = curr->index; 2074 Flow flow = this->visit(curr->value); 2075 if (flow.breaking()) { 2076 return flow; 2077 } 2078 NOTE_EVAL1(index); 2079 NOTE_EVAL1(flow.getSingleValue()); 2080 assert(curr->isTee() ? Type::isSubType(flow.getType(), curr->type) 2081 : true); 2082 scope.locals[index] = flow.values; 2083 return curr->isTee() ? flow : Flow(); 2084 } 2085
// global.get: read from the instance's global map (asserted present).
visitGlobalGet(GlobalGet * curr)2086 Flow visitGlobalGet(GlobalGet* curr) { 2087 NOTE_ENTER("GlobalGet"); 2088 auto name = curr->name; 2089 NOTE_EVAL1(name); 2090 assert(instance.globals.find(name) != instance.globals.end()); 2091 NOTE_EVAL1(instance.globals[name]); 2092 return instance.globals[name]; 2093 }
// global.set: evaluate the value and overwrite the instance global.
visitGlobalSet(GlobalSet * curr)2094 Flow visitGlobalSet(GlobalSet* curr) { 2095 NOTE_ENTER("GlobalSet"); 2096 auto name = curr->name; 2097 Flow flow = this->visit(curr->value); 2098 if (flow.breaking()) { 2099 return flow; 2100 } 2101 NOTE_EVAL1(name); 2102 NOTE_EVAL1(flow.getSingleValue()); 2103 instance.globals[name] = flow.values; 2104 return Flow(); 2105 } 2106
// load: compute the effective address (getFinalAddress handles offset and
// bounds), verify alignment for atomics, and delegate the raw access to
// the embedder's ExternalInterface.
visitLoad(Load * curr)2107 Flow visitLoad(Load* curr) { 2108 NOTE_ENTER("Load"); 2109 Flow flow = this->visit(curr->ptr); 2110 if (flow.breaking()) { 2111 return flow; 2112 } 2113 NOTE_EVAL1(flow); 2114 auto addr = instance.getFinalAddress(curr, flow.getSingleValue()); 2115 if (curr->isAtomic) { 2116 instance.checkAtomicAddress(addr, curr->bytes); 2117 } 2118 auto ret = instance.externalInterface->load(curr, addr); 2119 NOTE_EVAL1(addr); 2120 NOTE_EVAL1(ret); 2121 return ret; 2122 }
// store: evaluate pointer then value (in that order), compute the final
// address, check atomic alignment, and write via the embedder.
visitStore(Store * curr)2123 Flow visitStore(Store* curr) { 2124 NOTE_ENTER("Store"); 2125 Flow ptr = this->visit(curr->ptr); 2126 if (ptr.breaking()) { 2127 return ptr; 2128 } 2129 Flow value = this->visit(curr->value); 2130 if (value.breaking()) { 2131 return value; 2132 } 2133 auto addr = instance.getFinalAddress(curr, ptr.getSingleValue()); 2134 if (curr->isAtomic) { 2135 instance.checkAtomicAddress(addr, curr->bytes); 2136 } 2137 NOTE_EVAL1(addr); 2138 NOTE_EVAL1(value); 2139 instance.externalInterface->store(curr, addr, value.getSingleValue()); 2140 return Flow(); 2141 } 2142
// visitAtomicRMW begins here; its body continues past the end of this
// chunk, so only the pointer-evaluation prologue is visible.
visitAtomicRMW(AtomicRMW * curr)2143 Flow visitAtomicRMW(AtomicRMW* curr) { 2144 NOTE_ENTER("AtomicRMW"); 2145 Flow ptr = this->visit(curr->ptr); 2146 if (ptr.breaking()) { 2147 return ptr; 2148 } 2149 auto value =
this->visit(curr->value); 2150 if (value.breaking()) { 2151 return value; 2152 } 2153 NOTE_EVAL1(ptr); 2154 auto addr = instance.getFinalAddress(curr, ptr.getSingleValue()); 2155 NOTE_EVAL1(addr); 2156 NOTE_EVAL1(value); 2157 auto loaded = instance.doAtomicLoad(addr, curr->bytes, curr->type); 2158 NOTE_EVAL1(loaded); 2159 auto computed = value.getSingleValue(); 2160 switch (curr->op) { 2161 case Add: 2162 computed = loaded.add(computed); 2163 break; 2164 case Sub: 2165 computed = loaded.sub(computed); 2166 break; 2167 case And: 2168 computed = loaded.and_(computed); 2169 break; 2170 case Or: 2171 computed = loaded.or_(computed); 2172 break; 2173 case Xor: 2174 computed = loaded.xor_(computed); 2175 break; 2176 case Xchg: 2177 break; 2178 } 2179 instance.doAtomicStore(addr, curr->bytes, computed); 2180 return loaded; 2181 } visitAtomicCmpxchg(AtomicCmpxchg * curr)2182 Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) { 2183 NOTE_ENTER("AtomicCmpxchg"); 2184 Flow ptr = this->visit(curr->ptr); 2185 if (ptr.breaking()) { 2186 return ptr; 2187 } 2188 NOTE_EVAL1(ptr); 2189 auto expected = this->visit(curr->expected); 2190 if (expected.breaking()) { 2191 return expected; 2192 } 2193 auto replacement = this->visit(curr->replacement); 2194 if (replacement.breaking()) { 2195 return replacement; 2196 } 2197 auto addr = instance.getFinalAddress(curr, ptr.getSingleValue()); 2198 expected = 2199 Flow(wrapToSmallerSize(expected.getSingleValue(), curr->bytes)); 2200 NOTE_EVAL1(addr); 2201 NOTE_EVAL1(expected); 2202 NOTE_EVAL1(replacement); 2203 auto loaded = instance.doAtomicLoad(addr, curr->bytes, curr->type); 2204 NOTE_EVAL1(loaded); 2205 if (loaded == expected.getSingleValue()) { 2206 instance.doAtomicStore(addr, curr->bytes, replacement.getSingleValue()); 2207 } 2208 return loaded; 2209 } visitAtomicWait(AtomicWait * curr)2210 Flow visitAtomicWait(AtomicWait* curr) { 2211 NOTE_ENTER("AtomicWait"); 2212 Flow ptr = this->visit(curr->ptr); 2213 if (ptr.breaking()) { 2214 return ptr; 
2215 } 2216 NOTE_EVAL1(ptr); 2217 auto expected = this->visit(curr->expected); 2218 NOTE_EVAL1(expected); 2219 if (expected.breaking()) { 2220 return expected; 2221 } 2222 auto timeout = this->visit(curr->timeout); 2223 NOTE_EVAL1(timeout); 2224 if (timeout.breaking()) { 2225 return timeout; 2226 } 2227 auto bytes = curr->expectedType.getByteSize(); 2228 auto addr = instance.getFinalAddress(curr, ptr.getSingleValue(), bytes); 2229 auto loaded = instance.doAtomicLoad(addr, bytes, curr->expectedType); 2230 NOTE_EVAL1(loaded); 2231 if (loaded != expected.getSingleValue()) { 2232 return Literal(int32_t(1)); // not equal 2233 } 2234 // TODO: add threads support! 2235 // for now, just assume we are woken up 2236 return Literal(int32_t(0)); // woken up 2237 } visitAtomicNotify(AtomicNotify * curr)2238 Flow visitAtomicNotify(AtomicNotify* curr) { 2239 NOTE_ENTER("AtomicNotify"); 2240 Flow ptr = this->visit(curr->ptr); 2241 if (ptr.breaking()) { 2242 return ptr; 2243 } 2244 NOTE_EVAL1(ptr); 2245 auto count = this->visit(curr->notifyCount); 2246 NOTE_EVAL1(count); 2247 if (count.breaking()) { 2248 return count; 2249 } 2250 auto addr = instance.getFinalAddress(curr, ptr.getSingleValue(), 4); 2251 // Just check TODO actual threads support 2252 instance.checkAtomicAddress(addr, 4); 2253 return Literal(int32_t(0)); // none woken up 2254 } visitSIMDLoad(SIMDLoad * curr)2255 Flow visitSIMDLoad(SIMDLoad* curr) { 2256 NOTE_ENTER("SIMDLoad"); 2257 switch (curr->op) { 2258 case LoadSplatVec8x16: 2259 case LoadSplatVec16x8: 2260 case LoadSplatVec32x4: 2261 case LoadSplatVec64x2: 2262 return visitSIMDLoadSplat(curr); 2263 case LoadExtSVec8x8ToVecI16x8: 2264 case LoadExtUVec8x8ToVecI16x8: 2265 case LoadExtSVec16x4ToVecI32x4: 2266 case LoadExtUVec16x4ToVecI32x4: 2267 case LoadExtSVec32x2ToVecI64x2: 2268 case LoadExtUVec32x2ToVecI64x2: 2269 return visitSIMDLoadExtend(curr); 2270 case Load32Zero: 2271 case Load64Zero: 2272 return visitSIMDLoadZero(curr); 2273 } 2274 
WASM_UNREACHABLE("invalid op"); 2275 } visitSIMDLoadSplat(SIMDLoad * curr)2276 Flow visitSIMDLoadSplat(SIMDLoad* curr) { 2277 Load load; 2278 load.type = Type::i32; 2279 load.bytes = curr->getMemBytes(); 2280 load.signed_ = false; 2281 load.offset = curr->offset; 2282 load.align = curr->align; 2283 load.isAtomic = false; 2284 load.ptr = curr->ptr; 2285 Literal (Literal::*splat)() const = nullptr; 2286 switch (curr->op) { 2287 case LoadSplatVec8x16: 2288 splat = &Literal::splatI8x16; 2289 break; 2290 case LoadSplatVec16x8: 2291 splat = &Literal::splatI16x8; 2292 break; 2293 case LoadSplatVec32x4: 2294 splat = &Literal::splatI32x4; 2295 break; 2296 case LoadSplatVec64x2: 2297 load.type = Type::i64; 2298 splat = &Literal::splatI64x2; 2299 break; 2300 default: 2301 WASM_UNREACHABLE("invalid op"); 2302 } 2303 load.finalize(); 2304 Flow flow = this->visit(&load); 2305 if (flow.breaking()) { 2306 return flow; 2307 } 2308 return (flow.getSingleValue().*splat)(); 2309 } visitSIMDLoadExtend(SIMDLoad * curr)2310 Flow visitSIMDLoadExtend(SIMDLoad* curr) { 2311 Flow flow = this->visit(curr->ptr); 2312 if (flow.breaking()) { 2313 return flow; 2314 } 2315 NOTE_EVAL1(flow); 2316 Address src(uint32_t(flow.getSingleValue().geti32())); 2317 auto loadLane = [&](Address addr) { 2318 switch (curr->op) { 2319 case LoadExtSVec8x8ToVecI16x8: 2320 return Literal(int32_t(instance.externalInterface->load8s(addr))); 2321 case LoadExtUVec8x8ToVecI16x8: 2322 return Literal(int32_t(instance.externalInterface->load8u(addr))); 2323 case LoadExtSVec16x4ToVecI32x4: 2324 return Literal(int32_t(instance.externalInterface->load16s(addr))); 2325 case LoadExtUVec16x4ToVecI32x4: 2326 return Literal(int32_t(instance.externalInterface->load16u(addr))); 2327 case LoadExtSVec32x2ToVecI64x2: 2328 return Literal(int64_t(instance.externalInterface->load32s(addr))); 2329 case LoadExtUVec32x2ToVecI64x2: 2330 return Literal(int64_t(instance.externalInterface->load32u(addr))); 2331 default: 2332 
WASM_UNREACHABLE("unexpected op"); 2333 } 2334 WASM_UNREACHABLE("invalid op"); 2335 }; 2336 auto fillLanes = [&](auto lanes, size_t laneBytes) { 2337 for (auto& lane : lanes) { 2338 lane = loadLane( 2339 instance.getFinalAddress(curr, Literal(uint32_t(src)), laneBytes)); 2340 src = Address(uint32_t(src) + laneBytes); 2341 } 2342 return Literal(lanes); 2343 }; 2344 switch (curr->op) { 2345 case LoadExtSVec8x8ToVecI16x8: 2346 case LoadExtUVec8x8ToVecI16x8: { 2347 std::array<Literal, 8> lanes; 2348 return fillLanes(lanes, 1); 2349 } 2350 case LoadExtSVec16x4ToVecI32x4: 2351 case LoadExtUVec16x4ToVecI32x4: { 2352 std::array<Literal, 4> lanes; 2353 return fillLanes(lanes, 2); 2354 } 2355 case LoadExtSVec32x2ToVecI64x2: 2356 case LoadExtUVec32x2ToVecI64x2: { 2357 std::array<Literal, 2> lanes; 2358 return fillLanes(lanes, 4); 2359 } 2360 default: 2361 WASM_UNREACHABLE("unexpected op"); 2362 } 2363 WASM_UNREACHABLE("invalid op"); 2364 } visitSIMDLoadZero(SIMDLoad * curr)2365 Flow visitSIMDLoadZero(SIMDLoad* curr) { 2366 Flow flow = this->visit(curr->ptr); 2367 if (flow.breaking()) { 2368 return flow; 2369 } 2370 NOTE_EVAL1(flow); 2371 Address src = instance.getFinalAddress( 2372 curr, flow.getSingleValue(), curr->op == Load32Zero ? 32 : 64); 2373 auto zero = 2374 Literal::makeZero(curr->op == Load32Zero ? 
Type::i32 : Type::i64); 2375 if (curr->op == Load32Zero) { 2376 auto val = Literal(instance.externalInterface->load32u(src)); 2377 return Literal(std::array<Literal, 4>{{val, zero, zero, zero}}); 2378 } else { 2379 auto val = Literal(instance.externalInterface->load64u(src)); 2380 return Literal(std::array<Literal, 2>{{val, zero}}); 2381 } 2382 } visitMemorySize(MemorySize * curr)2383 Flow visitMemorySize(MemorySize* curr) { 2384 NOTE_ENTER("MemorySize"); 2385 return Literal::makeFromInt64(instance.memorySize, 2386 instance.wasm.memory.indexType); 2387 } visitMemoryGrow(MemoryGrow * curr)2388 Flow visitMemoryGrow(MemoryGrow* curr) { 2389 NOTE_ENTER("MemoryGrow"); 2390 auto indexType = instance.wasm.memory.indexType; 2391 auto fail = Literal::makeFromInt64(-1, indexType); 2392 Flow flow = this->visit(curr->delta); 2393 if (flow.breaking()) { 2394 return flow; 2395 } 2396 Flow ret = Literal::makeFromInt64(instance.memorySize, indexType); 2397 uint64_t delta = flow.getSingleValue().getUnsigned(); 2398 if (delta > uint32_t(-1) / Memory::kPageSize && indexType == Type::i32) { 2399 return fail; 2400 } 2401 if (instance.memorySize >= uint32_t(-1) - delta && 2402 indexType == Type::i32) { 2403 return fail; 2404 } 2405 auto newSize = instance.memorySize + delta; 2406 if (newSize > instance.wasm.memory.max) { 2407 return fail; 2408 } 2409 if (!instance.externalInterface->growMemory( 2410 instance.memorySize * Memory::kPageSize, 2411 newSize * Memory::kPageSize)) { 2412 // We failed to grow the memory in practice, even though it was valid 2413 // to try to do so. 
2414 return fail; 2415 } 2416 instance.memorySize = newSize; 2417 return ret; 2418 } visitMemoryInit(MemoryInit * curr)2419 Flow visitMemoryInit(MemoryInit* curr) { 2420 NOTE_ENTER("MemoryInit"); 2421 Flow dest = this->visit(curr->dest); 2422 if (dest.breaking()) { 2423 return dest; 2424 } 2425 Flow offset = this->visit(curr->offset); 2426 if (offset.breaking()) { 2427 return offset; 2428 } 2429 Flow size = this->visit(curr->size); 2430 if (size.breaking()) { 2431 return size; 2432 } 2433 NOTE_EVAL1(dest); 2434 NOTE_EVAL1(offset); 2435 NOTE_EVAL1(size); 2436 2437 assert(curr->segment < instance.wasm.memory.segments.size()); 2438 Memory::Segment& segment = instance.wasm.memory.segments[curr->segment]; 2439 2440 Address destVal(dest.getSingleValue().getUnsigned()); 2441 Address offsetVal(uint32_t(offset.getSingleValue().geti32())); 2442 Address sizeVal(uint32_t(size.getSingleValue().geti32())); 2443 2444 if (offsetVal + sizeVal > 0 && 2445 instance.droppedSegments.count(curr->segment)) { 2446 trap("out of bounds segment access in memory.init"); 2447 } 2448 if ((uint64_t)offsetVal + sizeVal > segment.data.size()) { 2449 trap("out of bounds segment access in memory.init"); 2450 } 2451 if (destVal + sizeVal > instance.memorySize * Memory::kPageSize) { 2452 trap("out of bounds memory access in memory.init"); 2453 } 2454 for (size_t i = 0; i < sizeVal; ++i) { 2455 Literal addr(destVal + i); 2456 instance.externalInterface->store8( 2457 instance.getFinalAddressWithoutOffset(addr, 1), 2458 segment.data[offsetVal + i]); 2459 } 2460 return {}; 2461 } visitDataDrop(DataDrop * curr)2462 Flow visitDataDrop(DataDrop* curr) { 2463 NOTE_ENTER("DataDrop"); 2464 instance.droppedSegments.insert(curr->segment); 2465 return {}; 2466 } visitMemoryCopy(MemoryCopy * curr)2467 Flow visitMemoryCopy(MemoryCopy* curr) { 2468 NOTE_ENTER("MemoryCopy"); 2469 Flow dest = this->visit(curr->dest); 2470 if (dest.breaking()) { 2471 return dest; 2472 } 2473 Flow source = this->visit(curr->source); 2474 
if (source.breaking()) { 2475 return source; 2476 } 2477 Flow size = this->visit(curr->size); 2478 if (size.breaking()) { 2479 return size; 2480 } 2481 NOTE_EVAL1(dest); 2482 NOTE_EVAL1(source); 2483 NOTE_EVAL1(size); 2484 Address destVal(dest.getSingleValue().getUnsigned()); 2485 Address sourceVal(source.getSingleValue().getUnsigned()); 2486 Address sizeVal(size.getSingleValue().getUnsigned()); 2487 2488 if (sourceVal + sizeVal > instance.memorySize * Memory::kPageSize || 2489 destVal + sizeVal > instance.memorySize * Memory::kPageSize || 2490 // FIXME: better/cheaper way to detect wrapping? 2491 sourceVal + sizeVal < sourceVal || sourceVal + sizeVal < sizeVal || 2492 destVal + sizeVal < destVal || destVal + sizeVal < sizeVal) { 2493 trap("out of bounds segment access in memory.copy"); 2494 } 2495 2496 int64_t start = 0; 2497 int64_t end = sizeVal; 2498 int step = 1; 2499 // Reverse direction if source is below dest 2500 if (sourceVal < destVal) { 2501 start = int64_t(sizeVal) - 1; 2502 end = -1; 2503 step = -1; 2504 } 2505 for (int64_t i = start; i != end; i += step) { 2506 instance.externalInterface->store8( 2507 instance.getFinalAddressWithoutOffset(Literal(destVal + i), 1), 2508 instance.externalInterface->load8s( 2509 instance.getFinalAddressWithoutOffset(Literal(sourceVal + i), 1))); 2510 } 2511 return {}; 2512 } visitMemoryFill(MemoryFill * curr)2513 Flow visitMemoryFill(MemoryFill* curr) { 2514 NOTE_ENTER("MemoryFill"); 2515 Flow dest = this->visit(curr->dest); 2516 if (dest.breaking()) { 2517 return dest; 2518 } 2519 Flow value = this->visit(curr->value); 2520 if (value.breaking()) { 2521 return value; 2522 } 2523 Flow size = this->visit(curr->size); 2524 if (size.breaking()) { 2525 return size; 2526 } 2527 NOTE_EVAL1(dest); 2528 NOTE_EVAL1(value); 2529 NOTE_EVAL1(size); 2530 Address destVal(dest.getSingleValue().getUnsigned()); 2531 Address sizeVal(size.getSingleValue().getUnsigned()); 2532 2533 // FIXME: cheaper wrapping detection? 
2534 if (destVal > instance.memorySize * Memory::kPageSize || 2535 sizeVal > instance.memorySize * Memory::kPageSize || 2536 destVal + sizeVal > instance.memorySize * Memory::kPageSize) { 2537 trap("out of bounds memory access in memory.fill"); 2538 } 2539 uint8_t val(value.getSingleValue().geti32()); 2540 for (size_t i = 0; i < sizeVal; ++i) { 2541 instance.externalInterface->store8( 2542 instance.getFinalAddressWithoutOffset(Literal(destVal + i), 1), val); 2543 } 2544 return {}; 2545 } visitTry(Try * curr)2546 Flow visitTry(Try* curr) { 2547 NOTE_ENTER("Try"); 2548 try { 2549 return this->visit(curr->body); 2550 } catch (const WasmException& e) { 2551 instance.multiValues.push_back(e.exn); 2552 return this->visit(curr->catchBody); 2553 } 2554 } visitPop(Pop * curr)2555 Flow visitPop(Pop* curr) { 2556 NOTE_ENTER("Pop"); 2557 assert(!instance.multiValues.empty()); 2558 auto ret = instance.multiValues.back(); 2559 instance.multiValues.pop_back(); 2560 return ret; 2561 } 2562 trap(const char * why)2563 void trap(const char* why) override { 2564 instance.externalInterface->trap(why); 2565 } 2566 throwException(Literal exn)2567 void throwException(Literal exn) override { 2568 instance.externalInterface->throwException(exn); 2569 } 2570 2571 // Given a value, wrap it to a smaller given number of bytes. 
wrapToSmallerSize(Literal value,Index bytes)2572 Literal wrapToSmallerSize(Literal value, Index bytes) { 2573 if (value.type == Type::i32) { 2574 switch (bytes) { 2575 case 1: { 2576 return value.and_(Literal(uint32_t(0xff))); 2577 } 2578 case 2: { 2579 return value.and_(Literal(uint32_t(0xffff))); 2580 } 2581 case 4: { 2582 break; 2583 } 2584 default: 2585 WASM_UNREACHABLE("unexpected bytes"); 2586 } 2587 } else { 2588 assert(value.type == Type::i64); 2589 switch (bytes) { 2590 case 1: { 2591 return value.and_(Literal(uint64_t(0xff))); 2592 } 2593 case 2: { 2594 return value.and_(Literal(uint64_t(0xffff))); 2595 } 2596 case 4: { 2597 return value.and_(Literal(uint64_t(0xffffffffUL))); 2598 } 2599 case 8: { 2600 break; 2601 } 2602 default: 2603 WASM_UNREACHABLE("unexpected bytes"); 2604 } 2605 } 2606 return value; 2607 } 2608 }; 2609 2610 public: 2611 // Call a function, starting an invocation. callFunction(Name name,const LiteralList & arguments)2612 Literals callFunction(Name name, const LiteralList& arguments) { 2613 // if the last call ended in a jump up the stack, it might have left stuff 2614 // for us to clean up here 2615 callDepth = 0; 2616 functionStack.clear(); 2617 return callFunctionInternal(name, arguments); 2618 } 2619 2620 // Internal function call. Must be public so that callTable implementations 2621 // can use it (refactor?) 
callFunctionInternal(Name name,const LiteralList & arguments)2622 Literals callFunctionInternal(Name name, const LiteralList& arguments) { 2623 if (callDepth > maxDepth) { 2624 externalInterface->trap("stack limit"); 2625 } 2626 auto previousCallDepth = callDepth; 2627 callDepth++; 2628 auto previousFunctionStackSize = functionStack.size(); 2629 functionStack.push_back(name); 2630 2631 Function* function = wasm.getFunction(name); 2632 assert(function); 2633 FunctionScope scope(function, arguments); 2634 2635 #ifdef WASM_INTERPRETER_DEBUG 2636 std::cout << "entering " << function->name << "\n with arguments:\n"; 2637 for (unsigned i = 0; i < arguments.size(); ++i) { 2638 std::cout << " $" << i << ": " << arguments[i] << '\n'; 2639 } 2640 #endif 2641 2642 Flow flow = 2643 RuntimeExpressionRunner(*this, scope, maxDepth).visit(function->body); 2644 // cannot still be breaking, it means we missed our stop 2645 assert(!flow.breaking() || flow.breakTo == RETURN_FLOW); 2646 auto type = flow.getType(); 2647 if (!Type::isSubType(type, function->sig.results)) { 2648 std::cerr << "calling " << function->name << " resulted in " << type 2649 << " but the function type is " << function->sig.results 2650 << '\n'; 2651 WASM_UNREACHABLE("unexpected result type"); 2652 } 2653 // may decrease more than one, if we jumped up the stack 2654 callDepth = previousCallDepth; 2655 // if we jumped up the stack, we also need to pop higher frames 2656 while (functionStack.size() > previousFunctionStackSize) { 2657 functionStack.pop_back(); 2658 } 2659 #ifdef WASM_INTERPRETER_DEBUG 2660 std::cout << "exiting " << function->name << " with " << flow.values 2661 << '\n'; 2662 #endif 2663 return flow.values; 2664 } 2665 2666 protected: 2667 Address memorySize; // in pages 2668 2669 static const Index maxDepth = 250; 2670 trapIfGt(uint64_t lhs,uint64_t rhs,const char * msg)2671 void trapIfGt(uint64_t lhs, uint64_t rhs, const char* msg) { 2672 if (lhs > rhs) { 2673 std::stringstream ss; 2674 ss << msg 
<< ": " << lhs << " > " << rhs; 2675 externalInterface->trap(ss.str().c_str()); 2676 } 2677 } 2678 2679 template<class LS> getFinalAddress(LS * curr,Literal ptr,Index bytes)2680 Address getFinalAddress(LS* curr, Literal ptr, Index bytes) { 2681 Address memorySizeBytes = memorySize * Memory::kPageSize; 2682 uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64(); 2683 trapIfGt(curr->offset, memorySizeBytes, "offset > memory"); 2684 trapIfGt(addr, memorySizeBytes - curr->offset, "final > memory"); 2685 addr += curr->offset; 2686 trapIfGt(bytes, memorySizeBytes, "bytes > memory"); 2687 checkLoadAddress(addr, bytes); 2688 return addr; 2689 } 2690 getFinalAddress(LS * curr,Literal ptr)2691 template<class LS> Address getFinalAddress(LS* curr, Literal ptr) { 2692 return getFinalAddress(curr, ptr, curr->bytes); 2693 } 2694 getFinalAddressWithoutOffset(Literal ptr,Index bytes)2695 Address getFinalAddressWithoutOffset(Literal ptr, Index bytes) { 2696 uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64(); 2697 checkLoadAddress(addr, bytes); 2698 return addr; 2699 } 2700 checkLoadAddress(Address addr,Index bytes)2701 void checkLoadAddress(Address addr, Index bytes) { 2702 Address memorySizeBytes = memorySize * Memory::kPageSize; 2703 trapIfGt(addr, memorySizeBytes - bytes, "highest > memory"); 2704 } 2705 checkAtomicAddress(Address addr,Index bytes)2706 void checkAtomicAddress(Address addr, Index bytes) { 2707 checkLoadAddress(addr, bytes); 2708 // Unaligned atomics trap. 
2709 if (bytes > 1) { 2710 if (addr & (bytes - 1)) { 2711 externalInterface->trap("unaligned atomic operation"); 2712 } 2713 } 2714 } 2715 doAtomicLoad(Address addr,Index bytes,Type type)2716 Literal doAtomicLoad(Address addr, Index bytes, Type type) { 2717 checkAtomicAddress(addr, bytes); 2718 Const ptr; 2719 ptr.value = Literal(int32_t(addr)); 2720 ptr.type = Type::i32; 2721 Load load; 2722 load.bytes = bytes; 2723 // When an atomic loads a partial number of bytes for the type, it is 2724 // always an unsigned extension. 2725 load.signed_ = false; 2726 load.align = bytes; 2727 load.isAtomic = true; // understatement 2728 load.ptr = &ptr; 2729 load.type = type; 2730 return externalInterface->load(&load, addr); 2731 } 2732 doAtomicStore(Address addr,Index bytes,Literal toStore)2733 void doAtomicStore(Address addr, Index bytes, Literal toStore) { 2734 checkAtomicAddress(addr, bytes); 2735 Const ptr; 2736 ptr.value = Literal(int32_t(addr)); 2737 ptr.type = Type::i32; 2738 Const value; 2739 value.value = toStore; 2740 value.type = toStore.type; 2741 Store store; 2742 store.bytes = bytes; 2743 store.align = bytes; 2744 store.isAtomic = true; // understatement 2745 store.ptr = &ptr; 2746 store.value = &value; 2747 store.valueType = value.type; 2748 return externalInterface->store(&store, addr, toStore); 2749 } 2750 2751 ExternalInterface* externalInterface; 2752 }; 2753 2754 // The default ModuleInstance uses a trivial global manager 2755 using TrivialGlobalManager = std::map<Name, Literals>; 2756 class ModuleInstance 2757 : public ModuleInstanceBase<TrivialGlobalManager, ModuleInstance> { 2758 public: ModuleInstance(Module & wasm,ExternalInterface * externalInterface)2759 ModuleInstance(Module& wasm, ExternalInterface* externalInterface) 2760 : ModuleInstanceBase(wasm, externalInterface) {} 2761 }; 2762 2763 } // namespace wasm 2764 2765 #endif // wasm_wasm_interpreter_h 2766