/*
 * Copyright 2016 WebAssembly Community Group participants
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//
// Memory Packing.
//
// Reduces binary size by splitting data segments around ranges of zeros. This
// pass assumes that memory initialized by active segments is zero on
// instantiation and therefore simply drops the zero ranges from the active
// segments. For passive segments, we perform the same splitting, but we also
// record how each segment was split and update all bulk memory operations
// accordingly. To preserve trapping semantics for memory.init instructions, it
// is sometimes necessary to explicitly track in globals whether input segments
// would have been dropped. We are careful to emit only as many of these
// globals as necessary.
//
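//
// For example, ignoring the size heuristics described in calculateRanges, an
// active segment such as
//
//   (data (i32.const 100) "hello\00\00\00\00\00\00\00\00world")
//
// could be packed into two smaller segments with the zero run dropped:
//
//   (data (i32.const 100) "hello")
//   (data (i32.const 113) "world")
//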

#include "ir/manipulation.h"
#include "ir/module-utils.h"
#include "ir/utils.h"
#include "pass.h"
#include "wasm-binary.h"
#include "wasm-builder.h"
#include "wasm.h"

namespace wasm {

// A subsection of an original memory segment. If `isZero` is true,
// memory.fill will be used instead of memory.init for this range.
struct Range {
  bool isZero;
  size_t start;
  size_t end;
};

// A function that produces the transformed bulk memory op. We need to use a
// function here instead of simple data because the replacement code sequence
// may require allocating new locals, which in turn requires the enclosing
// Function, which is only available in the parallelized instruction
// replacement phase. However, we can't move the entire calculation of
// replacement code sequences into the parallel phase because the lowering of
// data.drops depends on the lowering of memory.inits to determine whether a
// drop state global is necessary. The solution is that we calculate the shape
// of the replacement code sequence up front and use a closure just to allocate
// and insert new locals as necessary.
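// The Function parameter is what lets a Replacement allocate those locals
// (e.g. via Builder::addVar) at application time; see createReplacements
// below.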
using Replacement = std::function<Expression*(Function*)>;

// Maps each bulk memory op to the replacement that must be applied to it.
using Replacements = std::unordered_map<Expression*, Replacement>;

// A collection of bulk memory operations referring to a particular segment
using Referrers = std::vector<Expression*>;

// memory.init: 2 byte opcode + 1 byte segment index + 1 byte memory index +
// 3 x 2 byte operands
const size_t MEMORY_INIT_SIZE = 10;

// memory.fill: 2 byte opcode + 1 byte memory index + 3 x 2 byte operands
const size_t MEMORY_FILL_SIZE = 9;

// data.drop: 2 byte opcode + ~1 byte index immediate
const size_t DATA_DROP_SIZE = 3;

namespace {

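// Builds the expression `dest > (memory.size << 16)`, i.e. a check that the
// memory.init destination lies beyond the current memory size in bytes
// (memory.size is measured in 64KiB pages).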
Expression*
makeGtShiftedMemorySize(Builder& builder, Module& module, MemoryInit* curr) {
  return builder.makeBinary(
    module.memory.is64() ? GtUInt64 : GtUInt32,
    curr->dest,
    builder.makeBinary(module.memory.is64() ? ShlInt64 : ShlInt32,
                       builder.makeMemorySize(),
                       builder.makeConstPtr(16)));
}

} // anonymous namespace

struct MemoryPacking : public Pass {
  size_t dropStateGlobalCount = 0;

  // FIXME: Chrome has a bug decoding section indices that prevents it from
  // using more than 63. Just use WebLimitations::MaxDataSegments once this is
  // fixed. See https://bugs.chromium.org/p/v8/issues/detail?id=10151.
  uint32_t maxSegments;

  void run(PassRunner* runner, Module* module) override;
  void optimizeBulkMemoryOps(PassRunner* runner, Module* module);
  void getSegmentReferrers(Module* module, std::vector<Referrers>& referrers);
  void dropUnusedSegments(std::vector<Memory::Segment>& segments,
                          std::vector<Referrers>& referrers);
  bool canSplit(const Memory::Segment& segment, const Referrers& referrers);
  void calculateRanges(const Memory::Segment& segment,
                       const Referrers& referrers,
                       std::vector<Range>& ranges);
  void createSplitSegments(Builder& builder,
                           const Memory::Segment& segment,
                           std::vector<Range>& ranges,
                           std::vector<Memory::Segment>& packed,
                           size_t segmentsRemaining);
  void createReplacements(Module* module,
                          const std::vector<Range>& ranges,
                          const Referrers& referrers,
                          Replacements& replacements,
                          const Index segmentIndex);
  void replaceBulkMemoryOps(PassRunner* runner,
                            Module* module,
                            Replacements& replacements);
};

void MemoryPacking::run(PassRunner* runner, Module* module) {
  if (!module->memory.exists) {
    return;
  }

  maxSegments = module->features.hasBulkMemory()
                  ? 63
                  : uint32_t(WebLimitations::MaxDataSegments);
  auto& segments = module->memory.segments;

  // For each segment, a list of bulk memory instructions that refer to it
  std::vector<Referrers> referrers(segments.size());

  if (module->features.hasBulkMemory()) {
    // Optimize out memory.inits and data.drops that can be entirely replaced
    // with other instruction sequences. This can increase the number of
    // unused segments that can be dropped entirely and allows later
    // replacement creation to make more assumptions about what these
    // instructions will look like, such as memory.inits not having both zero
    // offset and size.
    optimizeBulkMemoryOps(runner, module);
    getSegmentReferrers(module, referrers);
    dropUnusedSegments(segments, referrers);
  }

  // The new, split memory segments
  std::vector<Memory::Segment> packed;

  Replacements replacements;
  Builder builder(*module);
  for (size_t origIndex = 0; origIndex < segments.size(); ++origIndex) {
    auto& segment = segments[origIndex];
    auto& currReferrers = referrers[origIndex];

    std::vector<Range> ranges;
    if (canSplit(segment, currReferrers)) {
      calculateRanges(segment, currReferrers, ranges);
    } else {
      // A single range covers the entire segment. Set isZero to false so the
      // original memory.init will be used even if the segment is all zeroes.
      ranges.push_back({false, 0, segment.data.size()});
    }

    Index firstNewIndex = packed.size();
    size_t segmentsRemaining = segments.size() - origIndex;
    createSplitSegments(builder, segment, ranges, packed, segmentsRemaining);
    createReplacements(
      module, ranges, currReferrers, replacements, firstNewIndex);
  }

  segments.swap(packed);

  if (module->features.hasBulkMemory()) {
    replaceBulkMemoryOps(runner, module, replacements);
  }
}

bool MemoryPacking::canSplit(const Memory::Segment& segment,
                             const Referrers& referrers) {
  if (segment.isPassive) {
    for (auto* referrer : referrers) {
      if (auto* init = referrer->dynCast<MemoryInit>()) {
        // Do not try to split if there is a nonconstant offset or size
        if (!init->offset->is<Const>() || !init->size->is<Const>()) {
          return false;
        }
      }
    }
    return true;
  } else {
    // Active segments can only be split if they have constant offsets
    return segment.offset->is<Const>();
  }
}

void MemoryPacking::calculateRanges(const Memory::Segment& segment,
                                    const Referrers& referrers,
                                    std::vector<Range>& ranges) {
  auto& data = segment.data;
  if (data.size() == 0) {
    return;
  }

  // Calculate initial zero and nonzero ranges
  size_t start = 0;
  while (start < data.size()) {
    size_t end = start;
    while (end < data.size() && data[end] == 0) {
      end++;
    }
    if (end > start) {
      ranges.push_back({true, start, end});
      start = end;
    }
    while (end < data.size() && data[end] != 0) {
      end++;
    }
    if (end > start) {
      ranges.push_back({false, start, end});
      start = end;
    }
  }

  // Calculate the number of consecutive zeroes for which splitting is
  // beneficial. This is an approximation that assumes all memory.inits cover
  // an entire segment and that all their arguments are constants. These
  // assumptions are true of all memory.inits generated by the tools.
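  // For example, a passive segment referred to by a single memory.init ends
  // up with threshold = 2 + MEMORY_FILL_SIZE + MEMORY_INIT_SIZE = 21 bytes
  // for interior zero runs and edgeThreshold = MEMORY_FILL_SIZE = 9 bytes for
  // zero runs at the segment's edges.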
  size_t threshold = 0;
  if (segment.isPassive) {
    // Passive segment metadata size
    threshold += 2;
    // Zeroes on the edge do not increase the number of segments or
    // data.drops, so their threshold is lower. The threshold for interior
    // zeroes depends on an estimate of the number of new memory.init,
    // memory.fill, and data.drop instructions that splitting would introduce.
    size_t edgeThreshold = 0;
    for (auto* referrer : referrers) {
      if (referrer->is<MemoryInit>()) {
        // Splitting adds a new memory.fill and a new memory.init
        threshold += MEMORY_FILL_SIZE + MEMORY_INIT_SIZE;
        edgeThreshold += MEMORY_FILL_SIZE;
      } else {
        threshold += DATA_DROP_SIZE;
      }
    }

    // Merge edge zeroes if they are not worth splitting
    if (ranges.size() >= 2) {
      auto last = ranges.end() - 1;
      auto penultimate = ranges.end() - 2;
      if (last->isZero && last->end - last->start <= edgeThreshold) {
        penultimate->end = last->end;
        ranges.erase(last);
      }
    }
    if (ranges.size() >= 2) {
      auto first = ranges.begin();
      auto second = ranges.begin() + 1;
      if (first->isZero && first->end - first->start <= edgeThreshold) {
        second->start = first->start;
        ranges.erase(first);
      }
    }
  } else {
    // Legacy ballpark overhead of active segment with offset
    // TODO: Tune this
    threshold = 8;
  }

  // Merge ranges across small spans of zeroes
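  // For example, {nonzero, zero, nonzero} collapses into a single range
  // covering all three when the zero run is no longer than the threshold.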
  std::vector<Range> mergedRanges = {ranges.front()};
  size_t i;
  for (i = 1; i < ranges.size() - 1; ++i) {
    auto left = mergedRanges.end() - 1;
    auto curr = ranges.begin() + i;
    auto right = ranges.begin() + i + 1;
    if (curr->isZero && curr->end - curr->start <= threshold) {
      left->end = right->end;
      ++i;
    } else {
      mergedRanges.push_back(*curr);
    }
  }
  // Add the final range if it hasn't already been merged in
  if (i < ranges.size()) {
    mergedRanges.push_back(ranges.back());
  }
  std::swap(ranges, mergedRanges);
}

void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
  struct Optimizer : WalkerPass<PostWalker<Optimizer>> {
    bool isFunctionParallel() override { return true; }
    Pass* create() override { return new Optimizer; }

    bool needsRefinalizing;

    void visitMemoryInit(MemoryInit* curr) {
      Builder builder(*getModule());
      Memory::Segment& segment = getModule()->memory.segments[curr->segment];
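      // After instantiation an active segment behaves as if it had already
      // been dropped, so for bounds-checking purposes its length is 0 and any
      // memory.init with a nonzero offset or size must trap.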
      size_t maxRuntimeSize = segment.isPassive ? segment.data.size() : 0;
      bool mustNop = false;
      bool mustTrap = false;
      auto* offset = curr->offset->dynCast<Const>();
      auto* size = curr->size->dynCast<Const>();
      if (offset && uint32_t(offset->value.geti32()) > maxRuntimeSize) {
        mustTrap = true;
      }
      if (size && uint32_t(size->value.geti32()) > maxRuntimeSize) {
        mustTrap = true;
      }
      if (offset && size) {
        uint64_t offsetVal(offset->value.geti32());
        uint64_t sizeVal(size->value.geti32());
        if (offsetVal + sizeVal > maxRuntimeSize) {
          mustTrap = true;
        } else if (offsetVal == 0 && sizeVal == 0) {
          mustNop = true;
        }
      }
      assert(!mustNop || !mustTrap);
      if (mustNop) {
        // Offset and size are 0, so just trap if dest > memory.size
        replaceCurrent(
          builder.makeIf(makeGtShiftedMemorySize(builder, *getModule(), curr),
                         builder.makeUnreachable()));
      } else if (mustTrap) {
        // Drop dest, offset, and size then trap
        replaceCurrent(builder.blockify(builder.makeDrop(curr->dest),
                                        builder.makeDrop(curr->offset),
                                        builder.makeDrop(curr->size),
                                        builder.makeUnreachable()));
        needsRefinalizing = true;
      } else if (!segment.isPassive) {
        // trap if (dest > memory.size | offset | size) != 0
        replaceCurrent(builder.makeIf(
          builder.makeBinary(
            OrInt32,
            makeGtShiftedMemorySize(builder, *getModule(), curr),
            builder.makeBinary(OrInt32, curr->offset, curr->size)),
          builder.makeUnreachable()));
      }
    }
    void visitDataDrop(DataDrop* curr) {
      if (!getModule()->memory.segments[curr->segment].isPassive) {
        ExpressionManipulator::nop(curr);
      }
    }
    void doWalkFunction(Function* func) {
      needsRefinalizing = false;
      super::doWalkFunction(func);
      if (needsRefinalizing) {
        ReFinalize().walkFunctionInModule(func, getModule());
      }
    }
  } optimizer;
  optimizer.run(runner, module);
}

void MemoryPacking::getSegmentReferrers(Module* module,
                                        std::vector<Referrers>& referrers) {
  auto collectReferrers = [&](Function* func,
                              std::vector<Referrers>& referrers) {
    if (func->imported()) {
      return;
    }
    struct Collector : WalkerPass<PostWalker<Collector>> {
      std::vector<Referrers>& referrers;
      Collector(std::vector<Referrers>& referrers) : referrers(referrers) {}

      void visitMemoryInit(MemoryInit* curr) {
        referrers[curr->segment].push_back(curr);
      }
      void visitDataDrop(DataDrop* curr) {
        referrers[curr->segment].push_back(curr);
      }
      void doWalkFunction(Function* func) {
        referrers.resize(getModule()->memory.segments.size());
        super::doWalkFunction(func);
      }
    } collector(referrers);
    collector.walkFunctionInModule(func, module);
  };
  ModuleUtils::ParallelFunctionAnalysis<std::vector<Referrers>> analysis(
    *module, collectReferrers);
  referrers.resize(module->memory.segments.size());
  for (auto& pair : analysis.map) {
    std::vector<Referrers>& funcReferrers = pair.second;
    for (size_t i = 0; i < funcReferrers.size(); ++i) {
      referrers[i].insert(
        referrers[i].end(), funcReferrers[i].begin(), funcReferrers[i].end());
    }
  }
}

void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
                                       std::vector<Referrers>& referrers) {
  std::vector<Memory::Segment> usedSegments;
  std::vector<Referrers> usedReferrers;
  // Remove segments that are never used
  // TODO: remove unused portions of partially used segments as well
  for (size_t i = 0; i < segments.size(); ++i) {
    bool used = false;
    if (segments[i].isPassive) {
      for (auto* referrer : referrers[i]) {
        if (referrer->is<MemoryInit>()) {
          used = true;
          break;
        }
      }
    } else {
      used = true;
    }
    if (used) {
      usedSegments.push_back(segments[i]);
      usedReferrers.push_back(referrers[i]);
    } else {
      // All referrers are data.drops. Make them nops.
      for (auto* referrer : referrers[i]) {
        ExpressionManipulator::nop(referrer);
      }
    }
  }
  std::swap(segments, usedSegments);
  std::swap(referrers, usedReferrers);
}

void MemoryPacking::createSplitSegments(Builder& builder,
                                        const Memory::Segment& segment,
                                        std::vector<Range>& ranges,
                                        std::vector<Memory::Segment>& packed,
                                        size_t segmentsRemaining) {
  for (size_t i = 0; i < ranges.size(); ++i) {
    Range& range = ranges[i];
    if (range.isZero) {
      continue;
    }
    Expression* offset = nullptr;
    if (!segment.isPassive) {
      if (auto* c = segment.offset->dynCast<Const>()) {
        offset = builder.makeConst(int32_t(c->value.geti32() + range.start));
      } else {
        assert(ranges.size() == 1);
        offset = segment.offset;
      }
    }
    if (maxSegments <= packed.size() + segmentsRemaining) {
      // Give up splitting and merge all remaining ranges except end zeroes
      auto lastNonzero = ranges.end() - 1;
      if (lastNonzero->isZero) {
        --lastNonzero;
      }
      range.end = lastNonzero->end;
      ranges.erase(ranges.begin() + i + 1, lastNonzero + 1);
    }
    packed.emplace_back(segment.isPassive,
                        offset,
                        &segment.data[range.start],
                        range.end - range.start);
  }
}

void MemoryPacking::createReplacements(Module* module,
                                       const std::vector<Range>& ranges,
                                       const Referrers& referrers,
                                       Replacements& replacements,
                                       const Index segmentIndex) {
  // If there was no transformation, only update the indices
  if (ranges.size() == 1 && !ranges.front().isZero) {
    for (auto referrer : referrers) {
      replacements[referrer] = [referrer, segmentIndex](Function*) {
        if (auto* init = referrer->dynCast<MemoryInit>()) {
          init->segment = segmentIndex;
        } else if (auto* drop = referrer->dynCast<DataDrop>()) {
          drop->segment = segmentIndex;
        } else {
          WASM_UNREACHABLE("Unexpected bulk memory operation");
        }
        return referrer;
      };
    }
    return;
  }

  Builder builder(*module);

  Name dropStateGlobal;

  // Return the drop state global, initializing it if it does not exist. This
  // may change module-global state and has the important side effect of
  // setting dropStateGlobal, so it must be evaluated eagerly, not in the
  // replacements.
  auto getDropStateGlobal = [&]() {
    if (dropStateGlobal != Name()) {
      return dropStateGlobal;
    }
    dropStateGlobal = Name(std::string("__mem_segment_drop_state_") +
                           std::to_string(dropStateGlobalCount++));
    module->addGlobal(builder.makeGlobal(dropStateGlobal,
                                         Type::i32,
                                         builder.makeConst(int32_t(0)),
                                         Builder::Mutable));
    return dropStateGlobal;
  };

  // Create replacements for memory.init instructions first
  for (auto referrer : referrers) {
    auto* init = referrer->dynCast<MemoryInit>();
    if (init == nullptr) {
      continue;
    }

    // Nonconstant offsets or sizes will have inhibited splitting
    size_t start = init->offset->cast<Const>()->value.geti32();
    size_t end = start + init->size->cast<Const>()->value.geti32();

    // Index of the range from which this memory.init starts reading
    size_t firstRangeIdx = 0;
    while (firstRangeIdx < ranges.size() &&
           ranges[firstRangeIdx].end <= start) {
      ++firstRangeIdx;
    }

    // Handle zero-length memory.inits separately so we can later assume that
    // start is in bounds and that some range will be intersected.
    if (start == end) {
      // Offset is nonzero because init would otherwise have previously been
      // optimized out, so trap if the dest is out of bounds or the segment is
      // dropped
      Expression* result = builder.makeIf(
        builder.makeBinary(
          OrInt32,
          makeGtShiftedMemorySize(builder, *module, init),
          builder.makeGlobalGet(getDropStateGlobal(), Type::i32)),
        builder.makeUnreachable());
      replacements[init] = [result](Function*) { return result; };
      continue;
    }

    assert(firstRangeIdx < ranges.size());

    // Split init into multiple memory.inits and memory.fills, storing the
    // original base destination in a local if it is not a constant. If the
    // first access is a memory.fill, explicitly check the drop status first
    // to avoid writing zeroes when we should have trapped.
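    //
    // The replacement for an init reading a nonzero range followed by a zero
    // range has roughly this shape (pseudo-wat, with $dest a fresh local and
    // $split the first new split segment):
    //
    //   (local.set $dest ...)    ;; only if the original dest is nonconstant
    //   (memory.init $split (local.get $dest) (i32.const off) (i32.const k))
    //   (memory.fill (i32.add (local.get $dest) (i32.const k))
    //                (i32.const 0)
    //                (i32.const m))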
    Expression* result = nullptr;
    auto appendResult = [&](Expression* expr) {
      result = result ? builder.blockify(result, expr) : expr;
    };

    // The local var holding the dest is not known until replacement time.
    // Keep track of the locations where it will need to be patched in.
    Index* setVar = nullptr;
    std::vector<Index*> getVars;
    if (!init->dest->is<Const>()) {
      auto set = builder.makeLocalSet(-1, init->dest);
      setVar = &set->index;
      appendResult(set);
    }

    // We only need to explicitly check the drop state when we will emit
    // memory.fill first, since memory.init will implicitly do the check for
    // us.
    if (ranges[firstRangeIdx].isZero) {
      appendResult(
        builder.makeIf(builder.makeGlobalGet(getDropStateGlobal(), Type::i32),
                       builder.makeUnreachable()));
    }

    size_t bytesWritten = 0;

    size_t initIndex = segmentIndex;
    for (size_t i = firstRangeIdx; i < ranges.size() && ranges[i].start < end;
         ++i) {
      auto& range = ranges[i];

      // Calculate dest, either as a const or as an addition to the dest local
      Expression* dest;
      if (auto* c = init->dest->dynCast<Const>()) {
        dest = builder.makeConst(int32_t(c->value.geti32() + bytesWritten));
      } else {
        auto* get = builder.makeLocalGet(-1, Type::i32);
        getVars.push_back(&get->index);
        dest = get;
        if (bytesWritten > 0) {
          Const* addend = builder.makeConst(int32_t(bytesWritten));
          dest = builder.makeBinary(AddInt32, dest, addend);
        }
      }

      // How many bytes are read from this range
      size_t bytes = std::min(range.end, end) - std::max(range.start, start);
      Expression* size = builder.makeConst(int32_t(bytes));
      bytesWritten += bytes;

      // Create new memory.init or memory.fill
      if (range.isZero) {
        Expression* value = builder.makeConst(Literal::makeZero(Type::i32));
        appendResult(builder.makeMemoryFill(dest, value, size));
      } else {
        size_t offsetBytes = std::max(start, range.start) - range.start;
        Expression* offset = builder.makeConst(int32_t(offsetBytes));
        appendResult(builder.makeMemoryInit(initIndex, dest, offset, size));
        initIndex++;
      }
    }

    // Non-zero length memory.inits must have intersected some range
    assert(result);
    replacements[init] = [module, setVar, getVars, result](Function* function) {
      if (setVar != nullptr) {
        Index destVar = Builder(*module).addVar(function, Type::i32);
        *setVar = destVar;
        for (auto* getVar : getVars) {
          *getVar = destVar;
        }
      }
      return result;
    };
  }

  // Create replacements for data.drop instructions now that we know whether
  // we need a drop state global
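  // For example, a drop of a segment that was split into two nonzero ranges
  // (and that required a drop state global) becomes roughly:
  //
  //   (global.set $__mem_segment_drop_state_N (i32.const 1))
  //   (data.drop $first)
  //   (data.drop $second)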
  for (auto drop : referrers) {
    if (!drop->is<DataDrop>()) {
      continue;
    }

    Expression* result = nullptr;
    auto appendResult = [&](Expression* expr) {
      result = result ? builder.blockify(result, expr) : expr;
    };

    // Track drop state in a global only if some memory.init required it
    if (dropStateGlobal != Name()) {
      appendResult(
        builder.makeGlobalSet(dropStateGlobal, builder.makeConst(int32_t(1))));
    }
    size_t dropIndex = segmentIndex;
    for (auto range : ranges) {
      if (!range.isZero) {
        appendResult(builder.makeDataDrop(dropIndex++));
      }
    }
    replacements[drop] = [result, module](Function*) {
      return result ? result : Builder(*module).makeNop();
    };
  }
}

void MemoryPacking::replaceBulkMemoryOps(PassRunner* runner,
                                         Module* module,
                                         Replacements& replacements) {
  struct Replacer : WalkerPass<PostWalker<Replacer>> {
    bool isFunctionParallel() override { return true; }

    Replacements& replacements;

    Replacer(Replacements& replacements) : replacements(replacements) {}
    Pass* create() override { return new Replacer(replacements); }

    void visitMemoryInit(MemoryInit* curr) {
      auto replacement = replacements.find(curr);
      assert(replacement != replacements.end());
      replaceCurrent(replacement->second(getFunction()));
    }

    void visitDataDrop(DataDrop* curr) {
      auto replacement = replacements.find(curr);
      assert(replacement != replacements.end());
      replaceCurrent(replacement->second(getFunction()));
    }
  } replacer(replacements);
  replacer.run(runner, module);
}

Pass* createMemoryPackingPass() { return new MemoryPacking(); }

} // namespace wasm