1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 // The original source code covered by the above license above has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34
35 #include "src/assembler.h"
36
37 #include "src/assembler-inl.h"
38 #include "src/code-stubs.h"
39 #include "src/deoptimizer.h"
40 #include "src/disassembler.h"
41 #include "src/instruction-stream.h"
42 #include "src/isolate.h"
43 #include "src/ostreams.h"
44 #include "src/simulator.h" // For flushing instruction cache.
45 #include "src/snapshot/serializer-common.h"
46
47 namespace v8 {
48 namespace internal {
49
// Comment text recorded for filler entries used as deoptimization padding.
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
51
52 // -----------------------------------------------------------------------------
53 // Implementation of AssemblerBase
54
// Snapshots the isolate state the assembler needs up front: whether the
// serializer is enabled and, on x64/arm64 only, the start of the code range.
AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
    : serializer_enabled_(isolate->serializer_enabled())
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
      ,
      code_range_start_(
          isolate->heap()->memory_allocator()->code_range()->start())
#endif
{
}
64
// Constructs an assembler emitting into |buffer|. If |buffer| is null, a
// buffer of |buffer_size| bytes (at least kMinimalBufferSize) is allocated
// here and owned (and later freed) by this assembler.
AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
                             int buffer_size)
    : isolate_data_(isolate_data),
      enabled_cpu_features_(0),
      emit_debug_code_(FLAG_debug_code),
      predictable_code_size_(false),
      constant_pool_available_(false),
      jump_optimization_info_(nullptr) {
  // We own the buffer only when the caller did not provide one.
  own_buffer_ = buffer == nullptr;
  if (buffer_size == 0) buffer_size = kMinimalBufferSize;
  DCHECK_GT(buffer_size, 0);
  if (own_buffer_) buffer = NewArray<byte>(buffer_size);
  buffer_ = static_cast<byte*>(buffer);
  buffer_size_ = buffer_size;
  // Code emission starts at the beginning of the buffer.
  pc_ = buffer_;
}
81
~AssemblerBase()82 AssemblerBase::~AssemblerBase() {
83 if (own_buffer_) DeleteArray(buffer_);
84 }
85
// Flushes the instruction cache for [start, start + size). Under the
// simulator this flushes the simulator's own i-cache (guarded by its mutex);
// on real hardware it delegates to the CPU-specific implementation.
void AssemblerBase::FlushICache(void* start, size_t size) {
  if (size == 0) return;

#if defined(USE_SIMULATOR)
  base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
  Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
  CpuFeatures::FlushICache(start, size);
#endif  // USE_SIMULATOR
}
96
// Disassembles the code emitted so far (buffer_ up to pc_) to stdout.
void AssemblerBase::Print(Isolate* isolate) {
  OFStream os(stdout);
  v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_);
}
101
102 // -----------------------------------------------------------------------------
103 // Implementation of PredictableCodeSizeScope
104
// RAII scope that forces predictable code size on |assembler| and remembers
// the starting pc offset so the destructor can verify |expected_size|.
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
                                                   int expected_size)
    : assembler_(assembler),
      expected_size_(expected_size),
      start_offset_(assembler->pc_offset()),
      old_value_(assembler->predictable_code_size()) {
  assembler_->set_predictable_code_size(true);
}
113
// Checks that exactly |expected_size_| bytes were emitted inside the scope,
// then restores the previous predictable-code-size setting.
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
  CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
  assembler_->set_predictable_code_size(old_value_);
}
118
119 // -----------------------------------------------------------------------------
120 // Implementation of CpuFeatureScope
121
122 #ifdef DEBUG
// Debug-only scope that temporarily enables CPU feature |f| on |assembler|,
// saving the previous feature set for restoration in the destructor.
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                                 CheckPolicy check)
    : assembler_(assembler) {
  // With kCheckSupported, the feature must actually be available on this CPU.
  DCHECK_IMPLIES(check == kCheckSupported, CpuFeatures::IsSupported(f));
  old_enabled_ = assembler_->enabled_cpu_features();
  assembler_->EnableCpuFeature(f);
}
130
// Restores the feature set that was active before the scope was entered.
CpuFeatureScope::~CpuFeatureScope() {
  assembler_->set_enabled_cpu_features(old_enabled_);
}
134 #endif
135
// Static CpuFeatures state; filled in lazily by the platform-specific
// CpuFeatures initialization code.
bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
140
141 // -----------------------------------------------------------------------------
142 // Implementation of RelocInfoWriter and RelocIterator
143 //
144 // Relocation information is written backwards in memory, from high addresses
145 // towards low addresses, byte by byte. Therefore, in the encodings listed
146 // below, the first byte listed it at the highest address, and successive
147 // bytes in the record are at progressively lower addresses.
148 //
149 // Encoding
150 //
151 // The most common modes are given single-byte encodings. Also, it is
152 // easy to identify the type of reloc info and skip unwanted modes in
153 // an iteration.
154 //
155 // The encoding relies on the fact that there are fewer than 14
156 // different relocation modes using standard non-compact encoding.
157 //
158 // The first byte of a relocation record has a tag in its low 2 bits:
159 // Here are the record schemes, depending on the low tag and optional higher
160 // tags.
161 //
162 // Low tag:
163 // 00: embedded_object: [6-bit pc delta] 00
164 //
165 // 01: code_target: [6-bit pc delta] 01
166 //
167 // 10: short_data_record: [6-bit pc delta] 10 followed by
168 // [8-bit data delta]
169 //
170 // 11: long_record [6 bit reloc mode] 11
171 // followed by pc delta
172 // followed by optional data depending on type.
173 //
174 // If a pc delta exceeds 6 bits, it is split into a remainder that fits into
175 // 6 bits and a part that does not. The latter is encoded as a long record
176 // with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
177 // the following record in the usual way. The long pc jump record has variable
178 // length:
179 // pc-jump: [PC_JUMP] 11
180 // [7 bits data] 0
181 // ...
182 // [7 bits data] 1
183 // (Bits 6..31 of pc delta, with leading zeroes
184 // dropped, and last non-zero chunk tagged with 1.)
185
// Low tag occupies the bottom kTagBits bits of each record's first byte.
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
// Long records store a 6-bit reloc mode above the tag.
const int kLongTagBits = 6;

// The four low-tag values (see the encoding description above).
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kLocatableTag = 2;
const int kDefaultTag = 3;

// A "small" pc delta fits in the byte alongside the tag.
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;

// Long pc jumps are emitted in 7-bit chunks; the final chunk is tagged
// with kLastChunkTag in its low bit.
const int kChunkBits = 7;
const int kChunkMask = (1 << kChunkBits) - 1;
const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
204
205 // static
// static
// Returns whether this architecture encodes off-heap targets in a special
// (non-standard) way; true on MIPS/MIPS64/PPC/S390, false on ARM/ARM64/
// X64/IA32.
bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
    defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
  return false;
#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
    defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
  return true;
#endif
}
215
// Writes the global handle address of a WASM_GLOBAL_HANDLE reloc entry.
void RelocInfo::set_global_handle(Address address,
                                  ICacheFlushMode icache_flush_mode) {
  DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
  set_embedded_address(address, icache_flush_mode);
}
221
// Reads the call target of a WASM_CALL reloc entry.
Address RelocInfo::wasm_call_address() const {
  DCHECK_EQ(rmode_, WASM_CALL);
  return Assembler::target_address_at(pc_, constant_pool_);
}
226
// Patches the call target of a WASM_CALL reloc entry.
void RelocInfo::set_wasm_call_address(Address address,
                                      ICacheFlushMode icache_flush_mode) {
  DCHECK_EQ(rmode_, WASM_CALL);
  Assembler::set_target_address_at(pc_, constant_pool_, address,
                                   icache_flush_mode);
}
233
// Reads the global handle address of a WASM_GLOBAL_HANDLE reloc entry.
Address RelocInfo::global_handle() const {
  DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
  return embedded_address();
}
238
// Patches the target address of a code-target, runtime-entry, or wasm-call
// reloc entry. For code targets with a host object, also records the write
// with the incremental marker unless the write barrier is skipped.
void RelocInfo::set_target_address(Address target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
  Assembler::set_target_address_at(pc_, constant_pool_, target,
                                   icache_flush_mode);
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
      IsCodeTarget(rmode_)) {
    Code* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
                                                                  target_code);
  }
}
252
WriteLongPCJump(uint32_t pc_delta)253 uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
254 // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
255 // Otherwise write a variable length PC jump for the bits that do
256 // not fit in the kSmallPCDeltaBits bits.
257 if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
258 WriteMode(RelocInfo::PC_JUMP);
259 uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
260 DCHECK_GT(pc_jump, 0);
261 // Write kChunkBits size chunks of the pc_jump.
262 for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
263 byte b = pc_jump & kChunkMask;
264 *--pos_ = b << kLastChunkTagBits;
265 }
266 // Tag the last chunk so it can be identified.
267 *pos_ = *pos_ | kLastChunkTag;
268 // Return the remaining kSmallPCDeltaBits of the pc_delta.
269 return pc_delta & kSmallPCDeltaMask;
270 }
271
WriteShortTaggedPC(uint32_t pc_delta,int tag)272 void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
273 // Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
274 pc_delta = WriteLongPCJump(pc_delta);
275 *--pos_ = pc_delta << kTagBits | tag;
276 }
277
WriteShortData(intptr_t data_delta)278 void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
279 *--pos_ = static_cast<byte>(data_delta);
280 }
281
// Emits a long-record header byte: the 6-bit reloc mode above the default
// (11) tag. The static assert guarantees all modes fit in 6 bits.
void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
  STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
  *--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
}
286
WriteModeAndPC(uint32_t pc_delta,RelocInfo::Mode rmode)287 void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
288 // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
289 pc_delta = WriteLongPCJump(pc_delta);
290 WriteMode(rmode);
291 *--pos_ = pc_delta;
292 }
293
WriteIntData(int number)294 void RelocInfoWriter::WriteIntData(int number) {
295 for (int i = 0; i < kIntSize; i++) {
296 *--pos_ = static_cast<byte>(number);
297 // Signed right shift is arithmetic shift. Tested in test-utils.cc.
298 number = number >> kBitsPerByte;
299 }
300 }
301
WriteData(intptr_t data_delta)302 void RelocInfoWriter::WriteData(intptr_t data_delta) {
303 for (int i = 0; i < kIntptrSize; i++) {
304 *--pos_ = static_cast<byte>(data_delta);
305 // Signed right shift is arithmetic shift. Tested in test-utils.cc.
306 data_delta = data_delta >> kBitsPerByte;
307 }
308 }
309
// Serializes |rinfo| into the reloc stream (written backwards from pos_).
// The pc is delta-encoded relative to the previously written entry; the
// record format depends on the mode (see the encoding description above).
void RelocInfoWriter::Write(const RelocInfo* rinfo) {
  RelocInfo::Mode rmode = rinfo->rmode();
#ifdef DEBUG
  byte* begin_pos = pos_;
#endif
  DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
  DCHECK_GE(rinfo->pc() - reinterpret_cast<Address>(last_pc_), 0);
  // Use unsigned delta-encoding for pc.
  uint32_t pc_delta =
      static_cast<uint32_t>(rinfo->pc() - reinterpret_cast<Address>(last_pc_));

  // The two most common modes are given small tags, and usually fit in a byte.
  if (rmode == RelocInfo::EMBEDDED_OBJECT) {
    WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
  } else if (rmode == RelocInfo::CODE_TARGET) {
    WriteShortTaggedPC(pc_delta, kCodeTargetTag);
    DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
  } else if (rmode == RelocInfo::DEOPT_REASON) {
    // Deopt reasons must fit in the single data byte of a short record.
    DCHECK(rinfo->data() < (1 << kBitsPerByte));
    WriteShortTaggedPC(pc_delta, kLocatableTag);
    WriteShortData(rinfo->data());
  } else {
    // All remaining modes use the long record format.
    WriteModeAndPC(pc_delta, rmode);
    if (RelocInfo::IsComment(rmode)) {
      // Comments carry a full intptr_t payload.
      WriteData(rinfo->data());
    } else if (RelocInfo::IsConstPool(rmode) ||
               RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
               RelocInfo::IsDeoptPosition(rmode)) {
      // These modes carry an int-sized payload.
      WriteIntData(static_cast<int>(rinfo->data()));
    }
  }
  // Remember this entry's pc for the next delta encoding.
  last_pc_ = reinterpret_cast<byte*>(rinfo->pc());
#ifdef DEBUG
  DCHECK_LE(begin_pos - pos_, kMaxSize);
#endif
}
346
// Consumes one byte and returns its 2-bit low tag.
inline int RelocIterator::AdvanceGetTag() {
  return *--pos_ & kTagMask;
}
350
// Extracts the 6-bit reloc mode from the long-record header byte at pos_
// (without consuming it).
inline RelocInfo::Mode RelocIterator::GetMode() {
  return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
                                      ((1 << kLongTagBits) - 1));
}
355
// Applies the small pc delta stored above the tag in the byte at pos_.
inline void RelocIterator::ReadShortTaggedPC() {
  rinfo_.pc_ += *pos_ >> kTagBits;
}
359
// Consumes one byte and applies it as a pc delta (long record format).
inline void RelocIterator::AdvanceReadPC() {
  rinfo_.pc_ += *--pos_;
}
363
AdvanceReadInt()364 void RelocIterator::AdvanceReadInt() {
365 int x = 0;
366 for (int i = 0; i < kIntSize; i++) {
367 x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
368 }
369 rinfo_.data_ = x;
370 }
371
AdvanceReadData()372 void RelocIterator::AdvanceReadData() {
373 intptr_t x = 0;
374 for (int i = 0; i < kIntptrSize; i++) {
375 x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
376 }
377 rinfo_.data_ = x;
378 }
379
// Decodes a variable-length PC_JUMP record (mirrors WriteLongPCJump).
void RelocIterator::AdvanceReadLongPCJump() {
  // Read the 32-kSmallPCDeltaBits most significant bits of the
  // pc jump in kChunkBits bit chunks and shift them into place.
  // Stop when the last chunk is encountered.
  uint32_t pc_jump = 0;
  for (int i = 0; i < kIntSize; i++) {
    byte pc_jump_part = *--pos_;
    pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
    // The low bit tags the final chunk of the jump.
    if ((pc_jump_part & kLastChunkTagMask) == 1) break;
  }
  // The least significant kSmallPCDeltaBits bits will be added
  // later.
  rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
}
394
ReadShortData()395 inline void RelocIterator::ReadShortData() {
396 uint8_t unsigned_b = *pos_;
397 rinfo_.data_ = unsigned_b;
398 }
399
next()400 void RelocIterator::next() {
401 DCHECK(!done());
402 // Basically, do the opposite of RelocInfoWriter::Write.
403 // Reading of data is as far as possible avoided for unwanted modes,
404 // but we must always update the pc.
405 //
406 // We exit this loop by returning when we find a mode we want.
407 while (pos_ > end_) {
408 int tag = AdvanceGetTag();
409 if (tag == kEmbeddedObjectTag) {
410 ReadShortTaggedPC();
411 if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
412 } else if (tag == kCodeTargetTag) {
413 ReadShortTaggedPC();
414 if (SetMode(RelocInfo::CODE_TARGET)) return;
415 } else if (tag == kLocatableTag) {
416 ReadShortTaggedPC();
417 Advance();
418 if (SetMode(RelocInfo::DEOPT_REASON)) {
419 ReadShortData();
420 return;
421 }
422 } else {
423 DCHECK_EQ(tag, kDefaultTag);
424 RelocInfo::Mode rmode = GetMode();
425 if (rmode == RelocInfo::PC_JUMP) {
426 AdvanceReadLongPCJump();
427 } else {
428 AdvanceReadPC();
429 if (RelocInfo::IsComment(rmode)) {
430 if (SetMode(rmode)) {
431 AdvanceReadData();
432 return;
433 }
434 Advance(kIntptrSize);
435 } else if (RelocInfo::IsConstPool(rmode) ||
436 RelocInfo::IsVeneerPool(rmode) ||
437 RelocInfo::IsDeoptId(rmode) ||
438 RelocInfo::IsDeoptPosition(rmode)) {
439 if (SetMode(rmode)) {
440 AdvanceReadInt();
441 return;
442 }
443 Advance(kIntSize);
444 } else if (SetMode(static_cast<RelocInfo::Mode>(rmode))) {
445 return;
446 }
447 }
448 }
449 }
450 done_ = true;
451 }
452
// Iterates the reloc info of a heap Code object.
RelocIterator::RelocIterator(Code* code, int mode_mask)
    : RelocIterator(code, code->raw_instruction_start(), code->constant_pool(),
                    code->relocation_end(), code->relocation_start(),
                    mode_mask) {}
457
// Iterates the reloc info behind a CodeReference (no host Code object).
RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
    : RelocIterator(nullptr, code_reference.instruction_start(),
                    code_reference.constant_pool(),
                    code_reference.relocation_end(),
                    code_reference.relocation_start(), mode_mask) {}
463
// Iterates the reloc info of a not-yet-installed CodeDesc. The reloc data
// lives at the end of the desc buffer; there is no constant pool.
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
    : RelocIterator(nullptr, reinterpret_cast<Address>(desc.buffer), 0,
                    desc.buffer + desc.buffer_size,
                    desc.buffer + desc.buffer_size - desc.reloc_size,
                    mode_mask) {}
469
// Iterates reloc info for native wasm code given as raw byte vectors; the
// resulting RelocInfos are flagged as being in native wasm code.
RelocIterator::RelocIterator(Vector<byte> instructions,
                             Vector<const byte> reloc_info, Address const_pool,
                             int mode_mask)
    : RelocIterator(nullptr, reinterpret_cast<Address>(instructions.start()),
                    const_pool, reloc_info.start() + reloc_info.size(),
                    reloc_info.start(), mode_mask) {
  rinfo_.flags_ = RelocInfo::kInNativeWasmCode;
}
478
// Base constructor all other RelocIterator constructors delegate to.
// |pos| is the end of the reloc data and |end| its start, because
// relocation info is read backwards. Immediately advances to the first
// matching entry.
RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
                             const byte* pos, const byte* end, int mode_mask)
    : pos_(pos), end_(end), mode_mask_(mode_mask) {
  // Relocation info is read backwards.
  DCHECK_GE(pos_, end_);
  rinfo_.host_ = host;
  rinfo_.pc_ = pc;
  rinfo_.constant_pool_ = constant_pool;
  // An empty mask can never match; skip straight to the end.
  if (mode_mask_ == 0) pos_ = end_;
  next();
}
490
491 // -----------------------------------------------------------------------------
492 // Implementation of RelocInfo
493
494 #ifdef DEBUG
// Debug helper: returns true if |desc| contains any reloc entry (code
// target, embedded object, or anything in kApplyMask) that would require
// relocation after code generation.
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
  // Ensure there are no code targets or embedded objects present in the
  // deoptimization entries, they would require relocation after code
  // generation.
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::kApplyMask;
  RelocIterator it(desc, mode_mask);
  return !it.done();
}
505 #endif
506
507 #ifdef ENABLE_DISASSEMBLER
// Returns a human-readable name for |rmode|, used by the disassembler.
// NUMBER_OF_MODES and PC_JUMP are pseudo modes that should never be printed.
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
  switch (rmode) {
    case NONE:
      return "no reloc";
    case EMBEDDED_OBJECT:
      return "embedded object";
    case CODE_TARGET:
      return "code target";
    case RUNTIME_ENTRY:
      return "runtime entry";
    case COMMENT:
      return "comment";
    case EXTERNAL_REFERENCE:
      return "external reference";
    case INTERNAL_REFERENCE:
      return "internal reference";
    case INTERNAL_REFERENCE_ENCODED:
      return "encoded internal reference";
    case OFF_HEAP_TARGET:
      return "off heap target";
    case DEOPT_SCRIPT_OFFSET:
      return "deopt script offset";
    case DEOPT_INLINING_ID:
      return "deopt inlining id";
    case DEOPT_REASON:
      return "deopt reason";
    case DEOPT_ID:
      return "deopt index";
    case CONST_POOL:
      return "constant pool";
    case VENEER_POOL:
      return "veneer pool";
    case WASM_GLOBAL_HANDLE:
      return "global handle";
    case WASM_CALL:
      return "internal wasm call";
    case WASM_CODE_TABLE_ENTRY:
      return "wasm code table entry";
    case JS_TO_WASM_CALL:
      return "js to wasm call";
    case NUMBER_OF_MODES:
    case PC_JUMP:
      UNREACHABLE();
  }
  return "unknown relocation type";
}
554
// Pretty-prints this reloc entry (pc, mode name, and mode-specific details)
// to |os|; used by the disassembler.
void RelocInfo::Print(Isolate* isolate, std::ostream& os) {  // NOLINT
  os << reinterpret_cast<const void*>(pc_) << "  " << RelocModeName(rmode_);
  if (IsComment(rmode_)) {
    // For comments, data_ is the comment string pointer.
    os << "  (" << reinterpret_cast<char*>(data_) << ")";
  } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
    os << "  (" << data() << ")";
  } else if (rmode_ == DEOPT_REASON) {
    os << "  ("
       << DeoptimizeReasonToString(static_cast<DeoptimizeReason>(data_)) << ")";
  } else if (rmode_ == EMBEDDED_OBJECT) {
    os << "  (" << Brief(target_object()) << ")";
  } else if (rmode_ == EXTERNAL_REFERENCE) {
    ExternalReferenceEncoder ref_encoder(isolate);
    os << " ("
       << ref_encoder.NameOfAddress(isolate, target_external_reference())
       << ")  (" << reinterpret_cast<const void*>(target_external_reference())
       << ")";
  } else if (IsCodeTarget(rmode_)) {
    const Address code_target = target_address();
    if (flags_ & kInNativeWasmCode) {
      os << " (wasm trampoline) ";
    } else {
      Code* code = Code::GetCodeFromTargetAddress(code_target);
      DCHECK(code->IsCode());
      os << " (" << Code::Kind2String(code->kind());
      if (Builtins::IsBuiltin(code)) {
        os << " " << Builtins::name(code->builtin_index());
      } else if (code->kind() == Code::STUB) {
        os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
      }
      os << ") ";
    }
    os << " (" << reinterpret_cast<const void*>(target_address()) << ")";
  } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
    // Deoptimization bailouts are stored as runtime entries.
    int id = Deoptimizer::GetDeoptimizationId(
        isolate, target_address(), Deoptimizer::EAGER);
    if (id != Deoptimizer::kNotDeoptimizationEntry) {
      os << "  (deoptimization bailout " << id << ")";
    }
  } else if (IsConstPool(rmode_)) {
    os << " (size " << static_cast<int>(data_) << ")";
  }

  os << "\n";
}
601 #endif // ENABLE_DISASSEMBLER
602
603 #ifdef VERIFY_HEAP
// Heap-verification hook: checks that this reloc entry's target is
// internally consistent (valid pointers, resolvable code objects,
// in-bounds internal references). Many modes need no verification.
void RelocInfo::Verify(Isolate* isolate) {
  switch (rmode_) {
    case EMBEDDED_OBJECT:
      Object::VerifyPointer(target_object());
      break;
    case CODE_TARGET: {
      // convert inline target address to code object
      Address addr = target_address();
      CHECK_NE(addr, kNullAddress);
      // Check that we can find the right code object.
      Code* code = Code::GetCodeFromTargetAddress(addr);
      Object* found = isolate->FindCodeObject(addr);
      CHECK(found->IsCode());
      CHECK(code->address() == HeapObject::cast(found)->address());
      break;
    }
    case INTERNAL_REFERENCE:
    case INTERNAL_REFERENCE_ENCODED: {
      // Internal references must point inside the instructions of the code
      // object that contains them.
      Address target = target_internal_reference();
      Address pc = target_internal_reference_address();
      Code* code = Code::cast(isolate->FindCodeObject(pc));
      CHECK(target >= code->InstructionStart());
      CHECK(target <= code->InstructionEnd());
      break;
    }
    case OFF_HEAP_TARGET: {
      // Off-heap targets must resolve to an embedded builtin.
      Address addr = target_off_heap_target();
      CHECK_NE(addr, kNullAddress);
      CHECK_NOT_NULL(InstructionStream::TryLookupCode(isolate, addr));
      break;
    }
    case RUNTIME_ENTRY:
    case COMMENT:
    case EXTERNAL_REFERENCE:
    case DEOPT_SCRIPT_OFFSET:
    case DEOPT_INLINING_ID:
    case DEOPT_REASON:
    case DEOPT_ID:
    case CONST_POOL:
    case VENEER_POOL:
    case WASM_GLOBAL_HANDLE:
    case WASM_CALL:
    case JS_TO_WASM_CALL:
    case WASM_CODE_TABLE_ENTRY:
    case NONE:
      break;
    case NUMBER_OF_MODES:
    case PC_JUMP:
      UNREACHABLE();
      break;
  }
}
656 #endif // VERIFY_HEAP
657
// Creates a builder with the given load-instruction reach (in bits) for
// pointer-sized and double-sized entries. Reserves space for the common
// case of many pointer entries.
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
                                         int double_reach_bits) {
  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}
664
// Determines whether the next entry of |type| would still be reachable by
// a regular (in-reach) load, or must go to the overflow section. The pool
// layout places doubles first, then pointers.
ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
    ConstantPoolEntry::Type type) const {
  const PerTypeEntryInfo& info = info_[type];

  // Once a type has overflowed, all further entries of it overflow too.
  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;

  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
  int dbl_offset = dbl_count * kDoubleSize;
  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
  int ptr_offset = ptr_count * kPointerSize + dbl_offset;

  if (type == ConstantPoolEntry::DOUBLE) {
    // Double overflow detection must take into account the reach for both types
    int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
    if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
        (ptr_count > 0 &&
         !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  } else {
    DCHECK(type == ConstantPoolEntry::INTPTR);
    if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  }

  return ConstantPoolEntry::REGULAR;
}
693
// Adds |entry| to the pool, merging with an identical sharable entry when
// possible. Returns whether the entry ends up with regular or overflowed
// access. Must be called before the pool has been emitted.
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
    ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
  DCHECK(!emitted_label_.is_bound());
  PerTypeEntryInfo& info = info_[type];
  const int entry_size = ConstantPoolEntry::size(type);
  bool merged = false;

  if (entry.sharing_ok()) {
    // Try to merge entries
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
      // Compare by value; the width depends on the entry type.
      if ((entry_size == kPointerSize) ? entry.value() == it->value()
                                       : entry.value64() == it->value64()) {
        // Merge with found entry.
        entry.set_merged_index(i);
        merged = true;
        break;
      }
    }
  }

  // By definition, merged entries have regular access.
  DCHECK(!merged || entry.merged_index() < info.regular_count);
  ConstantPoolEntry::Access access =
      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));

  // Enforce an upper bound on search time by limiting the search to
  // unique sharable entries which fit in the regular section.
  if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
    info.shared_entries.push_back(entry);
  } else {
    info.entries.push_back(entry);
  }

  // We're done if we found a match or have already triggered the
  // overflow state.
  if (merged || info.overflow()) return access;

  if (access == ConstantPoolEntry::REGULAR) {
    info.regular_count++;
  } else {
    // First overflowing entry: remember where the overflow section begins.
    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
  }

  return access;
}
741
EmitSharedEntries(Assembler * assm,ConstantPoolEntry::Type type)742 void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
743 ConstantPoolEntry::Type type) {
744 PerTypeEntryInfo& info = info_[type];
745 std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
746 const int entry_size = ConstantPoolEntry::size(type);
747 int base = emitted_label_.pos();
748 DCHECK_GT(base, 0);
749 int shared_end = static_cast<int>(shared_entries.size());
750 std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
751 for (int i = 0; i < shared_end; i++, shared_it++) {
752 int offset = assm->pc_offset() - base;
753 shared_it->set_offset(offset); // Save offset for merged entries.
754 if (entry_size == kPointerSize) {
755 assm->dp(shared_it->value());
756 } else {
757 assm->dq(shared_it->value64());
758 }
759 DCHECK(is_uintn(offset, info.regular_reach_bits));
760
761 // Patch load sequence with correct offset.
762 assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
763 ConstantPoolEntry::REGULAR, type);
764 }
765 }
766
EmitGroup(Assembler * assm,ConstantPoolEntry::Access access,ConstantPoolEntry::Type type)767 void ConstantPoolBuilder::EmitGroup(Assembler* assm,
768 ConstantPoolEntry::Access access,
769 ConstantPoolEntry::Type type) {
770 PerTypeEntryInfo& info = info_[type];
771 const bool overflow = info.overflow();
772 std::vector<ConstantPoolEntry>& entries = info.entries;
773 std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
774 const int entry_size = ConstantPoolEntry::size(type);
775 int base = emitted_label_.pos();
776 DCHECK_GT(base, 0);
777 int begin;
778 int end;
779
780 if (access == ConstantPoolEntry::REGULAR) {
781 // Emit any shared entries first
782 EmitSharedEntries(assm, type);
783 }
784
785 if (access == ConstantPoolEntry::REGULAR) {
786 begin = 0;
787 end = overflow ? info.overflow_start : static_cast<int>(entries.size());
788 } else {
789 DCHECK(access == ConstantPoolEntry::OVERFLOWED);
790 if (!overflow) return;
791 begin = info.overflow_start;
792 end = static_cast<int>(entries.size());
793 }
794
795 std::vector<ConstantPoolEntry>::iterator it = entries.begin();
796 if (begin > 0) std::advance(it, begin);
797 for (int i = begin; i < end; i++, it++) {
798 // Update constant pool if necessary and get the entry's offset.
799 int offset;
800 ConstantPoolEntry::Access entry_access;
801 if (!it->is_merged()) {
802 // Emit new entry
803 offset = assm->pc_offset() - base;
804 entry_access = access;
805 if (entry_size == kPointerSize) {
806 assm->dp(it->value());
807 } else {
808 assm->dq(it->value64());
809 }
810 } else {
811 // Retrieve offset from shared entry.
812 offset = shared_entries[it->merged_index()].offset();
813 entry_access = ConstantPoolEntry::REGULAR;
814 }
815
816 DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
817 is_uintn(offset, info.regular_reach_bits));
818
819 // Patch load sequence with correct offset.
820 assm->PatchConstantPoolAccessInstruction(it->position(), offset,
821 entry_access, type);
822 }
823 }
824
825 // Emit and return position of pool. Zero implies no constant pool.
// Emit and return position of pool.  Zero implies no constant pool.
// Doubles are emitted before pointers for alignment; overflow sections
// follow the regular ones. Idempotent: a second call only returns the
// already-bound position.
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();

  if (!emitted) {
    // Mark start of constant pool.  Align if necessary.
    if (!empty) assm->DataAlign(kDoubleSize);
    assm->bind(&emitted_label_);
    if (!empty) {
      // Emit in groups based on access and type.
      // Emit doubles first for alignment purposes.
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
        assm->DataAlign(kDoubleSize);
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::DOUBLE);
      }
      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::INTPTR);
      }
    }
  }

  return !empty ? emitted_label_.pos() : 0;
}
853
// Request for a heap number to be allocated later and patched in at
// |offset|. The value must not be representable as a Smi.
HeapObjectRequest::HeapObjectRequest(double heap_number, int offset)
    : kind_(kHeapNumber), offset_(offset) {
  value_.heap_number = heap_number;
  DCHECK(!IsSmiDouble(value_.heap_number));
}
859
// Request for a code stub's code object to be patched in at |offset|.
HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
    : kind_(kCodeStub), offset_(offset) {
  value_.code_stub = code_stub;
  DCHECK_NOT_NULL(value_.code_stub);
}
865
866 // Platform specific but identical code for all the platforms.
867
// Records a deoptimization as four consecutive reloc entries: script
// offset, inlining id, reason, and deopt id.
void Assembler::RecordDeoptReason(DeoptimizeReason reason,
                                  SourcePosition position, int id) {
  EnsureSpace ensure_space(this);
  RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
  RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
  RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
  RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
876
RecordComment(const char * msg)877 void Assembler::RecordComment(const char* msg) {
878 if (FLAG_code_comments) {
879 EnsureSpace ensure_space(this);
880 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
881 }
882 }
883
DataAlign(int m)884 void Assembler::DataAlign(int m) {
885 DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
886 while ((pc_offset() & (m - 1)) != 0) {
887 db(0);
888 }
889 }
890
// Queues |request| for later fulfillment, stamping it with the current
// pc offset so the allocated object can be patched in at the right place.
void Assembler::RequestHeapObject(HeapObjectRequest request) {
  request.set_offset(pc_offset());
  heap_object_requests_.push_front(request);
}
895
896 } // namespace internal
897 } // namespace v8
898