1 // Copyright 2020 yuzu Emulator Project
2 // Licensed under GPLv2 or any later version
3 // Refer to the license.txt file included.
4
5 #include "common/assert.h"
6 #include "common/logging/log.h"
7 #include "common/microprofile.h"
8 #include "common/x64/xbyak_util.h"
9 #include "video_core/engines/maxwell_3d.h"
10 #include "video_core/macro/macro_interpreter.h"
11 #include "video_core/macro/macro_jit_x64.h"
12
13 MICROPROFILE_DEFINE(MacroJitCompile, "GPU", "Compile macro JIT", MP_RGB(173, 255, 47));
14 MICROPROFILE_DEFINE(MacroJitExecute, "GPU", "Execute macro JIT", MP_RGB(255, 255, 0));
15
16 namespace Tegra {
// Fixed register allocation for the generated code. All of these are
// callee-saved in both the Windows and System V x86-64 ABIs, so they survive
// the calls the JITted code makes back into the emulator (e.g. Send).
constexpr Xbyak::Reg64 STATE = Xbyak::util::rbx;        // Pointer to JITState
constexpr Xbyak::Reg32 RESULT = Xbyak::util::ebp;       // Current ALU/result value
constexpr Xbyak::Reg64 PARAMETERS = Xbyak::util::r12;   // Pointer to next macro parameter
constexpr Xbyak::Reg32 METHOD_ADDRESS = Xbyak::util::r14d; // Current method address/increment
constexpr Xbyak::Reg64 BRANCH_HOLDER = Xbyak::util::r15;   // Pending delayed-branch target

// Set of registers that must be preserved across calls out of the JITted code.
static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
    STATE,
    RESULT,
    PARAMETERS,
    METHOD_ADDRESS,
    BRANCH_HOLDER,
});
30
// Engine-level wrapper; per-macro compilation happens in Compile() below.
MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d_)
    : MacroEngine{maxwell3d_}, maxwell3d{maxwell3d_} {}
33
// Compiles one macro program into a cached, directly-executable unit.
std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
    return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
}
37
// Compilation is eager: the x64 code is generated in the constructor so that
// Execute() only has to call the finished program.
MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d_, const std::vector<u32>& code_)
    : CodeGenerator{MAX_CODE_SIZE}, code{code_}, maxwell3d{maxwell3d_} {
    Compile();
}
42
MacroJITx64Impl::~MacroJITx64Impl() = default;
44
// Runs the compiled program with the given parameter list. The 'method'
// argument is not referenced by this implementation.
void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
    MICROPROFILE_SCOPE(MacroJitExecute);
    // Compile() must have produced a program; bail out gracefully otherwise.
    ASSERT_OR_EXECUTE(program != nullptr, { return; });
    JITState state{};
    state.maxwell3d = &maxwell3d;
    state.registers = {};
    program(&state, parameters.data());
}
53
// Emits x64 code for a macro ALU instruction (add/sub with optional carry and
// the bitwise operations). The computed value is left in src_a (normally
// RESULT) and routed through Compile_ProcessResult.
void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) {
    const bool is_a_zero = opcode.src_a == 0;
    const bool is_b_zero = opcode.src_b == 0;
    const bool valid_operation = !is_a_zero && !is_b_zero;
    [[maybe_unused]] const bool is_move_operation = !is_a_zero && is_b_zero;
    const bool has_zero_register = is_a_zero || is_b_zero;
    // Carry-consuming operations also read/update the carry flag, so they may
    // never be elided by the zero-register optimization.
    const bool no_zero_reg_skip = opcode.alu_operation == Macro::ALUOperation::AddWithCarry ||
                                  opcode.alu_operation == Macro::ALUOperation::SubtractWithBorrow;

    Xbyak::Reg32 src_a;
    Xbyak::Reg32 src_b;

    if (!optimizer.zero_reg_skip || no_zero_reg_skip) {
        src_a = Compile_GetRegister(opcode.src_a, RESULT);
        src_b = Compile_GetRegister(opcode.src_b, eax);
    } else {
        // Only load sources that are not the always-zero register 0.
        // NOTE(review): a skipped source leaves the corresponding Reg32
        // default-constructed; the cases below are expected to guard against
        // using it — confirm all emission paths respect this.
        if (!is_a_zero) {
            src_a = Compile_GetRegister(opcode.src_a, RESULT);
        }
        if (!is_b_zero) {
            src_b = Compile_GetRegister(opcode.src_b, eax);
        }
    }

    // Tracks whether an instruction that actually affects CF was emitted, so
    // the carry store can be skipped when nothing changed it.
    bool has_emitted = false;

    switch (opcode.alu_operation) {
    case Macro::ALUOperation::Add:
        if (optimizer.zero_reg_skip) {
            if (valid_operation) {
                add(src_a, src_b);
            }
        } else {
            add(src_a, src_b);
        }

        // Persist CF into the JIT state unless no instruction in this macro
        // ever consumes it (see Optimizer_ScanFlags).
        if (!optimizer.can_skip_carry) {
            setc(byte[STATE + offsetof(JITState, carry_flag)]);
        }
        break;
    case Macro::ALUOperation::AddWithCarry:
        // bt loads the saved carry flag into CF before the adc; setc stores
        // the resulting carry back.
        bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
        adc(src_a, src_b);
        setc(byte[STATE + offsetof(JITState, carry_flag)]);
        break;
    case Macro::ALUOperation::Subtract:
        if (optimizer.zero_reg_skip) {
            if (valid_operation) {
                sub(src_a, src_b);
                has_emitted = true;
            }
        } else {
            sub(src_a, src_b);
            has_emitted = true;
        }
        if (!optimizer.can_skip_carry && has_emitted) {
            setc(byte[STATE + offsetof(JITState, carry_flag)]);
        }
        break;
    case Macro::ALUOperation::SubtractWithBorrow:
        bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
        sbb(src_a, src_b);
        setc(byte[STATE + offsetof(JITState, carry_flag)]);
        break;
    case Macro::ALUOperation::Xor:
        if (optimizer.zero_reg_skip) {
            if (valid_operation) {
                xor_(src_a, src_b);
            }
        } else {
            xor_(src_a, src_b);
        }
        break;
    case Macro::ALUOperation::Or:
        if (optimizer.zero_reg_skip) {
            if (valid_operation) {
                or_(src_a, src_b);
            }
        } else {
            or_(src_a, src_b);
        }
        break;
    case Macro::ALUOperation::And:
        // AND with a zero register is always zero, so it may be skipped; the
        // result register was already zeroed by Compile_GetRegister.
        if (optimizer.zero_reg_skip) {
            if (!has_zero_register) {
                and_(src_a, src_b);
            }
        } else {
            and_(src_a, src_b);
        }
        break;
    case Macro::ALUOperation::AndNot:
        // src_a & ~src_b; skipped entirely if src_a is zero (0 & x == 0).
        if (optimizer.zero_reg_skip) {
            if (!is_a_zero) {
                not_(src_b);
                and_(src_a, src_b);
            }
        } else {
            not_(src_b);
            and_(src_a, src_b);
        }
        break;
    case Macro::ALUOperation::Nand:
        // ~(src_a & src_b)
        if (optimizer.zero_reg_skip) {
            if (!is_a_zero) {
                and_(src_a, src_b);
                not_(src_a);
            }
        } else {
            and_(src_a, src_b);
            not_(src_a);
        }
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented ALU operation {}", opcode.alu_operation.Value());
        break;
    }
    Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
173
Compile_AddImmediate(Macro::Opcode opcode)174 void MacroJITx64Impl::Compile_AddImmediate(Macro::Opcode opcode) {
175 if (optimizer.skip_dummy_addimmediate) {
176 // Games tend to use this as an exit instruction placeholder. It's to encode an instruction
177 // without doing anything. In our case we can just not emit anything.
178 if (opcode.result_operation == Macro::ResultOperation::Move && opcode.dst == 0) {
179 return;
180 }
181 }
182 // Check for redundant moves
183 if (optimizer.optimize_for_method_move &&
184 opcode.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
185 if (next_opcode.has_value()) {
186 const auto next = *next_opcode;
187 if (next.result_operation == Macro::ResultOperation::MoveAndSetMethod &&
188 opcode.dst == next.dst) {
189 return;
190 }
191 }
192 }
193 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
194 if (opcode.immediate == 0) {
195 xor_(RESULT, RESULT);
196 } else {
197 mov(RESULT, opcode.immediate);
198 }
199 } else {
200 auto result = Compile_GetRegister(opcode.src_a, RESULT);
201 if (opcode.immediate > 2) {
202 add(result, opcode.immediate);
203 } else if (opcode.immediate == 1) {
204 inc(result);
205 } else if (opcode.immediate < 0) {
206 sub(result, opcode.immediate * -1);
207 }
208 }
209 Compile_ProcessResult(opcode.result_operation, opcode.dst);
210 }
211
// Bitfield extract-insert:
//   dst = (src_a & ~(mask << dst_bit)) | (((src_b >> src_bit) & mask) << dst_bit)
void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
    auto dst = Compile_GetRegister(opcode.src_a, RESULT);
    auto src = Compile_GetRegister(opcode.src_b, eax);

    // NOTE(review): a bit offset of 31 zeroes the source outright instead of
    // shifting by 31 — presumably matching the interpreter's semantics for
    // this encoding; confirm against macro_interpreter before changing.
    if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
        shr(src, opcode.bf_src_bit);
    } else if (opcode.bf_src_bit == 31) {
        xor_(src, src);
    }
    // Don't bother masking the whole register since we're using a 32 bit register
    if (opcode.bf_size != 31 && opcode.bf_size != 0) {
        and_(src, opcode.GetBitfieldMask());
    } else if (opcode.bf_size == 0) {
        xor_(src, src);
    }
    if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
        shl(src, opcode.bf_dst_bit);
    } else if (opcode.bf_dst_bit == 31) {
        xor_(src, src);
    }

    // Clear the destination field before inserting; skip the AND when the
    // mask covers the whole register.
    const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
    if (mask != 0xffffffff) {
        and_(dst, mask);
    }
    or_(dst, src);
    Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
240
// Extract a field using a register-supplied right-shift amount, then shift
// the result left by the immediate destination bit:
//   result = ((src_b >> reg[src_a]) & mask) << dst_bit
void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
    // Shift count must live in cl for the variable shr below.
    const auto dst = Compile_GetRegister(opcode.src_a, ecx);
    const auto src = Compile_GetRegister(opcode.src_b, RESULT);

    shr(src, dst.cvt8());
    if (opcode.bf_size != 0 && opcode.bf_size != 31) {
        and_(src, opcode.GetBitfieldMask());
    } else if (opcode.bf_size == 0) {
        // A zero-sized field always extracts 0.
        xor_(src, src);
    }

    if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
        shl(src, opcode.bf_dst_bit);
    } else if (opcode.bf_dst_bit == 31) {
        // NOTE(review): offset 31 zeroes the value instead of shifting by 31,
        // mirroring Compile_ExtractInsert — confirm against the interpreter.
        xor_(src, src);
    }
    Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
259
// Extract a field using immediate shift/mask, then shift the result left by a
// register-supplied amount:
//   result = ((src_b >> src_bit) & mask) << reg[src_a]
void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
    // Shift count must live in cl for the variable shl below.
    const auto dst = Compile_GetRegister(opcode.src_a, ecx);
    const auto src = Compile_GetRegister(opcode.src_b, RESULT);

    if (opcode.bf_src_bit != 0) {
        shr(src, opcode.bf_src_bit);
    }

    // A size of 31 yields an all-ones mask on the remaining bits, so the AND
    // would be a no-op and is skipped.
    if (opcode.bf_size != 31) {
        and_(src, opcode.GetBitfieldMask());
    }
    shl(src, dst.cvt8());

    Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
275
Compile_Read(Macro::Opcode opcode)276 void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
277 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
278 if (opcode.immediate == 0) {
279 xor_(RESULT, RESULT);
280 } else {
281 mov(RESULT, opcode.immediate);
282 }
283 } else {
284 auto result = Compile_GetRegister(opcode.src_a, RESULT);
285 if (opcode.immediate > 2) {
286 add(result, opcode.immediate);
287 } else if (opcode.immediate == 1) {
288 inc(result);
289 } else if (opcode.immediate < 0) {
290 sub(result, opcode.immediate * -1);
291 }
292 }
293
294 // Equivalent to Engines::Maxwell3D::GetRegisterValue:
295 if (optimizer.enable_asserts) {
296 Xbyak::Label pass_range_check;
297 cmp(RESULT, static_cast<u32>(Engines::Maxwell3D::Regs::NUM_REGS));
298 jb(pass_range_check);
299 int3();
300 L(pass_range_check);
301 }
302 mov(rax, qword[STATE]);
303 mov(RESULT,
304 dword[rax + offsetof(Engines::Maxwell3D, regs) +
305 offsetof(Engines::Maxwell3D::Regs, reg_array) + RESULT.cvt64() * sizeof(u32)]);
306
307 Compile_ProcessResult(opcode.result_operation, opcode.dst);
308 }
309
// Trampoline invoked from JITted code to dispatch a method call back into the
// Maxwell3D engine; kept as a free function so its address can be passed to
// CallFarFunction.
static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
    maxwell3d->CallMethodFromMME(method_address.address, value);
}
313
// Emits a call to Send() with the current method address and 'value', then
// advances METHOD_ADDRESS by its embedded increment field.
void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
    // Persistent registers that are caller-saved must be preserved manually
    // around the far call.
    Common::X64::ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
    // qword[STATE] loads the engine pointer (JITState's first member).
    mov(Common::X64::ABI_PARAM1, qword[STATE]);
    mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS);
    mov(Common::X64::ABI_PARAM3, value);
    Common::X64::CallFarFunction(*this, &Send);
    Common::X64::ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);

    Xbyak::Label dont_process{};
    // Get increment
    test(METHOD_ADDRESS, 0x3f000);
    // If zero, method address doesn't update
    je(dont_process);

    // Advance the address by the increment stored in bits 12..17 while
    // keeping that increment field intact:
    //   new = ((address & 0xfff) + increment) | (increment << 12)
    mov(ecx, METHOD_ADDRESS);
    and_(METHOD_ADDRESS, 0xfff);
    shr(ecx, 12);
    and_(ecx, 0x3f);
    lea(eax, ptr[rcx + METHOD_ADDRESS.cvt64()]);
    sal(ecx, 12);
    or_(eax, ecx);

    mov(METHOD_ADDRESS, eax);

    L(dont_process);
}
340
// Emits code for a macro branch. When any non-annulled branch exists in the
// macro (optimizer.has_delayed_pc), the instruction after a taken branch (the
// delay slot) still executes before control transfers, matching the hardware
// macro processor; BRANCH_HOLDER carries the pending continuation address.
void Tegra::MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
    ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
    // Branch targets are pc-relative, in units of one 32-bit instruction.
    const s32 jump_address =
        static_cast<s32>(pc) + static_cast<s32>(opcode.GetBranchTarget() / sizeof(s32));

    Xbyak::Label end;
    auto value = Compile_GetRegister(opcode.src_a, eax);
    test(value, value);
    if (optimizer.has_delayed_pc) {
        // Fall through to 'end' when the branch condition fails.
        switch (opcode.branch_condition) {
        case Macro::BranchCondition::Zero:
            jne(end, T_NEAR);
            break;
        case Macro::BranchCondition::NotZero:
            je(end, T_NEAR);
            break;
        }

        if (opcode.branch_annul) {
            // Annulled branch: no delay slot, jump immediately.
            xor_(BRANCH_HOLDER, BRANCH_HOLDER);
            jmp(labels[jump_address], T_NEAR);
        } else {
            // Non-annulled branch: record the post-delay-slot continuation in
            // BRANCH_HOLDER, then run the delay slot (via delay_skip below).
            Xbyak::Label handle_post_exit{};
            Xbyak::Label skip{};
            jmp(skip, T_NEAR);
            if (opcode.is_exit) {
                L(handle_post_exit);
                // Execute 1 instruction
                mov(BRANCH_HOLDER, end_of_code);
                // Jump to next instruction to skip delay slot check
                jmp(labels[jump_address], T_NEAR);
            } else {
                L(handle_post_exit);
                xor_(BRANCH_HOLDER, BRANCH_HOLDER);
                jmp(labels[jump_address], T_NEAR);
            }
            L(skip);
            mov(BRANCH_HOLDER, handle_post_exit);
            jmp(delay_skip[pc], T_NEAR);
        }
    } else {
        // No delay slots anywhere in this macro: branch directly.
        switch (opcode.branch_condition) {
        case Macro::BranchCondition::Zero:
            je(labels[jump_address], T_NEAR);
            break;
        case Macro::BranchCondition::NotZero:
            jne(labels[jump_address], T_NEAR);
            break;
        }
    }

    L(end);
}
394
Optimizer_ScanFlags()395 void Tegra::MacroJITx64Impl::Optimizer_ScanFlags() {
396 optimizer.can_skip_carry = true;
397 optimizer.has_delayed_pc = false;
398 for (auto raw_op : code) {
399 Macro::Opcode op{};
400 op.raw = raw_op;
401
402 if (op.operation == Macro::Operation::ALU) {
403 // Scan for any ALU operations which actually use the carry flag, if they don't exist in
404 // our current code we can skip emitting the carry flag handling operations
405 if (op.alu_operation == Macro::ALUOperation::AddWithCarry ||
406 op.alu_operation == Macro::ALUOperation::SubtractWithBorrow) {
407 optimizer.can_skip_carry = false;
408 }
409 }
410
411 if (op.operation == Macro::Operation::Branch) {
412 if (!op.branch_annul) {
413 optimizer.has_delayed_pc = true;
414 }
415 }
416 }
417 }
418
// Translates the whole macro into host x64 code and publishes the entry point
// in 'program'.
void MacroJITx64Impl::Compile() {
    MICROPROFILE_SCOPE(MacroJitCompile);
    labels.fill(Xbyak::Label());

    // Prologue: preserve every callee-saved register; the 8-byte adjustment
    // keeps the stack aligned for calls made from the JITted code.
    Common::X64::ABI_PushRegistersAndAdjustStack(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
    // JIT state
    mov(STATE, Common::X64::ABI_PARAM1);
    mov(PARAMETERS, Common::X64::ABI_PARAM2);
    xor_(RESULT, RESULT);
    xor_(METHOD_ADDRESS, METHOD_ADDRESS);
    xor_(BRANCH_HOLDER, BRANCH_HOLDER);

    // The first macro parameter is pre-loaded into macro register 1
    // (offset 4 = one u32 into the registers array).
    mov(dword[STATE + offsetof(JITState, registers) + 4], Compile_FetchParameter());

    // Track get register for zero registers and mark it as no-op
    optimizer.zero_reg_skip = true;

    // AddImmediate tends to be used as a NOP instruction, if we detect this we can
    // completely skip the entire code path and no emit anything
    optimizer.skip_dummy_addimmediate = true;

    // SMO tends to emit a lot of unnecessary method moves, we can mitigate this by only emitting
    // one if our register isn't "dirty"
    optimizer.optimize_for_method_move = true;

    // Enable run-time assertions in JITted code
    optimizer.enable_asserts = false;

    // Check to see if we can skip emitting certain instructions
    Optimizer_ScanFlags();

    const u32 op_count = static_cast<u32>(code.size());
    for (u32 i = 0; i < op_count; i++) {
        // Make the following opcode available so per-instruction
        // optimizations (e.g. redundant method moves) can peek ahead.
        if (i < op_count - 1) {
            pc = i + 1;
            next_opcode = GetOpCode();
        } else {
            next_opcode = {};
        }
        pc = i;
        Compile_NextInstruction();
    }

    L(end_of_code);

    // Epilogue: restore callee-saved registers and return to the host caller.
    Common::X64::ABI_PopRegistersAndAdjustStack(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
    ret();
    ready();
    program = getCode<ProgramType>();
}
469
// Compiles the instruction at 'pc' and the per-instruction delay-slot/exit
// bookkeeping that follows it. Returns false when compilation of this path
// should stop (label already emitted, or an exit with no pending delay slot).
bool MacroJITx64Impl::Compile_NextInstruction() {
    const auto opcode = GetOpCode();
    // If this pc's label is already bound, the instruction was emitted before.
    if (labels[pc].getAddress()) {
        return false;
    }

    L(labels[pc]);

    switch (opcode.operation) {
    case Macro::Operation::ALU:
        Compile_ALU(opcode);
        break;
    case Macro::Operation::AddImmediate:
        Compile_AddImmediate(opcode);
        break;
    case Macro::Operation::ExtractInsert:
        Compile_ExtractInsert(opcode);
        break;
    case Macro::Operation::ExtractShiftLeftImmediate:
        Compile_ExtractShiftLeftImmediate(opcode);
        break;
    case Macro::Operation::ExtractShiftLeftRegister:
        Compile_ExtractShiftLeftRegister(opcode);
        break;
    case Macro::Operation::Read:
        Compile_Read(opcode);
        break;
    case Macro::Operation::Branch:
        Compile_Branch(opcode);
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented opcode {}", opcode.operation.Value());
        break;
    }

    if (optimizer.has_delayed_pc) {
        if (opcode.is_exit) {
            // Exit still executes one more instruction (its delay slot).
            // If no delayed branch is pending (BRANCH_HOLDER == 0), route the
            // continuation to end_of_code instead.
            mov(rax, end_of_code);
            test(BRANCH_HOLDER, BRANCH_HOLDER);
            cmove(BRANCH_HOLDER, rax);
            // Jump to next instruction to skip delay slot check
            je(labels[pc + 1], T_NEAR);
        } else {
            // TODO(ogniK): Optimize delay slot branching
            // A non-zero BRANCH_HOLDER means the instruction just emitted was
            // a delay slot: clear it and jump to the recorded continuation.
            Xbyak::Label no_delay_slot{};
            test(BRANCH_HOLDER, BRANCH_HOLDER);
            je(no_delay_slot, T_NEAR);
            mov(rax, BRANCH_HOLDER);
            xor_(BRANCH_HOLDER, BRANCH_HOLDER);
            jmp(rax);
            L(no_delay_slot);
        }
        L(delay_skip[pc]);
        if (opcode.is_exit) {
            return false;
        }
    } else {
        // Without delay slots, BRANCH_HOLDER doubles as a "finished" flag:
        // the inc below marks the macro done after the exit instruction.
        test(BRANCH_HOLDER, BRANCH_HOLDER);
        jne(end_of_code, T_NEAR);
        if (opcode.is_exit) {
            inc(BRANCH_HOLDER);
            return false;
        }
    }
    return true;
}
536
// Emits a load of the next macro parameter into eax and advances the
// parameter pointer by one u32.
Xbyak::Reg32 Tegra::MacroJITx64Impl::Compile_FetchParameter() {
    mov(eax, dword[PARAMETERS]);
    add(PARAMETERS, sizeof(u32));
    return eax;
}
542
// Emits a load of macro register 'index' into 'dst' and returns 'dst'.
Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
    if (index == 0) {
        // Register 0 is always zero
        xor_(dst, dst);
    } else {
        mov(dst, dword[STATE + offsetof(JITState, registers) + index * sizeof(u32)]);
    }

    return dst;
}
553
// Emits the per-instruction result handling: depending on the result
// operation, the value in RESULT (or a freshly fetched parameter) is stored
// to a macro register, installed as the method address, and/or sent to the
// engine as a method call.
void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
    // Emits a store of 'result' into macro register 'reg_index'.
    const auto SetRegister = [this](u32 reg_index, const Xbyak::Reg32& result) {
        // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
        // register.
        if (reg_index == 0) {
            return;
        }
        mov(dword[STATE + offsetof(JITState, registers) + reg_index * sizeof(u32)], result);
    };
    // Emits a copy of 'reg32' into the persistent method-address register.
    const auto SetMethodAddress = [this](const Xbyak::Reg32& reg32) { mov(METHOD_ADDRESS, reg32); };

    switch (operation) {
    case Macro::ResultOperation::IgnoreAndFetch:
        // Discard the result; store the next parameter instead.
        SetRegister(reg, Compile_FetchParameter());
        break;
    case Macro::ResultOperation::Move:
        SetRegister(reg, RESULT);
        break;
    case Macro::ResultOperation::MoveAndSetMethod:
        SetRegister(reg, RESULT);
        SetMethodAddress(RESULT);
        break;
    case Macro::ResultOperation::FetchAndSend:
        // Fetch parameter and send result.
        SetRegister(reg, Compile_FetchParameter());
        Compile_Send(RESULT);
        break;
    case Macro::ResultOperation::MoveAndSend:
        // Move and send result.
        SetRegister(reg, RESULT);
        Compile_Send(RESULT);
        break;
    case Macro::ResultOperation::FetchAndSetMethod:
        // Fetch parameter and use result as Method Address.
        SetRegister(reg, Compile_FetchParameter());
        SetMethodAddress(RESULT);
        break;
    case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
        // Move result and use as Method Address, then fetch and send parameter.
        SetRegister(reg, RESULT);
        SetMethodAddress(RESULT);
        Compile_Send(Compile_FetchParameter());
        break;
    case Macro::ResultOperation::MoveAndSetMethodSend:
        // Move result and use as Method Address, then send bits 12:17 of result.
        SetRegister(reg, RESULT);
        SetMethodAddress(RESULT);
        shr(RESULT, 12);
        and_(RESULT, 0b111111);
        Compile_Send(RESULT);
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented macro operation {}", operation);
    }
}
609
GetOpCode() const610 Macro::Opcode MacroJITx64Impl::GetOpCode() const {
611 ASSERT(pc < code.size());
612 return {code[pc]};
613 }
614
// Returns the subset of persistent registers that are caller-saved in the
// host ABI, i.e. the ones that must be manually preserved around calls made
// from the JITted code.
std::bitset<32> MacroJITx64Impl::PersistentCallerSavedRegs() const {
    return PERSISTENT_REGISTERS & Common::X64::ABI_ALL_CALLER_SAVED;
}
618
619 } // namespace Tegra
620