/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include "frontend/A64/translate/impl/impl.h"

namespace Dynarmic::A64 {

static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt) {
    IR::MemOp memop;
    bool signed_ = false;
    size_t regsize = 0;

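    // Shared decode of the size:opc fields for the load/store register (immediate)
    // encodings: opc<1> == 0 selects a plain store/load whose register width follows
    // size, size == 0b11 with opc<1> == 1 selects a prefetch, and the remaining
    // combinations are sign-extending loads whose destination width is chosen by opc<0>.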
    if (opc.Bit<1>() == 0) {
        memop = opc.Bit<0>() ? IR::MemOp::LOAD : IR::MemOp::STORE;
        regsize = size == 0b11 ? 64 : 32;
        signed_ = false;
    } else if (size == 0b11) {
        memop = IR::MemOp::PREFETCH;
        ASSERT(!opc.Bit<0>());
    } else {
        memop = IR::MemOp::LOAD;
        ASSERT(!(size == 0b10 && opc.Bit<0>() == 1));
        regsize = opc.Bit<0>() ? 32 : 64;
        signed_ = true;
    }

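    // Write-back where Rn == Rt (and Rn is not R31) is CONSTRAINED UNPREDICTABLE;
    // treat it as an unpredictable instruction.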
    if (memop == IR::MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
        return v.UnpredictableInstruction();
    }
    if (memop == IR::MemOp::STORE && wback && Rn == Rt && Rn != Reg::R31) {
        return v.UnpredictableInstruction();
    }

    // TODO: Check SP alignment
    IR::U64 address = Rn == Reg::SP ? IR::U64(v.SP(64)) : IR::U64(v.X(64, Rn));
    if (!postindex) {
        address = v.ir.Add(address, v.ir.Imm64(offset));
    }

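    // Access size in bits: scale 0..3 yields 8/16/32/64-bit accesses
    // (e.g. scale == 3, i.e. size == 0b11, is an 8-byte access).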
    const size_t datasize = 8 << scale;
    switch (memop) {
    case IR::MemOp::STORE: {
        const auto data = v.X(datasize, Rt);
        v.Mem(address, datasize / 8, IR::AccType::NORMAL, data);
        break;
    }
    case IR::MemOp::LOAD: {
        const auto data = v.Mem(address, datasize / 8, IR::AccType::NORMAL);
        if (signed_) {
            v.X(regsize, Rt, v.SignExtend(data, regsize));
        } else {
            v.X(regsize, Rt, v.ZeroExtend(data, regsize));
        }
        break;
    }
    case IR::MemOp::PREFETCH:
        // Prefetch(address, Rt)
        break;
    }

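    // Pre-index forms already applied the offset before the access; post-index
    // forms apply it here, before the updated address is written back to Rn/SP.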
    if (wback) {
        if (postindex) {
            address = v.ir.Add(address, v.ir.Imm64(offset));
        }

        if (Rn == Reg::SP) {
            v.SP(64, address);
        } else {
            v.X(64, Rn, address);
        }
    }

    return true;
}

bool TranslatorVisitor::STRx_LDRx_imm_1(Imm<2> size, Imm<2> opc, Imm<9> imm9, bool not_postindex, Reg Rn, Reg Rt) {
    const bool wback = true;
    const bool postindex = !not_postindex;
    const size_t scale = size.ZeroExtend<size_t>();
    const u64 offset = imm9.SignExtend<u64>();

    return LoadStoreRegisterImmediate(*this, wback, postindex, scale, offset, size, opc, Rn, Rt);
}

bool TranslatorVisitor::STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12, Reg Rn, Reg Rt) {
    const bool wback = false;
    const bool postindex = false;
    const size_t scale = size.ZeroExtend<size_t>();
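    // In the unsigned-offset form, the 12-bit immediate is scaled by the access size.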
    const u64 offset = imm12.ZeroExtend<u64>() << scale;

    return LoadStoreRegisterImmediate(*this, wback, postindex, scale, offset, size, opc, Rn, Rt);
}

bool TranslatorVisitor::STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
    const bool wback = false;
    const bool postindex = false;
    const size_t scale = size.ZeroExtend<size_t>();
    const u64 offset = imm9.SignExtend<u64>();

    return LoadStoreRegisterImmediate(*this, wback, postindex, scale, offset, size, opc, Rn, Rt);
}

bool TranslatorVisitor::PRFM_imm([[maybe_unused]] Imm<12> imm12, [[maybe_unused]] Reg Rn, [[maybe_unused]] Reg Rt) {
    // Currently a NOP (which is valid behavior, as indicated by
    // the ARMv8 architecture reference manual)
    return true;
}

bool TranslatorVisitor::PRFM_unscaled_imm([[maybe_unused]] Imm<9> imm9, [[maybe_unused]] Reg Rn, [[maybe_unused]] Reg Rt) {
    // Currently a NOP (which is valid behavior, as indicated by
    // the ARMv8 architecture reference manual)
    return true;
}

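// Helper shared by the FP/SIMD register load/store forms below. Unlike the GPR helper
// above, scale may be as large as 4, giving access sizes from 8 up to 128 bits.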
static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, IR::MemOp memop, Reg Rn, Vec Vt) {
    const auto acctype = IR::AccType::VEC;
    const size_t datasize = 8 << scale;

    IR::U64 address;
    if (Rn == Reg::SP) {
        // TODO: Check SP Alignment
        address = v.SP(64);
    } else {
        address = v.X(64, Rn);
    }

    if (!postindex) {
        address = v.ir.Add(address, v.ir.Imm64(offset));
    }

    switch (memop) {
    case IR::MemOp::STORE:
        if (datasize == 128) {
            const IR::U128 data = v.V(128, Vt);
            v.Mem(address, 16, acctype, data);
        } else {
            const IR::UAny data = v.ir.VectorGetElement(datasize, v.V(128, Vt), 0);
            v.Mem(address, datasize / 8, acctype, data);
        }
        break;
    case IR::MemOp::LOAD:
        if (datasize == 128) {
            const IR::U128 data = v.Mem(address, 16, acctype);
            v.V(128, Vt, data);
        } else {
            const IR::UAny data = v.Mem(address, datasize / 8, acctype);
            v.V(128, Vt, v.ir.ZeroExtendToQuad(data));
        }
        break;
    default:
        UNREACHABLE();
    }

    if (wback) {
        if (postindex) {
            address = v.ir.Add(address, v.ir.Imm64(offset));
        }

        if (Rn == Reg::SP) {
            v.SP(64, address);
        } else {
            v.X(64, Rn, address);
        }
    }

    return true;
}

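// For the FP/SIMD forms, the access-size scale is the concatenation of opc_1 and size;
// a scale above 4 would imply an access wider than 128 bits, which is unallocated.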
bool TranslatorVisitor::STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
    const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
    if (scale > 4) {
        return UnallocatedEncoding();
    }

    const bool wback = true;
    const bool postindex = !not_postindex;
    const u64 offset = imm9.SignExtend<u64>();

    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
}

bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
    const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
    if (scale > 4) {
        return UnallocatedEncoding();
    }

    const bool wback = false;
    const bool postindex = false;
    const u64 offset = imm12.ZeroExtend<u64>() << scale;

    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
}

bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
    const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
    if (scale > 4) {
        return UnallocatedEncoding();
    }

    const bool wback = true;
    const bool postindex = !not_postindex;
    const u64 offset = imm9.SignExtend<u64>();

    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
}

bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
    const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
    if (scale > 4) {
        return UnallocatedEncoding();
    }

    const bool wback = false;
    const bool postindex = false;
    const u64 offset = imm12.ZeroExtend<u64>() << scale;

    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
}

bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
    const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
    if (scale > 4) {
        return UnallocatedEncoding();
    }

    const bool wback = false;
    const bool postindex = false;
    const u64 offset = imm9.SignExtend<u64>();

    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
}

bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
    const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
    if (scale > 4) {
        return UnallocatedEncoding();
    }

    const bool wback = false;
    const bool postindex = false;
    const u64 offset = imm9.SignExtend<u64>();

    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
}

} // namespace Dynarmic::A64