// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
#if PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)

#include "Core/MemMap.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/Config.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/x86/Jit.h"
#include "Core/MIPS/x86/RegCache.h"


#define _RS MIPS_GET_RS(op)
#define _RT MIPS_GET_RT(op)
#define _RD MIPS_GET_RD(op)
#define _FS MIPS_GET_FS(op)
#define _FT MIPS_GET_FT(op)
#define _FD MIPS_GET_FD(op)
#define _SA MIPS_GET_SA(op)
#define _POS  ((op>> 6) & 0x1F)
#define _SIZE ((op>>11) & 0x1F)
#define _IMM16 (signed short)(op & 0xFFFF)
#define _IMM26 (op & 0x03FFFFFF)
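// Illustrative example: "lw $t0, 4($a0)" encodes as 0x8C880004, which gives
// _RS = 4 ($a0), _RT = 8 ($t0) and _IMM16 = 4.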

// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non-working ones should have DISABLE.

// #define CONDITIONAL_DISABLE(flag) { Comp_Generic(op); return; }
#define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
#define DISABLE { Comp_Generic(op); return; }

namespace MIPSComp {
	using namespace Gen;

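	// Shared helper for the aligned loads (lb/lbu/lh/lhu/lw): 'mov' is the
	// widening move to emit (MOVZX or MOVSX) and 'safeFunc' is the out-of-line
	// handler used on the slow path when the address can't be read directly.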
	void Jit::CompITypeMemRead(MIPSOpcode op, u32 bits, void (XEmitter::*mov)(int, int, X64Reg, OpArg), const void *safeFunc)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;

		gpr.Lock(rt, rs);
		gpr.MapReg(rt, rt == rs, true);

		JitSafeMem safe(this, rs, offset);
		OpArg src;
		if (safe.PrepareRead(src, bits / 8))
			(this->*mov)(32, bits, gpr.RX(rt), src);
		if (safe.PrepareSlowRead(safeFunc))
			(this->*mov)(32, bits, gpr.RX(rt), R(EAX));
		safe.Finish();

		gpr.UnlockAll();
	}

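	// Narrows an immediate operand to 'bits' so that, e.g., an 8-bit store
	// never carries a 32-bit immediate.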
	static OpArg DowncastImm(OpArg in, int bits) {
		if (!in.IsImm())
			return in;
		if (in.GetImmBits() > bits) {
			in.SetImmBits(bits);
			return in;
		}
		return in;
	}

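	// Shared helper for the aligned stores (sb/sh/sw). Stores of a known
	// immediate (including $zero) are emitted directly without mapping rt.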
	void Jit::CompITypeMemWrite(MIPSOpcode op, u32 bits, const void *safeFunc)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;

		gpr.Lock(rt, rs);

		if (rt == MIPS_REG_ZERO || gpr.R(rt).IsImm()) {
			// NOTICE_LOG(JIT, "%d-bit Imm at %08x : %08x", bits, js.blockStart, (u32)gpr.R(rt).GetImmValue());
		} else {
			gpr.MapReg(rt, true, false);
		}

#if PPSSPP_ARCH(X86)
		// We use EDX so we can have DL for 8-bit ops.
		const bool needSwap = bits == 8 && !gpr.R(rt).IsSimpleReg(EDX) && !gpr.R(rt).IsSimpleReg(ECX);
		if (needSwap)
			gpr.FlushLockX(EDX);
#else
		const bool needSwap = false;
#endif

		JitSafeMem safe(this, rs, offset);
		OpArg dest;
		if (safe.PrepareWrite(dest, bits / 8))
		{
			if (needSwap)
			{
				MOV(32, R(EDX), gpr.R(rt));
				MOV(bits, dest, R(EDX));
			}
			else {
				if (rt == MIPS_REG_ZERO) {
					switch (bits) {
					case 8: MOV(8, dest, Imm8(0)); break;
					case 16: MOV(16, dest, Imm16(0)); break;
					case 32: MOV(32, dest, Imm32(0)); break;
					}
				} else {
					// The downcast is needed so we don't try to generate an 8-bit write with a 32-bit imm
					// (which might have come from an li instruction); that encoding is illegal.
					MOV(bits, dest, DowncastImm(gpr.R(rt), bits));
				}
			}
		}
		if (safe.PrepareSlowWrite())
			safe.DoSlowWrite(safeFunc, gpr.R(rt));
		safe.Finish();

		if (needSwap)
			gpr.UnlockAllX();
		gpr.UnlockAll();
	}

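	// Handles a lwl/lwr/swl/swr that couldn't be fused with its partner
	// instruction: the containing aligned word is loaded, merged with rt, and
	// (for the store variants) the merged word is written back.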
	void Jit::CompITypeMemUnpairedLR(MIPSOpcode op, bool isStore)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		MIPSGPReg rs = _RS;

		X64Reg shiftReg = ECX;
		gpr.FlushLockX(ECX, EDX);
#if PPSSPP_ARCH(AMD64)
		// On x64, we need ECX for CL, but it's also the first arg and gets lost.  Annoying.
		gpr.FlushLockX(R9);
		shiftReg = R9;
#endif

		gpr.Lock(rt, rs);
		gpr.MapReg(rt, true, !isStore);

		// Grab the offset from alignment for shifting (<< 3 for bytes -> bits.)
		MOV(32, R(shiftReg), gpr.R(rs));
		ADD(32, R(shiftReg), Imm32(offset));
		AND(32, R(shiftReg), Imm32(3));
		SHL(32, R(shiftReg), Imm8(3));
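		// e.g. an effective address with byte offset 2 ends up as a 16-bit shift.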

		{
			JitSafeMem safe(this, rs, offset, ~3);
			OpArg src;
			if (safe.PrepareRead(src, 4))
			{
				if (!src.IsSimpleReg(EAX))
					MOV(32, R(EAX), src);

				CompITypeMemUnpairedLRInner(op, shiftReg);
			}
			if (safe.PrepareSlowRead(safeMemFuncs.readU32))
				CompITypeMemUnpairedLRInner(op, shiftReg);
			safe.Finish();
		}

		// For store ops, write EDX back to memory.
		if (isStore)
		{
			JitSafeMem safe(this, rs, offset, ~3);
			OpArg dest;
			if (safe.PrepareWrite(dest, 4))
				MOV(32, dest, R(EDX));
			if (safe.PrepareSlowWrite())
				safe.DoSlowWrite(safeMemFuncs.writeU32, R(EDX));
			safe.Finish();
		}

		gpr.UnlockAll();
		gpr.UnlockAllX();
	}

	void Jit::CompITypeMemUnpairedLRInner(MIPSOpcode op, X64Reg shiftReg)
	{
		CONDITIONAL_DISABLE(LSU);
		int o = op>>26;
		MIPSGPReg rt = _RT;

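		// With shift = 8 * (address & 3) and mem the aligned word in EAX, on the
		// PSP's little-endian memory these ops boil down to:
		//   lwl: rt  = (rt  & (0x00ffffff >> shift)) | (mem << (24 - shift))
		//   lwr: rt  = (rt  & (0xffffff00 << (24 - shift))) | (mem >> shift)
		//   swl: mem = (mem & (0xffffff00 << shift)) | (rt >> (24 - shift))
		//   swr: mem = (mem & (0x00ffffff >> (24 - shift))) | (rt << shift)
		// The two switches below emit these merges in two halves; for stores the
		// caller writes the merged value (left in EDX) back to memory.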
		// Make sure we have the shift for the target in ECX.
		if (shiftReg != ECX)
			MOV(32, R(ECX), R(shiftReg));

		// Now use that shift (left on target, right on source.)
		switch (o)
		{
		case 34: //lwl
			MOV(32, R(EDX), Imm32(0x00ffffff));
			SHR(32, R(EDX), R(CL));
			AND(32, gpr.R(rt), R(EDX));
			break;

		case 38: //lwr
			SHR(32, R(EAX), R(CL));
			break;

		case 42: //swl
			MOV(32, R(EDX), Imm32(0xffffff00));
			SHL(32, R(EDX), R(CL));
			AND(32, R(EAX), R(EDX));
			break;

		case 46: //swr
			MOV(32, R(EDX), gpr.R(rt));
			SHL(32, R(EDX), R(CL));
			// EDX is already the target value to write, but may be overwritten below.  Save it.
			PUSH(EDX);
			break;

		default:
			_dbg_assert_msg_(false, "Unsupported left/right load/store instruction.");
		}

		// Flip ECX around from 3 bytes / 24 bits.
		if (shiftReg == ECX)
		{
			MOV(32, R(EDX), Imm32(24));
			SUB(32, R(EDX), R(ECX));
			MOV(32, R(ECX), R(EDX));
		}
		else
		{
			MOV(32, R(ECX), Imm32(24));
			SUB(32, R(ECX), R(shiftReg));
		}

		// Use the flipped shift (left on source, right on target) and write target.
		switch (o)
		{
		case 34: //lwl
			SHL(32, R(EAX), R(CL));

			OR(32, gpr.R(rt), R(EAX));
			break;

		case 38: //lwr
			MOV(32, R(EDX), Imm32(0xffffff00));
			SHL(32, R(EDX), R(CL));
			AND(32, gpr.R(rt), R(EDX));

			OR(32, gpr.R(rt), R(EAX));
			break;

		case 42: //swl
			MOV(32, R(EDX), gpr.R(rt));
			SHR(32, R(EDX), R(CL));

			OR(32, R(EDX), R(EAX));
			break;

		case 46: //swr
			MOV(32, R(EDX), Imm32(0x00ffffff));
			SHR(32, R(EDX), R(CL));
			AND(32, R(EAX), R(EDX));

			// This is the target value we saved earlier.
			POP(EDX);
			OR(32, R(EDX), R(EAX));
			break;

		default:
			_dbg_assert_msg_(false, "Unsupported left/right load/store instruction.");
		}
	}

	void Jit::Comp_ITypeMem(MIPSOpcode op)
	{
		CONDITIONAL_DISABLE(LSU);
		int offset = _IMM16;
		MIPSGPReg rt = _RT;
		int o = op>>26;
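		// Bit 29 of the opcode distinguishes stores (0b101xxx) from loads (0b100xxx).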
		if (((op >> 29) & 1) == 0 && rt == MIPS_REG_ZERO) {
			// Don't load anything into $zr
			return;
		}

		switch (o)
		{
		case 37: //R(rt) = ReadMem16(addr); break; //lhu
			CompITypeMemRead(op, 16, &XEmitter::MOVZX, safeMemFuncs.readU16);
			break;

		case 36: //R(rt) = ReadMem8 (addr); break; //lbu
			CompITypeMemRead(op, 8, &XEmitter::MOVZX,  safeMemFuncs.readU8);
			break;

		case 35: //R(rt) = ReadMem32(addr); break; //lw
			CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
			break;

		case 32: //R(rt) = SignExtend8ToU32 (ReadMem8 (addr)); break; //lb
			CompITypeMemRead(op, 8, &XEmitter::MOVSX, safeMemFuncs.readU8);
			break;

		case 33: //R(rt) = SignExtend16ToU32(ReadMem16(addr)); break; //lh
			CompITypeMemRead(op, 16, &XEmitter::MOVSX, safeMemFuncs.readU16);
			break;

		case 40: //WriteMem8 (addr, R(rt)); break; //sb
			CompITypeMemWrite(op, 8, safeMemFuncs.writeU8);
			break;

		case 41: //WriteMem16(addr, R(rt)); break; //sh
			CompITypeMemWrite(op, 16, safeMemFuncs.writeU16);
			break;

		case 43: //WriteMem32(addr, R(rt)); break; //sw
			CompITypeMemWrite(op, 32, safeMemFuncs.writeU32);
			break;

		case 34: //lwl
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for lwr rd, offset-3(rs) which makes a pair.
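				// e.g. "lwl $t0, 3($a0)" pairs with "lwr $t0, 0($a0)": same rs/rt,
				// opcode bumped from 34 to 38 (the + (4 << 26)), offset reduced by 3.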
				u32 desiredOp = ((op & 0xFFFF0000) + (4 << 26)) + (offset - 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					EatInstruction(nextOp);
					// nextOp has the correct address.
					CompITypeMemRead(nextOp, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
				}
				else
					CompITypeMemUnpairedLR(op, false);
			}
			break;

		case 38: //lwr
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for lwl rd, offset+3(rs) which makes a pair.
				u32 desiredOp = ((op & 0xFFFF0000) - (4 << 26)) + (offset + 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					EatInstruction(nextOp);
					// op has the correct address.
					CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
				}
				else
					CompITypeMemUnpairedLR(op, false);
			}
			break;

		case 42: //swl
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for swr rd, offset-3(rs) which makes a pair.
				u32 desiredOp = ((op & 0xFFFF0000) + (4 << 26)) + (offset - 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					EatInstruction(nextOp);
					// nextOp has the correct address.
					CompITypeMemWrite(nextOp, 32, safeMemFuncs.writeU32);
				}
				else
					CompITypeMemUnpairedLR(op, true);
			}
			break;

		case 46: //swr
			{
				MIPSOpcode nextOp = GetOffsetInstruction(1);
				// Looking for swl rd, offset+3(rs) which makes a pair.
				u32 desiredOp = ((op & 0xFFFF0000) - (4 << 26)) + (offset + 3);
				if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
				{
					EatInstruction(nextOp);
					// op has the correct address.
					CompITypeMemWrite(op, 32, safeMemFuncs.writeU32);
				}
				else
					CompITypeMemUnpairedLR(op, true);
			}
			break;

		default:
			Comp_Generic(op);
			return;
		}

	}

	void Jit::Comp_Cache(MIPSOpcode op) {
		CONDITIONAL_DISABLE(LSU);

		int func = (op >> 16) & 0x1F;

		// See Int_Cache for the definitions.
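		// These appear to be dcache hint ops (roughly: create dirty exclusive,
		// hit invalidate, hit writeback invalidate, and fill); since this JIT
		// doesn't model the data cache, they can be treated as no-ops.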
		switch (func) {
		case 24: break;
		case 25: break;
		case 27: break;
		case 30: break;
		default:
			// Fall back to the interpreter.
			DISABLE;
		}
	}
}

#endif // PPSSPP_ARCH(X86) || PPSSPP_ARCH(AMD64)
