// Copyright (c) 2013- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

// NEON VFPU
// This is where we will create an alternate implementation of the VFPU emulation
// that uses NEON Q registers to cache pairs/tris/quads, and so on.
// Will require major extensions to the reg cache and other things.

// ARM NEON can only do pairs and quads, not tris and scalars.
// We could still handle scalars for many operations using regular VFP instructions,
// as long as all the operands live below Q8 (i.e. below D16/S32), but it's not clear
// that it would be worth it.
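// For reference (ARM32 register aliasing): S0-S31 overlap D0-D15, which in turn overlap
// Q0-Q7, so anything cached in Q8-Q15 (D16-D31) has no S-register view and cannot be
// touched with plain VFP instructions.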

#include "ppsspp_config.h"
#if PPSSPP_ARCH(ARM)

#include <cmath>

#include "Common/Data/Convert/SmallDataConvert.h"
#include "Common/Math/math_util.h"

#include "Common/CPUDetect.h"
#include "Core/MemMap.h"
#include "Core/MIPS/MIPS.h"
#include "Core/MIPS/MIPSAnalyst.h"
#include "Core/MIPS/MIPSCodeUtils.h"
#include "Core/MIPS/MIPSVFPUUtils.h"
#include "Core/Config.h"
#include "Core/Reporting.h"

#include "Core/MIPS/ARM/ArmJit.h"
#include "Core/MIPS/ARM/ArmRegCache.h"
#include "Core/MIPS/ARM/ArmRegCacheFPU.h"
#include "Core/MIPS/ARM/ArmCompVFPUNEONUtil.h"

// TODO: Somehow #ifdef away on ARMv5eabi, without breaking the linker.

// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non working ones should have DISABLE.

// #define CONDITIONAL_DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
#define DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }
#define DISABLE_UNKNOWN_PREFIX { WARN_LOG(JIT, "DISABLE: Unknown Prefix in %s", __FUNCTION__); fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }

#define _RS MIPS_GET_RS(op)
#define _RT MIPS_GET_RT(op)
#define _RD MIPS_GET_RD(op)
#define _FS MIPS_GET_FS(op)
#define _FT MIPS_GET_FT(op)
#define _FD MIPS_GET_FD(op)
#define _SA MIPS_GET_SA(op)
#define _POS  ((op>> 6) & 0x1F)
#define _SIZE ((op>>11) & 0x1F)
#define _IMM16 (signed short)(op & 0xFFFF)
#define _IMM26 (op & 0x03FFFFFF)


namespace MIPSComp {

using namespace ArmGen;
using namespace ArmJitConstants;

static const float minus_one = -1.0f;
static const float one = 1.0f;
static const float zero = 0.0f;


void ArmJit::CompNEON_VecDo3(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	MappedRegs r = NEONMapDirtyInIn(op, sz, sz, sz);
	ARMReg temp = MatchSize(Q0, r.vs);
	// TODO: Special case for scalar
	switch (op >> 26) {
	case 24: //VFPU0
		switch ((op >> 23) & 7) {
		case 0: VADD(F_32, r.vd, r.vs, r.vt); break; // vadd
		case 1: VSUB(F_32, r.vd, r.vs, r.vt); break; // vsub
		case 7: // vdiv - there is no NEON SIMD VDIV :(  There's a fast reciprocal estimate/step thing though.
			{
				// Implement by falling back to VFP
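				// Lane mapping for the VFP fallback (sketch): after the two VMOVs,
				// D0 = { vs[0], vs[1] } aliases S0,S1 and D1 = { vt[0], vt[1] } aliases S2,S3,
				// so VDIV(S0, S0, S2) handles lane 0 and VDIV(S1, S1, S3) lane 1; the D_1
				// halves then repeat the same trick for lanes 2 and 3.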
				VMOV(D0, D_0(r.vs));
				VMOV(D1, D_0(r.vt));
				VDIV(S0, S0, S2);
				if (sz >= V_Pair)
					VDIV(S1, S1, S3);
				VMOV(D_0(r.vd), D0);
				if (sz >= V_Triple) {
					VMOV(D0, D_1(r.vs));
					VMOV(D1, D_1(r.vt));
					VDIV(S0, S0, S2);
					if (sz == V_Quad)
						VDIV(S1, S1, S3);
					VMOV(D_1(r.vd), D0);
				}
			}
			break;
		default:
			DISABLE;
		}
		break;
	case 25: //VFPU1
		switch ((op >> 23) & 7) {
		case 0: VMUL(F_32, r.vd, r.vs, r.vt); break;  // vmul
		default:
			DISABLE;
		}
		break;
	case 27: //VFPU3
		switch ((op >> 23) & 7)	{
		case 2: VMIN(F_32, r.vd, r.vs, r.vt); break;   // vmin
		case 3: VMAX(F_32, r.vd, r.vs, r.vt); break;   // vmax
		case 6:  // vsge
			VMOV_immf(temp, 1.0f);
			VCGE(F_32, r.vd, r.vs, r.vt);
			VAND(r.vd, r.vd, temp);
			break;
		case 7:  // vslt
			VMOV_immf(temp, 1.0f);
			VCLT(F_32, r.vd, r.vs, r.vt);
			VAND(r.vd, r.vd, temp);
			break;
		}
		break;

	default:
		DISABLE;
	}

	NEONApplyPrefixD(r.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}


// #define CONDITIONAL_DISABLE { fpr.ReleaseSpillLocksAndDiscardTemps(); Comp_Generic(op); return; }

void ArmJit::CompNEON_SV(MIPSOpcode op) {
	CONDITIONAL_DISABLE(LSU_VFPU);
	CheckMemoryBreakpoint();

	// Remember to use single lane stores here and not VLDR/VSTR - switching usage
	// between NEON and VFPU can be expensive on some chips.

	// Here's a common idiom we should optimize:
	// lv.s S200, 0(s4)
	// lv.s S201, 4(s4)
	// lv.s S202, 8(s4)
	// vone.s S203
	// vtfm4.q C000, E600, C200
	// Would be great if we could somehow combine the lv.s into one vector instead of mapping three
	// separate quads.

	s32 offset = (signed short)(op & 0xFFFC);
	int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
	MIPSGPReg rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 50: //lv.s  // VI(vt) = Memory::Read_U32(addr);
		{
			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
				INFO_LOG(HLE, "LV.S fastmode!");
				// TODO: Also look forward and combine multiple loads.
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Single, MAP_NOINIT | MAP_DIRTY);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VLD1_lane(F_32, ar, R0, 0, true);
				} else {
					VLD1_lane(F_32, ar, gpr.RPtr(rs), 0, true);
				}
				break;
			}
			INFO_LOG(HLE, "LV.S slowmode!");

			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Single, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VLD1_lane(F_32, ar, R0, 0, true);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;

	case 58: //sv.s   // Memory::Write_U32(VI(vt), addr);
		{
			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && (offset & 3) == 0 && offset < 0x400 && offset > -0x400) {
				INFO_LOG(HLE, "SV.S fastmode!");
				// TODO: Also look forward and combine multiple stores.
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Single, 0);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VST1_lane(F_32, ar, R0, 0, true);
				} else {
					VST1_lane(F_32, ar, gpr.RPtr(rs), 0, true);
				}
				break;
			}

			INFO_LOG(HLE, "SV.S slowmode!");
			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Single, 0);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VST1_lane(F_32, ar, R0, 0, true);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

inline int MIPS_GET_VQVT(u32 op) {
	return (((op >> 16) & 0x1f)) | ((op & 1) << 5);
}

void ArmJit::CompNEON_SVQ(MIPSOpcode op) {
	CONDITIONAL_DISABLE(LSU_VFPU);
	CheckMemoryBreakpoint();

	int offset = (signed short)(op & 0xFFFC);
	int vt = MIPS_GET_VQVT(op.encoding);
	MIPSGPReg rs = _RS;
	bool doCheck = false;
	switch (op >> 26)
	{
	case 54: //lv.q
		{
			// Check for four-in-a-row
			const u32 ops[4] = {
				op.encoding,
				GetOffsetInstruction(1).encoding,
				GetOffsetInstruction(2).encoding,
				GetOffsetInstruction(3).encoding,
			};
			if (g_Config.bFastMemory && (ops[1] >> 26) == 54 && (ops[2] >> 26) == 54 && (ops[3] >> 26) == 54) {
				int offsets[4] = {offset, (s16)(ops[1] & 0xFFFC), (s16)(ops[2] & 0xFFFC), (s16)(ops[3] & 0xFFFC)};
				int rss[4] = {MIPS_GET_RS(op), MIPS_GET_RS(ops[1]), MIPS_GET_RS(ops[2]), MIPS_GET_RS(ops[3])};
				if (offsets[1] == offset + 16 && offsets[2] == offsets[1] + 16 && offsets[3] == offsets[2] + 16 &&
					  rss[0] == rss[1] && rss[1] == rss[2] && rss[2] == rss[3]) {
					int vts[4] = {MIPS_GET_VQVT(op.encoding), MIPS_GET_VQVT(ops[1]), MIPS_GET_VQVT(ops[2]), MIPS_GET_VQVT(ops[3])};
					// TODO: Also check the destination registers!
					// Detected four consecutive ones!
					// gpr.MapRegAsPointer(rs);
					// fpr.QLoad4x4(vts[4], rs, offset);
					INFO_LOG(JIT, "Matrix load detected! TODO: optimize");
					// break;
				}
			}

			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && offset < 0x400-16 && offset > -0x400-16) {
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Quad, MAP_DIRTY | MAP_NOINIT);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VLD1(F_32, ar, R0, 2, ALIGN_128);
				} else {
					VLD1(F_32, ar, gpr.RPtr(rs), 2, ALIGN_128);
				}
				break;
			}

			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Quad, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}

			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}

			VLD1(F_32, ar, R0, 2, ALIGN_128);

			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;

	case 62: //sv.q
		{
			const u32 ops[4] = {
				op.encoding,
				GetOffsetInstruction(1).encoding,
				GetOffsetInstruction(2).encoding,
				GetOffsetInstruction(3).encoding,
			};
			if (g_Config.bFastMemory && (ops[1] >> 26) == 62 && (ops[2] >> 26) == 62 && (ops[3] >> 26) == 62) {
				int offsets[4] = { offset, (s16)(ops[1] & 0xFFFC), (s16)(ops[2] & 0xFFFC), (s16)(ops[3] & 0xFFFC) };
				int rss[4] = { MIPS_GET_RS(op), MIPS_GET_RS(ops[1]), MIPS_GET_RS(ops[2]), MIPS_GET_RS(ops[3]) };
				if (offsets[1] == offset + 16 && offsets[2] == offsets[1] + 16 && offsets[3] == offsets[2] + 16 &&
					rss[0] == rss[1] && rss[1] == rss[2] && rss[2] == rss[3]) {
					int vts[4] = { MIPS_GET_VQVT(op.encoding), MIPS_GET_VQVT(ops[1]), MIPS_GET_VQVT(ops[2]), MIPS_GET_VQVT(ops[3]) };
					// TODO: Also check the destination registers!
					// Detected four consecutive ones!
					// gpr.MapRegAsPointer(rs);
					// fpr.QLoad4x4(vts[4], rs, offset);
					INFO_LOG(JIT, "Matrix store detected! TODO: optimize");
					// break;
				}
			}

			if (!gpr.IsImm(rs) && jo.cachePointers && g_Config.bFastMemory && offset < 0x400-16 && offset > -0x400-16) {
				gpr.MapRegAsPointer(rs);
				ARMReg ar = fpr.QMapReg(vt, V_Quad, 0);
				if (offset) {
					ADDI2R(R0, gpr.RPtr(rs), offset, R1);
					VST1(F_32, ar, R0, 2, ALIGN_128);
				} else {
					VST1(F_32, ar, gpr.RPtr(rs), 2, ALIGN_128);
				}
				break;
			}

			// CC might be set by slow path below, so load regs first.
			ARMReg ar = fpr.QMapReg(vt, V_Quad, 0);

			if (gpr.IsImm(rs)) {
				u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
				gpr.SetRegImm(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, offset);
				} else {
					SetCCAndR0ForSafeAddress(rs, offset, R1);
					doCheck = true;
				}
				ADD(R0, R0, MEMBASEREG);
			}

			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}

			VST1(F_32, ar, R0, 2, ALIGN_128);

			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
		}
		break;

	default:
		DISABLE;
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VVectorInit(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	// WARNING: No prefix support!
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}
	VectorSize sz = GetVecSize(op);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_NOINIT | MAP_DIRTY);

	switch ((op >> 16) & 0xF) {
	case 6:  // vzero
		VEOR(vd.rd, vd.rd, vd.rd);
		break;
	case 7:  // vone
		VMOV_immf(vd.rd, 1.0f);
		break;
	default:
		DISABLE;
		break;
	}
	NEONApplyPrefixD(vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VDot(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	MappedRegs r = NEONMapDirtyInIn(op, V_Single, sz, sz);

	switch (sz) {
	case V_Pair:
		VMUL(F_32, r.vd, r.vs, r.vt);
		VPADD(F_32, r.vd, r.vd, r.vd);
		break;
	case V_Triple:
		VMUL(F_32, Q0, r.vs, r.vt);
		VPADD(F_32, D0, D0, D0);
		VADD(F_32, r.vd, D0, D1);
		break;
	case V_Quad:
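		// Quad dot product (sketch): multiply the low halves, accumulate the high halves
		// into the same D register, then one VPADD folds the two partial sums into both
		// lanes of r.vd.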
		VMUL(F_32, D0, D_0(r.vs), D_0(r.vt));
		VMLA(F_32, D0, D_1(r.vs), D_1(r.vt));
		VPADD(F_32, r.vd, D0, D0);
		break;
	case V_Single:
	case V_Invalid:
		;
	}

	NEONApplyPrefixD(r.vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}


void ArmJit::CompNEON_VHdp(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	DISABLE;

	// Similar to VDot but the last component is just t[n-1] instead of s[n-1] * t[n-1].
	// A bit tricky on NEON...
}

void ArmJit::CompNEON_VScl(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	MappedRegs r = NEONMapDirtyInIn(op, sz, sz, V_Single);

	ARMReg temp = MatchSize(Q0, r.vt);

	// TODO: VMUL_scalar directly when possible
	VMOV_neon(temp, r.vt);
	VMUL_scalar(F_32, r.vd, r.vs, DScalar(Q0, 0));

	NEONApplyPrefixD(r.vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VV2Op(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	// Pre-processing: Eliminate silly no-op VMOVs, common in Wipeout Pure
	if (((op >> 16) & 0x1f) == 0 && _VS == _VD && js.HasNoPrefix()) {
		return;
	}

	// Must bail before we start mapping registers.
	switch ((op >> 16) & 0x1f) {
	case 0: // d[i] = s[i]; break; //vmov
	case 1: // d[i] = fabsf(s[i]); break; //vabs
	case 2: // d[i] = -s[i]; break; //vneg
	case 17: // d[i] = 1.0f / sqrtf(s[i]); break; //vrsq
		break;

	default:
		DISABLE;
		break;
	}

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	MappedRegs r = NEONMapDirtyIn(op, sz, sz);

	ARMReg temp = MatchSize(Q0, r.vs);

	switch ((op >> 16) & 0x1f) {
	case 0: // d[i] = s[i]; break; //vmov
		// Probably for swizzle.
		VMOV_neon(r.vd, r.vs);
		break;
	case 1: // d[i] = fabsf(s[i]); break; //vabs
		VABS(F_32, r.vd, r.vs);
		break;
	case 2: // d[i] = -s[i]; break; //vneg
		VNEG(F_32, r.vd, r.vs);
		break;

	case 4: // if (s[i] < 0) d[i] = 0; else {if(s[i] > 1.0f) d[i] = 1.0f; else d[i] = s[i];} break;    // vsat0
		if (IsD(r.vd)) {
			VMOV_immf(D0, 0.0f);
			VMOV_immf(D1, 1.0f);
			VMAX(F_32, r.vd, r.vs, D0);
			VMIN(F_32, r.vd, r.vd, D1);
		} else {
			VMOV_immf(Q0, 1.0f);
			VMIN(F_32, r.vd, r.vs, Q0);
			VMOV_immf(Q0, 0.0f);
			VMAX(F_32, r.vd, r.vd, Q0);
		}
		break;
	case 5: // if (s[i] < -1.0f) d[i] = -1.0f; else {if(s[i] > 1.0f) d[i] = 1.0f; else d[i] = s[i];} break;  // vsat1
		if (IsD(r.vd)) {
			VMOV_immf(D0, -1.0f);
			VMOV_immf(D1, 1.0f);
			VMAX(F_32, r.vd, r.vs, D0);
			VMIN(F_32, r.vd, r.vd, D1);
		} else {
			VMOV_immf(Q0, 1.0f);
			VMIN(F_32, r.vd, r.vs, Q0);
			VMOV_immf(Q0, -1.0f);
			VMAX(F_32, r.vd, r.vd, Q0);
		}
		break;

	case 16: // d[i] = 1.0f / s[i]; break; //vrcp
		// Can just fallback to VFP and use VDIV.
		DISABLE;
		{
			ARMReg temp2 = fpr.QAllocTemp(sz);
			// Needs iterations on NEON. And two temps - which is a problem if vs == vd! Argh!
			VRECPE(F_32, temp, r.vs);
			VRECPS(temp2, r.vs, temp);
			VMUL(F_32, temp2, temp2, temp);
			VRECPS(temp2, r.vs, temp);
			VMUL(F_32, temp2, temp2, temp);
		}
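		// Note: to match the reference sequence below, the second VRECPS step should presumably
		// feed the refined estimate (temp2) back in rather than the raw VRECPE result; left
		// alone here since this whole path is disabled anyway.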
		// http://stackoverflow.com/questions/6759897/how-to-divide-in-neon-intrinsics-by-a-float-number
		// reciprocal = vrecpeq_f32(b);
		// reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
		// reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
		DISABLE;
		break;

	case 17: // d[i] = 1.0f / sqrtf(s[i]); break; //vrsq
		DISABLE;
		// Needs iterations on NEON
		{
			if (true) {
				// Not-very-accurate estimate
				VRSQRTE(F_32, r.vd, r.vs);
			} else {
				ARMReg temp2 = fpr.QAllocTemp(sz);
				// TODO: It's likely that some games will require one or two Newton-Raphson
				// iterations to refine the estimate.
				VRSQRTE(F_32, temp, r.vs);
				VRSQRTS(temp2, r.vs, temp);
				VMUL(F_32, r.vd, temp2, temp);
				//VRSQRTS(temp2, r.vs, temp);
				// VMUL(F_32, r.vd, temp2, temp);
			}
		}
		break;
	case 18: // d[i] = sinf((float)M_PI_2 * s[i]); break; //vsin
		DISABLE;
		break;
	case 19: // d[i] = cosf((float)M_PI_2 * s[i]); break; //vcos
		DISABLE;
		break;
	case 20: // d[i] = powf(2.0f, s[i]); break; //vexp2
		DISABLE;
		break;
	case 21: // d[i] = logf(s[i])/log(2.0f); break; //vlog2
		DISABLE;
		break;
	case 22: // d[i] = sqrtf(s[i]); break; //vsqrt
		// Let's just defer to VFP for now. Better than calling the interpreter for sure.
		VMOV_neon(MatchSize(Q0, r.vs), r.vs);
		for (int i = 0; i < n; i++) {
			VSQRT((ARMReg)(S0 + i), (ARMReg)(S0 + i));
		}
		VMOV_neon(r.vd, MatchSize(Q0, r.vd));  // Copy the result back out to vd (dest first).
		break;
	case 23: // d[i] = asinf(s[i] * (float)M_2_PI); break; //vasin
		DISABLE;
		break;
	case 24: // d[i] = -1.0f / s[i]; break; // vnrcp
		// Needs iterations on NEON. Just do the same as vrcp and negate.
		DISABLE;
		break;
	case 26: // d[i] = -sinf((float)M_PI_2 * s[i]); break; // vnsin
		DISABLE;
		break;
	case 28: // d[i] = 1.0f / expf(s[i] * (float)M_LOG2E); break; // vrexp2
		DISABLE;
		break;
	default:
		DISABLE;
		break;
	}

	NEONApplyPrefixD(r.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Mftv(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	int imm = op & 0xFF;
	MIPSGPReg rt = _RT;
	switch ((op >> 21) & 0x1f) {
	case 3: //mfv / mfvc
		// rt = 0, imm = 255 appears to be used as a CPU interlock by some games.
		if (rt != 0) {
			if (imm < 128) {  //R(rt) = VI(imm);
				ARMReg r = fpr.QMapReg(imm, V_Single, MAP_READ);
				gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
				// TODO: Gotta be a faster way
				VMOV_neon(MatchSize(Q0, r), r);
				VMOV(gpr.R(rt), S0);
			} else if (imm < 128 + VFPU_CTRL_MAX) { //mfvc
				// In case we have a saved prefix.
				FlushPrefixV();
				if (imm - 128 == VFPU_CTRL_CC) {
					gpr.MapDirtyIn(rt, MIPS_REG_VFPUCC);
					MOV(gpr.R(rt), gpr.R(MIPS_REG_VFPUCC));
				} else {
					gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
					LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
				}
			} else {
				//ERROR - maybe need to make this value too an "interlock" value?
				ERROR_LOG(CPU, "mfv - invalid register %i", imm);
			}
		}
		break;

	case 7: // mtv
		if (imm < 128) {
			// TODO: It's pretty common that this is preceded by mfc1, that is, a value is being
			// moved from the regular floating point registers. It would probably be faster to do
			// the copy directly in the FPRs instead of going through the GPRs.

			ARMReg r = fpr.QMapReg(imm, V_Single, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsMapped(rt)) {
				VMOV(S0, gpr.R(rt));
				VMOV_neon(r, MatchSize(Q0, r));
			} else {
				ADDI2R(R0, CTXREG, gpr.GetMipsRegOffset(rt), R1);
				VLD1_lane(F_32, r, R0, 0, true);
			}
		} else if (imm < 128 + VFPU_CTRL_MAX) { //mtvc //currentMIPS->vfpuCtrl[imm - 128] = R(rt);
			if (imm - 128 == VFPU_CTRL_CC) {
				gpr.MapDirtyIn(MIPS_REG_VFPUCC, rt);
				MOV(gpr.R(MIPS_REG_VFPUCC), gpr.R(rt));
			} else {
				gpr.MapReg(rt);
				STR(gpr.R(rt), CTXREG, offsetof(MIPSState, vfpuCtrl) + 4 * (imm - 128));
			}

			// TODO: Optimization if rt is Imm?
			// Set these BEFORE disable!
			if (imm - 128 == VFPU_CTRL_SPREFIX) {
				js.prefixSFlag = JitState::PREFIX_UNKNOWN;
			} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
				js.prefixTFlag = JitState::PREFIX_UNKNOWN;
			} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
				js.prefixDFlag = JitState::PREFIX_UNKNOWN;
			}
		} else {
			//ERROR
			_dbg_assert_msg_(false,"mtv - invalid register");
		}
		break;

	default:
		DISABLE;
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmfvc(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vmtvc(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);

	int vs = _VS;
	int imm = op & 0xFF;
	if (imm >= 128 && imm < 128 + VFPU_CTRL_MAX) {
		ARMReg r = fpr.QMapReg(vs, V_Single, 0);
		ADDI2R(R0, CTXREG, offsetof(MIPSState, vfpuCtrl[0]) + (imm - 128) * 4, R1);
		VST1_lane(F_32, r, R0, 0, true);
		fpr.ReleaseSpillLocksAndDiscardTemps();

		if (imm - 128 == VFPU_CTRL_SPREFIX) {
			js.prefixSFlag = JitState::PREFIX_UNKNOWN;
		} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
			js.prefixTFlag = JitState::PREFIX_UNKNOWN;
		} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
			js.prefixDFlag = JitState::PREFIX_UNKNOWN;
		}
	}
}

void ArmJit::CompNEON_VMatrixInit(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);

	MatrixSize msz = GetMtxSize(op);
	int n = GetMatrixSide(msz);

	ARMReg cols[4];
	fpr.QMapMatrix(cols, _VD, msz, MAP_NOINIT | MAP_DIRTY);

	switch ((op >> 16) & 0xF) {
	case 3:  // vmidt
		// There has to be a better way to synthesize: 1.0, 0.0, 0.0, 1.0 in a quad
		VEOR(D0, D0, D0);
		VMOV_immf(D1, 1.0f);
		VTRN(F_32, D0, D1);
		VREV64(I_32, D0, D0);
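		// After the shuffle above (rough trace): D0 = { 1.0f, 0.0f } and D1 = { 0.0f, 1.0f },
		// i.e. the two columns of a 2x2 identity, which the cases below copy into the right
		// D halves of each destination column.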
		switch (msz) {
		case M_2x2:
			VMOV_neon(cols[0], D0);
			VMOV_neon(cols[1], D1);
			break;
		case M_3x3:
			VMOV_neon(D_0(cols[0]), D0);
			VMOV_imm(I_8, D_1(cols[0]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_0(cols[1]), D1);
			VMOV_imm(I_8, D_1(cols[1]), VIMMxxxxxxxx, 0);
			VMOV_imm(I_8, D_0(cols[2]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_1(cols[2]), D0);
			break;
		case M_4x4:
			VMOV_neon(D_0(cols[0]), D0);
			VMOV_imm(I_8, D_1(cols[0]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_0(cols[1]), D1);
			VMOV_imm(I_8, D_1(cols[1]), VIMMxxxxxxxx, 0);
			VMOV_imm(I_8, D_0(cols[2]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_1(cols[2]), D0);
			VMOV_imm(I_8, D_0(cols[3]), VIMMxxxxxxxx, 0);
			VMOV_neon(D_1(cols[3]), D1);

			// NEONTranspose4x4(cols);
			break;
		default:
			_assert_msg_(false, "Bad matrix size");
			break;
		}
		break;
	case 6: // vmzero
		for (int i = 0; i < n; i++) {
			VEOR(cols[i], cols[i], cols[i]);
		}
		break;
	case 7: // vmone
		for (int i = 0; i < n; i++) {
			VMOV_immf(cols[i], 1.0f);
		}
		break;
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmmov(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMMOV);
	if (_VS == _VD) {
		// A lot of these no-op matrix moves in Wipeout... Just drop the instruction entirely.
		return;
	}

	MatrixSize msz = GetMtxSize(op);

	MatrixOverlapType overlap = GetMatrixOverlap(_VD, _VS, msz);
	if (overlap != OVERLAP_NONE) {
		// Too complicated to bother handling in the JIT.
		// TODO: Special case for in-place (and other) transpose, etc.
		DISABLE;
	}

	ARMReg s_cols[4], d_cols[4];
	fpr.QMapMatrix(s_cols, _VS, msz, 0);
	fpr.QMapMatrix(d_cols, _VD, msz, MAP_DIRTY | MAP_NOINIT);

	int n = GetMatrixSide(msz);
	for (int i = 0; i < n; i++) {
		VMOV_neon(d_cols[i], s_cols[i]);
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmmul(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMMUL);

	MatrixSize msz = GetMtxSize(op);
	int n = GetMatrixSide(msz);

	bool overlap = GetMatrixOverlap(_VD, _VS, msz) || GetMatrixOverlap(_VD, _VT, msz);
	if (overlap) {
		// Later. Fortunately, the VFPU also seems to prohibit overlap for matrix mul.
		INFO_LOG(JIT, "Matrix overlap, ignoring.");
		DISABLE;
	}

	// Having problems with 2x2s for some reason.
	if (msz == M_2x2) {
		DISABLE;
	}

	ARMReg s_cols[4], t_cols[4], d_cols[4];

	// For some reason, vmmul is encoded with the first matrix (S) transposed from the real meaning.
	fpr.QMapMatrix(t_cols, _VT, msz, MAP_FORCE_LOW);  // Need to see if we can avoid having to force it low in some sane way. Will need crazy prediction logic for loads otherwise.
	fpr.QMapMatrix(s_cols, Xpose(_VS), msz, MAP_PREFER_HIGH);
	fpr.QMapMatrix(d_cols, _VD, msz, MAP_PREFER_HIGH | MAP_NOINIT | MAP_DIRTY);

	// TODO: Getting there but still getting wrong results.
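	// Intended math (sketch): with S transposed by the mapping above, each output column is
	// a linear combination of the columns of S weighted by the elements of T's column j:
	//   d_col[j] = sum_i t_col[j][i] * s_col[i]
	// which is what the VMUL_scalar/VMLA_scalar accumulation below spells out.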
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			if (i == 0) {
				VMUL_scalar(F_32, d_cols[j], s_cols[i], XScalar(t_cols[j], i));
			} else {
				VMLA_scalar(F_32, d_cols[j], s_cols[i], XScalar(t_cols[j], i));
			}
		}
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vmscl(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VMSCL);

	MatrixSize msz = GetMtxSize(op);

	bool overlap = GetMatrixOverlap(_VD, _VS, msz) != OVERLAP_NONE;
	if (overlap) {
		DISABLE;
	}

	int n = GetMatrixSide(msz);

	ARMReg s_cols[4], t, d_cols[4];
	fpr.QMapMatrix(s_cols, _VS, msz, 0);
	fpr.QMapMatrix(d_cols, _VD, msz, MAP_NOINIT | MAP_DIRTY);

	t = fpr.QMapReg(_VT, V_Single, 0);
	VMOV_neon(D0, t);
	for (int i = 0; i < n; i++) {
		VMUL_scalar(F_32, d_cols[i], s_cols[i], DScalar(D0, 0));
	}

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vtfm(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_MTX_VTFM);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	if (_VT == _VD) {
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);
	MatrixSize msz = GetMtxSize(op);
	int n = GetNumVectorElements(sz);
	int ins = (op >> 23) & 7;

	bool homogenous = false;
	if (n == ins) {
		n++;
		sz = (VectorSize)((int)(sz)+1);
		msz = (MatrixSize)((int)(msz)+1);
		homogenous = true;
	}
	// Otherwise, n should already be ins + 1.
	else if (n != ins + 1) {
		DISABLE;
	}

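	// Intended math (sketch): d = sum_i t[i] * s_col[i], using the transposed S mapped below;
	// in the homogeneous case the last column is added unscaled, as if t's missing last
	// component were 1.0f.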
	ARMReg s_cols[4], t, d;
	t = fpr.QMapReg(_VT, sz, MAP_FORCE_LOW);
	fpr.QMapMatrix(s_cols, Xpose(_VS), msz, MAP_PREFER_HIGH);
	d = fpr.QMapReg(_VD, sz, MAP_DIRTY | MAP_NOINIT | MAP_PREFER_HIGH);

	VMUL_scalar(F_32, d, s_cols[0], XScalar(t, 0));
	for (int i = 1; i < n; i++) {
		if (homogenous && i == n - 1) {
			VADD(F_32, d, d, s_cols[i]);
		} else {
			VMLA_scalar(F_32, d, s_cols[i], XScalar(t, i));
		}
	}

	// VTFM does not have prefix support.

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VCrs(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_VDet(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vi2x(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vx2i(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vf2i(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vi2f(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	int imm = (op >> 16) & 0x1f;
	const float mult = 1.0f / (float)(1UL << imm);

	MappedRegs regs = NEONMapDirtyIn(op, sz, sz);

	MOVI2F_neon(MatchSize(Q0, regs.vd), mult, R0);

	VCVT(F_32, regs.vd, regs.vs);
	VMUL(F_32, regs.vd, regs.vd, Q0);

	NEONApplyPrefixD(regs.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vh2f(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (!cpu_info.bHalf) {
		// No hardware support for half-to-float, fallback to interpreter
		// TODO: Translate the fast SSE solution to standard integer/VFP stuff
		// for the weaker CPUs.
		DISABLE;
	}

	VectorSize sz = GetVecSize(op);

	VectorSize outsize = V_Pair;
	switch (sz) {
	case V_Single:
		outsize = V_Pair;
		break;
	case V_Pair:
		outsize = V_Quad;
		break;
	default:
		ERROR_LOG(JIT, "Vh2f: Must be pair or quad");
		break;
	}

	ARMReg vs = NEONMapPrefixS(_VS, sz, 0);
	// TODO: MAP_NOINIT if they're definitely not overlapping.
	DestARMReg vd = NEONMapPrefixD(_VD, outsize, MAP_DIRTY);

	VCVTF32F16(vd.rd, vs);

	NEONApplyPrefixD(vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vcst(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	int conNum = (op >> 16) & 0x1f;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_DIRTY | MAP_NOINIT);
	gpr.SetRegImm(R0, (u32)(void *)&cst_constants[conNum]);
	VLD1_all_lanes(F_32, vd, R0, true);
	NEONApplyPrefixD(vd);  // TODO: Could bake this into the constant we load.

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vhoriz(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}
	VectorSize sz = GetVecSize(op);
	// Do any games use these a noticeable amount?
	switch ((op >> 16) & 31) {
	case 6:  // vfad
		{
			VMOV_neon(F_32, D1, 0.0f);
			MappedRegs r = NEONMapDirtyIn(op, V_Single, sz);
			switch (sz) {
			case V_Pair:
				VPADD(F_32, r.vd, r.vs, r.vs);
				break;
			case V_Triple:
				VPADD(F_32, D0, D_0(r.vs), D_0(r.vs));
				VADD(F_32, r.vd, D0, D_1(r.vs));
				break;
			case V_Quad:
				VADD(F_32, D0, D_0(r.vs), D_1(r.vs));
				VPADD(F_32, r.vd, D0, D0);
				break;
			default:
				;
			}
			// This forces the sign of -0.000 to +0.000.
			VADD(F_32, r.vd, r.vd, D1);
			break;
		}

	case 7:  // vavg
		DISABLE;
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VRot(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);

	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	DISABLE;

	int vd = _VD;
	int vs = _VS;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	// ...
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_VIdt(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_NOINIT | MAP_DIRTY);
	switch (sz) {
	case V_Pair:
		VMOV_immf(vd, 1.0f);
		if ((_VD & 1) == 0) {
			// Load with 1.0, 0.0
			VMOV_imm(I_64, D0, VIMMbits2bytes, 0x0F);
			VAND(vd, vd, D0);
		} else {
			VMOV_imm(I_64, D0, VIMMbits2bytes, 0xF0);
			VAND(vd, vd, D0);
		}
		break;
	case V_Triple:
	case V_Quad:
		{
			// TODO: This can be optimized.
			VEOR(vd, vd, vd);
			ARMReg dest = (_VD & 2) ? D_1(vd) : D_0(vd);
			VMOV_immf(dest, 1.0f);
			if ((_VD & 1) == 0) {
				// Load with 1.0, 0.0
				VMOV_imm(I_64, D0, VIMMbits2bytes, 0x0F);
				VAND(dest, dest, D0);
			} else {
				VMOV_imm(I_64, D0, VIMMbits2bytes, 0xF0);
				VAND(dest, dest, D0);
			}
		}
		break;
	default:
		_dbg_assert_msg_(false,"Bad vidt instruction");
		break;
	}

	NEONApplyPrefixD(vd);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vcmp(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_COMP);
	if (js.HasUnknownPrefix())
		DISABLE;

	// Not a chance that this works on the first try :P
	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	VCondition cond = (VCondition)(op & 0xF);

	MappedRegs regs = NEONMapInIn(op, sz, sz);

	ARMReg vs = regs.vs, vt = regs.vt;
	ARMReg res = fpr.QAllocTemp(sz);

	// Some, we just fall back to the interpreter.
	// ES is just really equivalent to (value & 0x7F800000) == 0x7F800000.
	switch (cond) {
	case VC_EI: // c = my_isinf(s[i]); break;
	case VC_NI: // c = !my_isinf(s[i]); break;
		DISABLE;
	case VC_ES: // c = my_isnan(s[i]) || my_isinf(s[i]); break;   // Tekken Dark Resurrection
	case VC_NS: // c = !my_isnan(s[i]) && !my_isinf(s[i]); break;
	case VC_EN: // c = my_isnan(s[i]); break;
	case VC_NN: // c = !my_isnan(s[i]); break;
		// if (_VS != _VT)
			DISABLE;
		break;

	case VC_EZ:
	case VC_NZ:
		VMOV_immf(Q0, 0.0f);
		break;
	default:
		;
	}

	int affected_bits = (1 << 4) | (1 << 5);  // 4 and 5
	for (int i = 0; i < n; i++) {
		affected_bits |= 1 << i;
	}
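
	// VFPU_CC layout note: bits 0-3 hold the per-lane results; bit 4 is (roughly) the OR of
	// the lane results and bit 5 the AND, which is why both are always in affected_bits.
	// Producing those two aggregate bits cheaply is the open question in the TODO further down.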

	// Preload the pointer to our magic mask
	static const u32 collectorBits[4] = { 1, 2, 4, 8 };
	MOVP2R(R1, &collectorBits);

	// Do the compare
	MOVI2R(R0, 0);
	CCFlags flag = CC_AL;

	bool oneIsFalse = false;
	switch (cond) {
	case VC_FL: // c = 0;
		break;

	case VC_TR: // c = 1
		MOVI2R(R0, affected_bits);
		break;

	case VC_ES: // c = my_isnan(s[i]) || my_isinf(s[i]); break;   // Tekken Dark Resurrection
	case VC_NS: // c = !(my_isnan(s[i]) || my_isinf(s[i])); break;
		DISABLE;  // TODO: these shouldn't be that hard
		break;

	case VC_EN: // c = my_isnan(s[i]); break;  // Tekken 6
	case VC_NN: // c = !my_isnan(s[i]); break;
		DISABLE;  // TODO: these shouldn't be that hard
		break;

	case VC_EQ: // c = s[i] == t[i]
		VCEQ(F_32, res, vs, vt);
		break;

	case VC_LT: // c = s[i] < t[i]
		VCLT(F_32, res, vs, vt);
		break;

	case VC_LE: // c = s[i] <= t[i];
		VCLE(F_32, res, vs, vt);
		break;

	case VC_NE: // c = s[i] != t[i]
		VCEQ(F_32, res, vs, vt);
		oneIsFalse = true;
		break;

	case VC_GE: // c = s[i] >= t[i]
		VCGE(F_32, res, vs, vt);
		break;

	case VC_GT: // c = s[i] > t[i]
		VCGT(F_32, res, vs, vt);
		break;

	case VC_EZ: // c = s[i] == 0.0f || s[i] == -0.0f
		VCEQ(F_32, res, vs);
		break;

	case VC_NZ: // c = s[i] != 0
		VCEQ(F_32, res, vs);
		oneIsFalse = true;
		break;

	default:
		DISABLE;
	}
	if (oneIsFalse) {
		VMVN(res, res);
	}
	// Somehow collect the bits into a mask.

	// Collect the bits. Where's my PMOVMSKB? :(
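	// Each lane of res is now all-ones (true) or all-zero (false); AND with {1,2,4,8} and
	// two pairwise adds fold that into a 4-bit lane mask in lane 0.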
	VLD1(I_32, Q0, R1, n < 2 ? 1 : 2);
	VAND(Q0, Q0, res);
	VPADD(I_32, Q0, Q0, Q0);
	VPADD(I_32, D0, D0, D0);
	// OK, bits now in S0.
	VMOV(R0, S0);
	// Zap irrelevant bits (V_Single, V_Triple)
	AND(R0, R0, affected_bits);

	// TODO: Now, how in the world do we generate the component OR and AND bits without burning tens of ALU instructions?? Lookup-table?

	gpr.MapReg(MIPS_REG_VFPUCC, MAP_DIRTY);
	BIC(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), affected_bits);
	ORR(gpr.R(MIPS_REG_VFPUCC), gpr.R(MIPS_REG_VFPUCC), R0);
}

void ArmJit::CompNEON_Vcmov(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_COMP);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DISABLE;

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	ARMReg vs = NEONMapPrefixS(_VS, sz, 0);
	DestARMReg vd = NEONMapPrefixD(_VD, sz, MAP_DIRTY);
	int tf = (op >> 19) & 1;
	int imm3 = (op >> 16) & 7;

	if (imm3 < 6) {
		// Test one bit of CC. This bit decides whether none or all subregisters are copied.
		gpr.MapReg(MIPS_REG_VFPUCC);
		TST(gpr.R(MIPS_REG_VFPUCC), 1 << imm3);
		FixupBranch skip = B_CC(CC_NEQ);
		VMOV_neon(vd, vs);
		SetJumpTarget(skip);
	} else {
		// Look at the bottom four bits of CC to individually decide if the subregisters should be copied.
		// This is the nasty one! Need to expand those bits into a full NEON register somehow.
		DISABLE;
		/*
		gpr.MapReg(MIPS_REG_VFPUCC);
		for (int i = 0; i < n; i++) {
			TST(gpr.R(MIPS_REG_VFPUCC), 1 << i);
			SetCC(tf ? CC_EQ : CC_NEQ);
			VMOV(fpr.V(dregs[i]), fpr.V(sregs[i]));
			SetCC(CC_AL);
		}
		*/
	}

	NEONApplyPrefixD(vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Viim(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DestARMReg vt = NEONMapPrefixD(_VT, V_Single, MAP_NOINIT | MAP_DIRTY);

	s32 imm = SignExtend16ToS32(op);
	// TODO: Optimize for low registers.
	MOVI2F(S0, (float)imm, R0);
	VMOV_neon(vt.rd, D0);

	NEONApplyPrefixD(vt);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vfim(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_XFER);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	DestARMReg vt = NEONMapPrefixD(_VT, V_Single, MAP_NOINIT | MAP_DIRTY);

	FP16 half;
	half.u = op & 0xFFFF;
	FP32 fval = half_to_float_fast5(half);
	// TODO: Optimize for low registers.
	MOVI2F(S0, (float)fval.f, R0);
	VMOV_neon(vt.rd, D0);

	NEONApplyPrefixD(vt);
	fpr.ReleaseSpillLocksAndDiscardTemps();
}

// https://code.google.com/p/bullet/source/browse/branches/PhysicsEffects/include/vecmath/neon/vectormath_neon_assembly_implementations.S?r=2488
void ArmJit::CompNEON_VCrossQuat(MIPSOpcode op) {
	// This op does not support prefixes anyway.
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE_UNKNOWN_PREFIX;
	}

	VectorSize sz = GetVecSize(op);
	if (sz != V_Triple) {
		// Quaternion product. Bleh.
		DISABLE;
	}

	MappedRegs r = NEONMapDirtyInIn(op, sz, sz, sz, false);

	ARMReg t1 = Q0;
	ARMReg t2 = fpr.QAllocTemp(V_Triple);

	// There has to be a faster way to do this. This is not really any better than
	// scalar.
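	// What the shuffles below compute is the standard cross product (per the inline notes):
	//   d = (s.y*t.z - s.z*t.y,  s.z*t.x - s.x*t.z,  s.x*t.y - s.y*t.x)
	// built from two rearranged VMUL/VMLS passes.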

	// d18, d19 (q9) = t1 = r.vt
	// d16, d17 (q8) = t2 = r.vs
	// d20, d21 (q10) = t
	VMOV_neon(t1, r.vs);
	VMOV_neon(t2, r.vt);
	VTRN(F_32, D_0(t2), D_1(t2));    //	vtrn.32 d18,d19			@  q9 = <x2,z2,y2,w2> = d18,d19
	VREV64(F_32, D_0(t1), D_0(t1));  // vrev64.32 d16,d16		@  q8 = <y1,x1,z1,w1> = d16,d17
	VREV64(F_32, D_0(t2), D_0(t2));   // vrev64.32 d18,d18		@  q9 = <z2,x2,y2,w2> = d18,d19
	VTRN(F_32, D_0(t1), D_1(t1));    // vtrn.32 d16,d17			@  q8 = <y1,z1,x1,w1> = d16,d17
	// perform first half of cross product using rearranged inputs
	VMUL(F_32, r.vd, t1, t2);           // vmul.f32 q10, q8, q9	@ q10 = <y1*z2,z1*x2,x1*y2,w1*w2>
	// @ rearrange inputs again
	VTRN(F_32, D_0(t2), D_1(t2));   // vtrn.32 d18,d19			@  q9 = <z2,y2,x2,w2> = d18,d19
	VREV64(F_32, D_0(t1), D_0(t1));  // vrev64.32 d16,d16		@  q8 = <z1,y1,x1,w1> = d16,d17
	VREV64(F_32, D_0(t2), D_0(t2));  // vrev64.32 d18,d18		@  q9 = <y2,z2,x2,w2> = d18,d19
	VTRN(F_32, D_0(t1), D_1(t1));   // vtrn.32 d16,d17			@  q8 = <z1,x1,y1,w1> = d16,d17
	// @ perform last half of cross product using rearranged inputs
	VMLS(F_32, r.vd, t1, t2);   // vmls.f32 q10, q8, q9	@ q10 = <y1*z2-y2*z1,z1*x2-z2*x1,x1*y2-x2*y1,w1*w2-w2*w1>

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_Vsgn(MIPSOpcode op) {
	DISABLE;

	// This will be a bunch of bit magic.
}

void ArmJit::CompNEON_Vocp(MIPSOpcode op) {
	CONDITIONAL_DISABLE(VFPU_VEC);
	if (js.HasUnknownPrefix()) {
		DISABLE;
	}

	// TODO: Handle T prefix.  Right now it uses 1.0f always.

	// This is a hack that modifies prefixes.  We eat them later, so just overwrite.
	// S prefix forces the negate flags.
	js.prefixS |= 0x000F0000;
	// T prefix forces constants on and regnum to 1.
	// That means negate still works, and abs activates a different constant.
	js.prefixT = (js.prefixT & ~0x000000FF) | 0x00000055 | 0x0000F000;
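	// Net effect of the overrides above (sketch): with the S negate bits and the T constant
	// forced to 1.0f, the VADD below evaluates d[i] = 1.0f - s[i], which is what vocp means.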

	VectorSize sz = GetVecSize(op);
	int n = GetNumVectorElements(sz);

	MappedRegs regs = NEONMapDirtyIn(op, sz, sz);
	MOVI2F_neon(Q0, 1.0f, R0);
	VADD(F_32, regs.vd, Q0, regs.vs);
	NEONApplyPrefixD(regs.vd);

	fpr.ReleaseSpillLocksAndDiscardTemps();
}

void ArmJit::CompNEON_ColorConv(MIPSOpcode op) {
	DISABLE;
}

void ArmJit::CompNEON_Vbfy(MIPSOpcode op) {
	DISABLE;
}

}  // namespace MIPSComp

#endif // PPSSPP_ARCH(ARM)