1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_CODEGEN_PPC_CONSTANTS_PPC_H_
6 #define V8_CODEGEN_PPC_CONSTANTS_PPC_H_
7
8 #include <stdint.h>
9
10 #include "src/base/logging.h"
11 #include "src/base/macros.h"
12 #include "src/common/globals.h"
13
// UNIMPLEMENTED_ macro for PPC.
// In debug builds, prints the file, line and function of the call site so
// unimplemented paths are visible in the log; in release builds it expands
// to nothing (the call site compiles away).
#ifdef DEBUG
#define UNIMPLEMENTED_PPC() \
  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
                       __FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_PPC()
#endif
22
// Function descriptors are used only when running natively on PPC hardware
// (host is PPC/PPC64) and the target ABI requires them: AIX, or 64-bit
// big-endian ELF when _CALL_ELF is absent or 1 (i.e. the ELFv1 ABI).
// Simulator builds (non-PPC host) never use descriptors.
#if (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) && \
    (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
                   (!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define ABI_USES_FUNCTION_DESCRIPTORS 1
#else
#define ABI_USES_FUNCTION_DESCRIPTORS 0
#endif
30
// Handles are passed in registers in simulator builds (non-PPC host), on
// AIX, and on all 64-bit PPC targets.
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \
    V8_TARGET_ARCH_PPC64
#define ABI_PASSES_HANDLES_IN_REGS 1
#else
#define ABI_PASSES_HANDLES_IN_REGS 0
#endif
37
// Object pairs come back in registers in simulator builds, on 32-bit
// targets, on little-endian 64-bit targets, and under the ELFv2 ABI
// (_CALL_ELF == 2).  Only native big-endian 64-bit ELFv1 returns them
// differently.
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || !V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
#else
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
#endif
44
// Indirect calls go through the ip register in simulator builds and on
// 64-bit targets that are little-endian or use the ELFv2 ABI.
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || \
    (V8_TARGET_ARCH_PPC64 && \
     (V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)))
#define ABI_CALL_VIA_IP 1
#else
#define ABI_CALL_VIA_IP 0
#endif
52
// Register holding the TOC (table of contents) pointer: r2 in simulator
// builds, on AIX, and on 64-bit targets; r13 otherwise (32-bit native).
#if !(V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64) || V8_OS_AIX || \
    V8_TARGET_ARCH_PPC64
#define ABI_TOC_REGISTER 2
#else
#define ABI_TOC_REGISTER 13
#endif
59 namespace v8 {
60 namespace internal {
61
// Maximum distance (in MB) that PC-relative code may span.  Zero disables
// PC-relative code ranges entirely on this port.
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;

// Used to encode a boolean value when emitting 32 bit
// opcodes which will indicate the presence of function descriptors
constexpr int kHasFunctionDescriptorBitShift = 4;
constexpr int kHasFunctionDescriptorBitMask = 1
                                              << kHasFunctionDescriptorBitShift;

// Number of general-purpose registers.
const int kNumRegisters = 32;

// FP support: number of double-precision FP registers.
const int kNumDoubleRegisters = 32;

// Sentinel meaning "no register".
const int kNoRegister = -1;

// Used in embedded constant pool builder - max reach in bits for
// various load instructions (one less due to unsigned)
const int kLoadPtrMaxReachBits = 15;
const int kLoadDoubleMaxReachBits = 15;

// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 128;
88
// The SIGN_EXT_IMM* macros sign-extend the low N bits of <imm> by shifting
// the bits up to the top of a 32-bit int and arithmetically shifting back
// down.  NOTE(review): this relies on implementation-defined behavior
// (arithmetic right shift of negative values, and pre-C++20 left shift of
// signed values) — standard on the compilers V8 supports, but worth
// confirming before reusing elsewhere.

// sign-extend the least significant 5-bits of value <imm>
#define SIGN_EXT_IMM5(imm) ((static_cast<int>(imm) << 27) >> 27)

// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)

// sign-extend the least significant 22-bits of value <imm>
#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)

// sign-extend the least significant 26-bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
100
101 // -----------------------------------------------------------------------------
102 // Conditions.
103
104 // Defines constants and accessor classes to assemble, disassemble and
105 // simulate PPC instructions.
106 //
// Section references in the code refer to the "PowerPC Microprocessor
// Family: The Programmer's Reference Guide" from 10/95
109 // https://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600741775/$file/prg.pdf
110 //
111
112 // Constants for specific fields are defined in their respective named enums.
113 // General constants are in an anonymous enum in class Instr.
// Condition codes used when assembling and simulating compare/branch
// sequences.  The codes are laid out in complementary even/odd pairs
// (eq/ne, ge/lt, gt/le, unordered/ordered, overflow/nooverflow) so that
// NegateCondition() can negate a condition by flipping its lowest bit.
enum Condition {
  kNoCondition = -1,
  eq = 0,          // Equal.
  ne = 1,          // Not equal.
  ge = 2,          // Greater or equal.
  lt = 3,          // Less than.
  gt = 4,          // Greater than.
  le = 5,          // Less than or equal.
  unordered = 6,   // Floating-point unordered.
  ordered = 7,     // Floating-point ordered (complement of unordered).
  overflow = 8,    // Summary overflow.
  nooverflow = 9,  // No summary overflow.
  al = 10          // Always; has no complement (see NegateCondition).
};
128
NegateCondition(Condition cond)129 inline Condition NegateCondition(Condition cond) {
130 DCHECK(cond != al);
131 return static_cast<Condition>(cond ^ ne);
132 }
133
134 // -----------------------------------------------------------------------------
135 // Instructions encoding.
136
137 // Instr is merely used by the Assembler to distinguish 32bit integers
138 // representing instructions from usual 32 bit values.
139 // Instruction objects are pointers to 32bit values, and provide methods to
140 // access the various ISA fields.
141 using Instr = uint32_t;
142
// X-macro table of XX3-form VSX scalar opcodes.  Each entry expands
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XX3_OPCODE_SCALAR_LIST(V)                                  \
  /* VSX Scalar Add Double-Precision */                                \
  V(xsadddp, XSADDDP, 0xF0000100)                                      \
  /* VSX Scalar Add Single-Precision */                                \
  V(xsaddsp, XSADDSP, 0xF0000000)                                      \
  /* VSX Scalar Compare Ordered Double-Precision */                    \
  V(xscmpodp, XSCMPODP, 0xF0000158)                                    \
  /* VSX Scalar Compare Unordered Double-Precision */                  \
  V(xscmpudp, XSCMPUDP, 0xF0000118)                                    \
  /* VSX Scalar Copy Sign Double-Precision */                          \
  V(xscpsgndp, XSCPSGNDP, 0xF0000580)                                  \
  /* VSX Scalar Divide Double-Precision */                             \
  V(xsdivdp, XSDIVDP, 0xF00001C0)                                      \
  /* VSX Scalar Divide Single-Precision */                             \
  V(xsdivsp, XSDIVSP, 0xF00000C0)                                      \
  /* VSX Scalar Multiply-Add Type-A Double-Precision */                \
  V(xsmaddadp, XSMADDADP, 0xF0000108)                                  \
  /* VSX Scalar Multiply-Add Type-A Single-Precision */                \
  V(xsmaddasp, XSMADDASP, 0xF0000008)                                  \
  /* VSX Scalar Multiply-Add Type-M Double-Precision */                \
  V(xsmaddmdp, XSMADDMDP, 0xF0000148)                                  \
  /* VSX Scalar Multiply-Add Type-M Single-Precision */                \
  V(xsmaddmsp, XSMADDMSP, 0xF0000048)                                  \
  /* VSX Scalar Maximum Double-Precision */                            \
  V(xsmaxdp, XSMAXDP, 0xF0000500)                                      \
  /* VSX Scalar Minimum Double-Precision */                            \
  V(xsmindp, XSMINDP, 0xF0000540)                                      \
  /* VSX Scalar Multiply-Subtract Type-A Double-Precision */           \
  V(xsmsubadp, XSMSUBADP, 0xF0000188)                                  \
  /* VSX Scalar Multiply-Subtract Type-A Single-Precision */           \
  V(xsmsubasp, XSMSUBASP, 0xF0000088)                                  \
  /* VSX Scalar Multiply-Subtract Type-M Double-Precision */           \
  V(xsmsubmdp, XSMSUBMDP, 0xF00001C8)                                  \
  /* VSX Scalar Multiply-Subtract Type-M Single-Precision */           \
  V(xsmsubmsp, XSMSUBMSP, 0xF00000C8)                                  \
  /* VSX Scalar Multiply Double-Precision */                           \
  V(xsmuldp, XSMULDP, 0xF0000180)                                      \
  /* VSX Scalar Multiply Single-Precision */                           \
  V(xsmulsp, XSMULSP, 0xF0000080)                                      \
  /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */       \
  V(xsnmaddadp, XSNMADDADP, 0xF0000508)                                \
  /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */       \
  V(xsnmaddasp, XSNMADDASP, 0xF0000408)                                \
  /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */       \
  V(xsnmaddmdp, XSNMADDMDP, 0xF0000548)                                \
  /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */       \
  V(xsnmaddmsp, XSNMADDMSP, 0xF0000448)                                \
  /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */  \
  V(xsnmsubadp, XSNMSUBADP, 0xF0000588)                                \
  /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */  \
  V(xsnmsubasp, XSNMSUBASP, 0xF0000488)                                \
  /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */  \
  V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8)                                \
  /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */  \
  V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8)                                \
  /* VSX Scalar Reciprocal Estimate Double-Precision */                \
  V(xsredp, XSREDP, 0xF0000168)                                        \
  /* VSX Scalar Subtract Double-Precision */                           \
  V(xssubdp, XSSUBDP, 0xF0000140)                                      \
  /* VSX Scalar Subtract Single-Precision */                           \
  V(xssubsp, XSSUBSP, 0xF0000040)                                      \
  /* VSX Scalar Test for software Divide Double-Precision */           \
  V(xstdivdp, XSTDIVDP, 0xF00001E8)
206
// X-macro table of XX3-form VSX vector opcodes.  Each entry expands
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XX3_OPCODE_VECTOR_LIST(V)                                        \
  /* VSX Vector Add Double-Precision */                                      \
  V(xvadddp, XVADDDP, 0xF0000300)                                            \
  /* VSX Vector Add Single-Precision */                                      \
  V(xvaddsp, XVADDSP, 0xF0000200)                                            \
  /* VSX Vector Compare Equal To Double-Precision */                         \
  V(xvcmpeqdp, XVCMPEQDP, 0xF0000318)                                        \
  /* VSX Vector Compare Equal To Double-Precision & record CR6 */            \
  V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718)                                      \
  /* VSX Vector Compare Equal To Single-Precision */                         \
  V(xvcmpeqsp, XVCMPEQSP, 0xF0000218)                                        \
  /* VSX Vector Compare Equal To Single-Precision & record CR6 */            \
  V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618)                                      \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision */         \
  V(xvcmpgedp, XVCMPGEDP, 0xF0000398)                                        \
  /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
  /* CR6 */                                                                  \
  V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798)                                      \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision */         \
  V(xvcmpgesp, XVCMPGESP, 0xF0000298)                                        \
  /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
  /* CR6 */                                                                  \
  V(xvcmpgespx, XVCMPGESPx, 0xF0000698)                                      \
  /* VSX Vector Compare Greater Than Double-Precision */                     \
  V(xvcmpgtdp, XVCMPGTDP, 0xF0000358)                                        \
  /* VSX Vector Compare Greater Than Double-Precision & record CR6 */        \
  V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758)                                      \
  /* VSX Vector Compare Greater Than Single-Precision */                     \
  V(xvcmpgtsp, XVCMPGTSP, 0xF0000258)                                        \
  /* VSX Vector Compare Greater Than Single-Precision & record CR6 */        \
  V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658)                                      \
  /* VSX Vector Copy Sign Double-Precision */                                \
  V(xvcpsgndp, XVCPSGNDP, 0xF0000780)                                        \
  /* VSX Vector Copy Sign Single-Precision */                                \
  V(xvcpsgnsp, XVCPSGNSP, 0xF0000680)                                        \
  /* VSX Vector Divide Double-Precision */                                   \
  V(xvdivdp, XVDIVDP, 0xF00003C0)                                            \
  /* VSX Vector Divide Single-Precision */                                   \
  V(xvdivsp, XVDIVSP, 0xF00002C0)                                            \
  /* VSX Vector Multiply-Add Type-A Double-Precision */                      \
  V(xvmaddadp, XVMADDADP, 0xF0000308)                                        \
  /* VSX Vector Multiply-Add Type-A Single-Precision */                      \
  V(xvmaddasp, XVMADDASP, 0xF0000208)                                        \
  /* VSX Vector Multiply-Add Type-M Double-Precision */                      \
  V(xvmaddmdp, XVMADDMDP, 0xF0000348)                                        \
  /* VSX Vector Multiply-Add Type-M Single-Precision */                      \
  V(xvmaddmsp, XVMADDMSP, 0xF0000248)                                        \
  /* VSX Vector Maximum Double-Precision */                                  \
  V(xvmaxdp, XVMAXDP, 0xF0000700)                                            \
  /* VSX Vector Maximum Single-Precision */                                  \
  V(xvmaxsp, XVMAXSP, 0xF0000600)                                            \
  /* VSX Vector Minimum Double-Precision */                                  \
  V(xvmindp, XVMINDP, 0xF0000740)                                            \
  /* VSX Vector Minimum Single-Precision */                                  \
  V(xvminsp, XVMINSP, 0xF0000640)                                            \
  /* VSX Vector Multiply-Subtract Type-A Double-Precision */                 \
  V(xvmsubadp, XVMSUBADP, 0xF0000388)                                        \
  /* VSX Vector Multiply-Subtract Type-A Single-Precision */                 \
  V(xvmsubasp, XVMSUBASP, 0xF0000288)                                        \
  /* VSX Vector Multiply-Subtract Type-M Double-Precision */                 \
  V(xvmsubmdp, XVMSUBMDP, 0xF00003C8)                                        \
  /* VSX Vector Multiply-Subtract Type-M Single-Precision */                 \
  V(xvmsubmsp, XVMSUBMSP, 0xF00002C8)                                        \
  /* VSX Vector Multiply Double-Precision */                                 \
  V(xvmuldp, XVMULDP, 0xF0000380)                                            \
  /* VSX Vector Multiply Single-Precision */                                 \
  V(xvmulsp, XVMULSP, 0xF0000280)                                            \
  /* VSX Vector Negative Multiply-Add Type-A Double-Precision */             \
  V(xvnmaddadp, XVNMADDADP, 0xF0000708)                                      \
  /* VSX Vector Negative Multiply-Add Type-A Single-Precision */             \
  V(xvnmaddasp, XVNMADDASP, 0xF0000608)                                      \
  /* VSX Vector Negative Multiply-Add Type-M Double-Precision */             \
  V(xvnmaddmdp, XVNMADDMDP, 0xF0000748)                                      \
  /* VSX Vector Negative Multiply-Add Type-M Single-Precision */             \
  V(xvnmaddmsp, XVNMADDMSP, 0xF0000648)                                      \
  /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */        \
  V(xvnmsubadp, XVNMSUBADP, 0xF0000788)                                      \
  /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */        \
  V(xvnmsubasp, XVNMSUBASP, 0xF0000688)                                      \
  /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */        \
  V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8)                                      \
  /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */        \
  V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8)                                      \
  /* VSX Vector Reciprocal Estimate Double-Precision */                      \
  V(xvredp, XVREDP, 0xF0000368)                                              \
  /* VSX Vector Subtract Double-Precision */                                 \
  V(xvsubdp, XVSUBDP, 0xF0000340)                                            \
  /* VSX Vector Subtract Single-Precision */                                 \
  V(xvsubsp, XVSUBSP, 0xF0000240)                                            \
  /* VSX Vector Test for software Divide Double-Precision */                 \
  V(xvtdivdp, XVTDIVDP, 0xF00003E8)                                          \
  /* VSX Vector Test for software Divide Single-Precision */                 \
  V(xvtdivsp, XVTDIVSP, 0xF00002E8)                                          \
  /* VSX Logical AND */                                                      \
  V(xxland, XXLAND, 0xF0000410)                                              \
  /* VSX Logical AND with Complement */                                      \
  V(xxlandc, XXLANDC, 0xF0000450)                                            \
  /* VSX Logical Equivalence */                                              \
  V(xxleqv, XXLEQV, 0xF00005D0)                                              \
  /* VSX Logical NAND */                                                     \
  V(xxlnand, XXLNAND, 0xF0000590)                                            \
  /* VSX Logical NOR */                                                      \
  V(xxlnor, XXLNOR, 0xF0000510)                                              \
  /* VSX Logical OR */                                                       \
  V(xxlor, XXLOR, 0xF0000490)                                                \
  /* VSX Logical OR with Complement */                                       \
  V(xxlorc, XXLORC, 0xF0000550)                                              \
  /* VSX Logical XOR */                                                      \
  V(xxlxor, XXLXOR, 0xF00004D0)                                              \
  /* VSX Merge High Word */                                                  \
  V(xxmrghw, XXMRGHW, 0xF0000090)                                            \
  /* VSX Merge Low Word */                                                   \
  V(xxmrglw, XXMRGLW, 0xF0000190)                                            \
  /* VSX Permute Doubleword Immediate */                                     \
  V(xxpermdi, XXPERMDI, 0xF0000050)                                          \
  /* VSX Shift Left Double by Word Immediate */                              \
  V(xxsldwi, XXSLDWI, 0xF0000010)                                            \
  /* VSX Splat Word */                                                       \
  V(xxspltw, XXSPLTW, 0xF0000290)
326
// X-macro table of Z23-form decimal floating-point opcodes.  Each entry
// expands V(assembler mnemonic, opcode enum name, 32-bit opcode bits).
#define PPC_Z23_OPCODE_LIST(V)                                     \
  /* Decimal Quantize */                                           \
  V(dqua, DQUA, 0xEC000006)                                        \
  /* Decimal Quantize Immediate */                                 \
  V(dquai, DQUAI, 0xEC000086)                                      \
  /* Decimal Quantize Immediate Quad */                            \
  V(dquaiq, DQUAIQ, 0xFC000086)                                    \
  /* Decimal Quantize Quad */                                      \
  V(dquaq, DQUAQ, 0xFC000006)                                      \
  /* Decimal Floating Round To FP Integer Without Inexact */       \
  V(drintn, DRINTN, 0xEC0001C6)                                    \
  /* Decimal Floating Round To FP Integer Without Inexact Quad */  \
  V(drintnq, DRINTNQ, 0xFC0001C6)                                  \
  /* Decimal Floating Round To FP Integer With Inexact */          \
  V(drintx, DRINTX, 0xEC0000C6)                                    \
  /* Decimal Floating Round To FP Integer With Inexact Quad */     \
  V(drintxq, DRINTXQ, 0xFC0000C6)                                  \
  /* Decimal Floating Reround */                                   \
  V(drrnd, DRRND, 0xEC000046)                                      \
  /* Decimal Floating Reround Quad */                              \
  V(drrndq, DRRNDQ, 0xFC000046)
348
// X-macro table of Z22-form decimal floating-point opcodes.  Each entry
// expands V(assembler mnemonic, opcode enum name, 32-bit opcode bits).
#define PPC_Z22_OPCODE_LIST(V)                                          \
  /* Decimal Floating Shift Coefficient Left Immediate */               \
  V(dscli, DSCLI, 0xEC000084)                                           \
  /* Decimal Floating Shift Coefficient Left Immediate Quad */          \
  V(dscliq, DSCLIQ, 0xFC000084)                                         \
  /* Decimal Floating Shift Coefficient Right Immediate */              \
  V(dscri, DSCRI, 0xEC0000C4)                                           \
  /* Decimal Floating Shift Coefficient Right Immediate Quad */         \
  V(dscriq, DSCRIQ, 0xFC0000C4)                                         \
  /* Decimal Floating Test Data Class */                                \
  V(dtstdc, DTSTDC, 0xEC000184)                                         \
  /* Decimal Floating Test Data Class Quad */                           \
  V(dtstdcq, DTSTDCQ, 0xFC000184)                                       \
  /* Decimal Floating Test Data Group */                                \
  V(dtstdg, DTSTDG, 0xEC0001C4)                                         \
  /* Decimal Floating Test Data Group Quad */                           \
  V(dtstdgq, DTSTDGQ, 0xFC0001C4)
366
// X-macro table of XX2 A-form VSX vector opcodes.  Each entry expands
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V)                               \
  /* VSX Vector Absolute Value Double-Precision */                         \
  V(xvabsdp, XVABSDP, 0xF0000764)                                          \
  /* VSX Vector Negate Double-Precision */                                 \
  V(xvnegdp, XVNEGDP, 0xF00007E4)                                          \
  /* VSX Vector Square Root Double-Precision */                            \
  V(xvsqrtdp, XVSQRTDP, 0xF000032C)                                        \
  /* VSX Vector Absolute Value Single-Precision */                         \
  V(xvabssp, XVABSSP, 0xF0000664)                                          \
  /* VSX Vector Negate Single-Precision */                                 \
  V(xvnegsp, XVNEGSP, 0xF00006E4)                                          \
  /* VSX Vector Reciprocal Estimate Single-Precision */                    \
  V(xvresp, XVRESP, 0xF0000268)                                            \
  /* VSX Vector Reciprocal Square Root Estimate Single-Precision */        \
  V(xvrsqrtesp, XVRSQRTESP, 0xF0000228)                                    \
  /* VSX Vector Square Root Single-Precision */                            \
  V(xvsqrtsp, XVSQRTSP, 0xF000022C)                                        \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */     \
  /* Saturate */                                                           \
  V(xvcvspsxws, XVCVSPSXWS, 0xF0000260)                                    \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */   \
  /* Saturate */                                                           \
  V(xvcvspuxws, XVCVSPUXWS, 0xF0000220)                                    \
  /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */     \
  V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0)                                      \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */   \
  V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0)                                      \
  /* VSX Vector Round to Double-Precision Integer toward +Infinity */      \
  V(xvrdpip, XVRDPIP, 0xF00003A4)                                          \
  /* VSX Vector Round to Double-Precision Integer toward -Infinity */      \
  V(xvrdpim, XVRDPIM, 0xF00003E4)                                          \
  /* VSX Vector Round to Double-Precision Integer toward Zero */           \
  V(xvrdpiz, XVRDPIZ, 0xF0000364)                                          \
  /* VSX Vector Round to Double-Precision Integer */                       \
  V(xvrdpi, XVRDPI, 0xF0000324)                                            \
  /* VSX Vector Round to Single-Precision Integer toward +Infinity */      \
  V(xvrspip, XVRSPIP, 0xF00002A4)                                          \
  /* VSX Vector Round to Single-Precision Integer toward -Infinity */      \
  V(xvrspim, XVRSPIM, 0xF00002E4)                                          \
  /* VSX Vector Round to Single-Precision Integer toward Zero */           \
  V(xvrspiz, XVRSPIZ, 0xF0000264)                                          \
  /* VSX Vector Round to Single-Precision Integer */                       \
  V(xvrspi, XVRSPI, 0xF0000224)                                            \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xvcvsxddp, XVCVSXDDP, 0xF00007E0)                                      \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */      \
  /* Precision */                                                          \
  V(xvcvuxddp, XVCVUXDDP, 0xF00007A0)                                      \
  /* VSX Vector Convert Single-Precision to Double-Precision */            \
  V(xvcvspdp, XVCVSPDP, 0xF0000724)                                        \
  /* VSX Vector Convert Double-Precision to Single-Precision */            \
  V(xvcvdpsp, XVCVDPSP, 0xF0000624)                                        \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */     \
  /* Saturate */                                                           \
  V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360)                                    \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */   \
  /* Saturate */                                                           \
  V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
425
// X-macro table of XX2 A-form VSX scalar opcodes.  Each entry expands
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V)                               \
  /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
  /* signalling */                                                         \
  V(xscvdpspn, XSCVDPSPN, 0xF000042C)                                      \
  /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */ \
  /* signalling */                                                         \
  V(xscvspdpn, XSCVSPDPN, 0xF000052C)
433
// X-macro table of XX2 B-form opcodes.  Each entry expands
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XX2_OPCODE_B_FORM_LIST(V)    \
  /* Vector Byte-Reverse Quadword */     \
  V(xxbrq, XXBRQ, 0xF01F076C)
437
// X-macro table of the remaining XX2-form opcodes (not yet used by the
// assembler/simulator fast paths).  Each entry expands
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XX2_OPCODE_UNUSED_LIST(V)                                        \
  /* VSX Scalar Square Root Double-Precision */                              \
  V(xssqrtdp, XSSQRTDP, 0xF000012C)                                          \
  /* VSX Scalar Reciprocal Estimate Single-Precision */                      \
  V(xsresp, XSRESP, 0xF0000068)                                              \
  /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */          \
  V(xsrsqrtesp, XSRSQRTESP, 0xF0000028)                                      \
  /* VSX Scalar Square Root Single-Precision */                              \
  V(xssqrtsp, XSSQRTSP, 0xF000002C)                                          \
  /* VSX Scalar Absolute Value Double-Precision */                           \
  V(xsabsdp, XSABSDP, 0xF0000564)                                            \
  /* VSX Scalar Convert Double-Precision to Single-Precision */              \
  V(xscvdpsp, XSCVDPSP, 0xF0000424)                                          \
  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */                                                             \
  V(xscvdpsxds, XSCVDPSXDS, 0xF0000560)                                      \
  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */       \
  /* Saturate */                                                             \
  V(xscvdpsxws, XSCVDPSXWS, 0xF0000160)                                      \
  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */          \
  /* Doubleword Saturate */                                                  \
  V(xscvdpuxds, XSCVDPUXDS, 0xF0000520)                                      \
  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */     \
  /* Saturate */                                                             \
  V(xscvdpuxws, XSCVDPUXWS, 0xF0000120)                                      \
  /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */        \
  V(xscvspdp, XSCVSPDP, 0xF0000524)                                          \
  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
  V(xscvsxddp, XSCVSXDDP, 0xF00005E0)                                        \
  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
  V(xscvsxdsp, XSCVSXDSP, 0xF00004E0)                                        \
  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */        \
  /* Precision */                                                            \
  V(xscvuxddp, XSCVUXDDP, 0xF00005A0)                                        \
  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */        \
  /* Precision */                                                            \
  V(xscvuxdsp, XSCVUXDSP, 0xF00004A0)                                        \
  /* VSX Scalar Negative Absolute Value Double-Precision */                  \
  V(xsnabsdp, XSNABSDP, 0xF00005A4)                                          \
  /* VSX Scalar Negate Double-Precision */                                   \
  V(xsnegdp, XSNEGDP, 0xF00005E4)                                            \
  /* VSX Scalar Round to Double-Precision Integer */                         \
  V(xsrdpi, XSRDPI, 0xF0000124)                                              \
  /* VSX Scalar Round to Double-Precision Integer using Current rounding */  \
  /* mode */                                                                 \
  V(xsrdpic, XSRDPIC, 0xF00001AC)                                            \
  /* VSX Scalar Round to Double-Precision Integer toward -Infinity */        \
  V(xsrdpim, XSRDPIM, 0xF00001E4)                                            \
  /* VSX Scalar Round to Double-Precision Integer toward +Infinity */        \
  V(xsrdpip, XSRDPIP, 0xF00001A4)                                            \
  /* VSX Scalar Round to Double-Precision Integer toward Zero */             \
  V(xsrdpiz, XSRDPIZ, 0xF0000164)                                            \
  /* VSX Scalar Round to Single-Precision */                                 \
  V(xsrsp, XSRSP, 0xF0000464)                                                \
  /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */          \
  V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128)                                      \
  /* VSX Scalar Test for software Square Root Double-Precision */            \
  V(xstsqrtdp, XSTSQRTDP, 0xF00001A8)                                        \
  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */                                                             \
  V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760)                                      \
  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */          \
  /* Doubleword Saturate */                                                  \
  V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720)                                      \
  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
  /* Saturate */                                                             \
  V(xvcvspsxds, XVCVSPSXDS, 0xF0000660)                                      \
  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */          \
  /* Doubleword Saturate */                                                  \
  V(xvcvspuxds, XVCVSPUXDS, 0xF0000620)                                      \
  /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
  V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0)                                        \
  /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */       \
  V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0)                                        \
  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */        \
  /* Precision */                                                            \
  V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0)                                        \
  /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */     \
  V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0)                                        \
  /* VSX Vector Negative Absolute Value Double-Precision */                  \
  V(xvnabsdp, XVNABSDP, 0xF00007A4)                                          \
  /* VSX Vector Negative Absolute Value Single-Precision */                  \
  V(xvnabssp, XVNABSSP, 0xF00006A4)                                          \
  /* VSX Vector Round to Double-Precision Integer using Current rounding */  \
  /* mode */                                                                 \
  V(xvrdpic, XVRDPIC, 0xF00003AC)                                            \
  /* VSX Vector Round to Single-Precision Integer using Current rounding */  \
  /* mode */                                                                 \
  V(xvrspic, XVRSPIC, 0xF00002AC)                                            \
  /* VSX Vector Reciprocal Square Root Estimate Double-Precision */          \
  V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328)                                      \
  /* VSX Vector Test for software Square Root Double-Precision */            \
  V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8)                                        \
  /* VSX Vector Test for software Square Root Single-Precision */            \
  V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)                                        \
  /* Vector Splat Immediate Byte */                                          \
  V(xxspltib, XXSPLTIB, 0xF00002D0)
535
// Aggregates all XX2-form sub-lists above into one list; iterating this
// applies V to every XX2 opcode.
#define PPC_XX2_OPCODE_LIST(V)         \
  PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
  PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
  PPC_XX2_OPCODE_B_FORM_LIST(V)        \
  PPC_XX2_OPCODE_UNUSED_LIST(V)
541
542 #define PPC_EVX_OPCODE_LIST(V) \
543 /* Vector Load Double Word into Double Word by External PID Indexed */ \
544 V(evlddepx, EVLDDEPX, 0x7C00063E) \
545 /* Vector Store Double of Double by External PID Indexed */ \
546 V(evstddepx, EVSTDDEPX, 0x7C00073E) \
547 /* Bit Reversed Increment */ \
548 V(brinc, BRINC, 0x1000020F) \
549 /* Vector Absolute Value */ \
550 V(evabs, EVABS, 0x10000208) \
551 /* Vector Add Immediate Word */ \
552 V(evaddiw, EVADDIW, 0x10000202) \
553 /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \
554 V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \
555 /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \
556 V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \
557 /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \
558 V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \
559 /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \
560 V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \
561 /* Vector Add Word */ \
562 V(evaddw, EVADDW, 0x10000200) \
563 /* Vector AND */ \
564 V(evand, EVAND, 0x10000211) \
565 /* Vector AND with Complement */ \
566 V(evandc, EVANDC, 0x10000212) \
567 /* Vector Compare Equal */ \
568 V(evcmpeq, EVCMPEQ, 0x10000234) \
569 /* Vector Compare Greater Than Signed */ \
570 V(evcmpgts, EVCMPGTS, 0x10000231) \
571 /* Vector Compare Greater Than Unsigned */ \
572 V(evcmpgtu, EVCMPGTU, 0x10000230) \
573 /* Vector Compare Less Than Signed */ \
574 V(evcmplts, EVCMPLTS, 0x10000233) \
575 /* Vector Compare Less Than Unsigned */ \
576 V(evcmpltu, EVCMPLTU, 0x10000232) \
577 /* Vector Count Leading Signed Bits Word */ \
578 V(evcntlsw, EVCNTLSW, 0x1000020E) \
579 /* Vector Count Leading Zeros Word */ \
580 V(evcntlzw, EVCNTLZW, 0x1000020D) \
581 /* Vector Divide Word Signed */ \
582 V(evdivws, EVDIVWS, 0x100004C6) \
583 /* Vector Divide Word Unsigned */ \
584 V(evdivwu, EVDIVWU, 0x100004C7) \
585 /* Vector Equivalent */ \
586 V(eveqv, EVEQV, 0x10000219) \
587 /* Vector Extend Sign Byte */ \
588 V(evextsb, EVEXTSB, 0x1000020A) \
589 /* Vector Extend Sign Half Word */ \
590 V(evextsh, EVEXTSH, 0x1000020B) \
591 /* Vector Load Double Word into Double Word */ \
592 V(evldd, EVLDD, 0x10000301) \
593 /* Vector Load Double Word into Double Word Indexed */ \
594 V(evlddx, EVLDDX, 0x10000300) \
595 /* Vector Load Double into Four Half Words */ \
596 V(evldh, EVLDH, 0x10000305) \
597 /* Vector Load Double into Four Half Words Indexed */ \
598 V(evldhx, EVLDHX, 0x10000304) \
599 /* Vector Load Double into Two Words */ \
600 V(evldw, EVLDW, 0x10000303) \
601 /* Vector Load Double into Two Words Indexed */ \
602 V(evldwx, EVLDWX, 0x10000302) \
603 /* Vector Load Half Word into Half Words Even and Splat */ \
604 V(evlhhesplat, EVLHHESPLAT, 0x10000309) \
605 /* Vector Load Half Word into Half Words Even and Splat Indexed */ \
606 V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \
607 /* Vector Load Half Word into Half Word Odd Signed and Splat */ \
608 V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \
609 /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \
610 V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \
611 /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \
612 V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \
613 /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \
614 V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
615 /* Vector Load Word into Two Half Words Even */ \
616 V(evlwhe, EVLWHE, 0x10000311) \
617 /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
618 V(evlwhos, EVLWHOS, 0x10000317) \
619 /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
620 /* extension) */ \
621 V(evlwhosx, EVLWHOSX, 0x10000316) \
622 /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \
623 V(evlwhou, EVLWHOU, 0x10000315) \
624 /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \
625 /* extended) */ \
626 V(evlwhoux, EVLWHOUX, 0x10000314) \
627 /* Vector Load Word into Two Half Words and Splat */ \
628 V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \
629 /* Vector Load Word into Two Half Words and Splat Indexed */ \
630 V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \
631 /* Vector Load Word into Word and Splat */ \
632 V(evlwwsplat, EVLWWSPLAT, 0x10000319) \
633 /* Vector Load Word into Word and Splat Indexed */ \
634 V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \
635 /* Vector Merge High */ \
636 V(evmergehi, EVMERGEHI, 0x1000022C) \
637 /* Vector Merge High/Low */ \
638 V(evmergehilo, EVMERGEHILO, 0x1000022E) \
639 /* Vector Merge Low */ \
640 V(evmergelo, EVMERGELO, 0x1000022D) \
641 /* Vector Merge Low/High */ \
642 V(evmergelohi, EVMERGELOHI, 0x1000022F) \
643 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
644 /* and Accumulate */ \
645 V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \
646 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
647 /* and Accumulate Negative */ \
648 V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \
649 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
650 /* and Accumulate */ \
651 V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \
652 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
653 /* and Accumulate Negative */ \
654 V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \
655 /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
656 /* and Accumulate */ \
657 V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \
658 /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
659 /* and Accumulate Negative */ \
660 V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \
661 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \
662 V(evmhesmf, EVMHESMF, 0x1000040B) \
663 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \
664 /* Accumulator */ \
665 V(evmhesmfa, EVMHESMFA, 0x1000042B) \
666 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
667 /* Accumulate into Words */ \
668 V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \
669 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
670 /* Accumulate Negative into Words */ \
671 V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \
672 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \
673 V(evmhesmi, EVMHESMI, 0x10000409) \
674 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \
675 /* Accumulator */ \
676 V(evmhesmia, EVMHESMIA, 0x10000429) \
677 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
678 /* Accumulate into Words */ \
679 V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \
680 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
681 /* Accumulate Negative into Words */ \
682 V(evmhesmianw, EVMHESMIANW, 0x10000589) \
683 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \
684 V(evmhessf, EVMHESSF, 0x10000403) \
685 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \
686 /* Accumulator */ \
687 V(evmhessfa, EVMHESSFA, 0x10000423) \
688 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
689 /* Accumulate into Words */ \
690 V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \
691 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
692 /* Accumulate Negative into Words */ \
693 V(evmhessfanw, EVMHESSFANW, 0x10000583) \
694 /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
695 /* Accumulate into Words */ \
696 V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \
697 /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
698 /* Accumulate Negative into Words */ \
699 V(evmhessianw, EVMHESSIANW, 0x10000581) \
700 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \
701 V(evmheumi, EVMHEUMI, 0x10000408) \
702 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \
703 /* Accumulator */ \
704 V(evmheumia, EVMHEUMIA, 0x10000428) \
705 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
706 /* Accumulate into Words */ \
707 V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \
708 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
709 /* Accumulate Negative into Words */ \
710 V(evmheumianw, EVMHEUMIANW, 0x10000588) \
711 /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
712 /* Accumulate into Words */ \
713 V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \
714 /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
715 /* Accumulate Negative into Words */ \
716 V(evmheusianw, EVMHEUSIANW, 0x10000580) \
717 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
718 /* and Accumulate */ \
719 V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \
720 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
721 /* and Accumulate Negative */ \
722 V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \
723 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \
724 /* and Accumulate */ \
725 V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \
726 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
727 /* Accumulate Negative */ \
728 V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \
729 /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
730 /* and Accumulate */ \
731 V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \
732 /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
733 /* and Accumulate Negative */ \
734 V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \
735 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \
736 V(evmhosmf, EVMHOSMF, 0x1000040F) \
737 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \
738 /* Accumulator */ \
739 V(evmhosmfa, EVMHOSMFA, 0x1000042F) \
740 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
741 /* Accumulate into Words */ \
742 V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \
743 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
744 /* Accumulate Negative into Words */ \
745 V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \
746 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \
747 V(evmhosmi, EVMHOSMI, 0x1000040D) \
748 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \
749 /* Accumulator */ \
750 V(evmhosmia, EVMHOSMIA, 0x1000042D) \
751 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
752 /* Accumulate into Words */ \
753 V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \
754 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
755 /* Accumulate Negative into Words */ \
756 V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \
757 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \
758 V(evmhossf, EVMHOSSF, 0x10000407) \
759 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \
760 /* Accumulator */ \
761 V(evmhossfa, EVMHOSSFA, 0x10000427) \
762 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
763 /* Accumulate into Words */ \
764 V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \
765 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
766 /* Accumulate Negative into Words */ \
767 V(evmhossfanw, EVMHOSSFANW, 0x10000587) \
768 /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
769 /* Accumulate into Words */ \
770 V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \
771 /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
772 /* Accumulate Negative into Words */ \
773 V(evmhossianw, EVMHOSSIANW, 0x10000585) \
774 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \
775 V(evmhoumi, EVMHOUMI, 0x1000040C) \
776 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \
777 /* Accumulator */ \
778 V(evmhoumia, EVMHOUMIA, 0x1000042C) \
779 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
780 /* Accumulate into Words */ \
781 V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \
782 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
783 /* Accumulate Negative into Words */ \
784 V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \
785 /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
786 /* Accumulate into Words */ \
787 V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \
788 /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
789 /* Accumulate Negative into Words */ \
790 V(evmhousianw, EVMHOUSIANW, 0x10000584) \
791 /* Initialize Accumulator */ \
792 V(evmra, EVMRA, 0x100004C4) \
793 /* Vector Multiply Word High Signed, Modulo, Fractional */ \
794 V(evmwhsmf, EVMWHSMF, 0x1000044F) \
795 /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \
796 V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \
797 /* Vector Multiply Word High Signed, Modulo, Integer */ \
798 V(evmwhsmi, EVMWHSMI, 0x1000044D) \
799 /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \
800 V(evmwhsmia, EVMWHSMIA, 0x1000046D) \
801 /* Vector Multiply Word High Signed, Saturate, Fractional */ \
802 V(evmwhssf, EVMWHSSF, 0x10000447) \
803 /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
804 V(evmwhssfa, EVMWHSSFA, 0x10000467) \
805 /* Vector Multiply Word High Unsigned, Modulo, Integer */ \
806 V(evmwhumi, EVMWHUMI, 0x1000044C) \
807 /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \
808 V(evmwhumia, EVMWHUMIA, 0x1000046C) \
809 /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \
810 /* Words */ \
811 V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \
812 /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \
813 /* Negative in Words */ \
814 V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \
815 /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \
816 /* Words */ \
817 V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \
818 /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \
819 /* Negative in Words */ \
820 V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \
821 /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \
822 V(evmwlumi, EVMWLUMI, 0x10000448) \
823 /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \
824 V(evmwlumia, EVMWLUMIA, 0x10000468) \
825 /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \
826 /* Words */ \
827 V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \
828 /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \
829 /* Negative in Words */ \
830 V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \
831 /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
832 /* in Words */ \
833 V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \
834 /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
835 /* Negative in Words */ \
836 V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \
837 /* Vector Multiply Word Signed, Modulo, Fractional */ \
838 V(evmwsmf, EVMWSMF, 0x1000045B) \
839 /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \
840 V(evmwsmfa, EVMWSMFA, 0x1000047B) \
841 /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
842 V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \
843 /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
844 /* Negative */ \
845 V(evmwsmfan, EVMWSMFAN, 0x100005DB) \
846 /* Vector Multiply Word Signed, Modulo, Integer */ \
847 V(evmwsmi, EVMWSMI, 0x10000459) \
848 /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \
849 V(evmwsmia, EVMWSMIA, 0x10000479) \
850 /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \
851 V(evmwsmiaa, EVMWSMIAA, 0x10000559) \
852 /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \
853 V(evmwsmian, EVMWSMIAN, 0x100005D9) \
854 /* Vector Multiply Word Signed, Saturate, Fractional */ \
855 V(evmwssf, EVMWSSF, 0x10000453) \
856 /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \
857 V(evmwssfa, EVMWSSFA, 0x10000473) \
858 /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
859 V(evmwssfaa, EVMWSSFAA, 0x10000553) \
860 /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
861 /* Negative */ \
862 V(evmwssfan, EVMWSSFAN, 0x100005D3) \
863 /* Vector Multiply Word Unsigned, Modulo, Integer */ \
864 V(evmwumi, EVMWUMI, 0x10000458) \
865 /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \
866 V(evmwumia, EVMWUMIA, 0x10000478) \
867 /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
868 V(evmwumiaa, EVMWUMIAA, 0x10000558) \
869 /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
870 /* Negative */ \
871 V(evmwumian, EVMWUMIAN, 0x100005D8) \
872 /* Vector NAND */ \
873 V(evnand, EVNAND, 0x1000021E) \
874 /* Vector Negate */ \
875 V(evneg, EVNEG, 0x10000209) \
876 /* Vector NOR */ \
877 V(evnor, EVNOR, 0x10000218) \
878 /* Vector OR */ \
879 V(evor, EVOR, 0x10000217) \
880 /* Vector OR with Complement */ \
881 V(evorc, EVORC, 0x1000021B) \
882 /* Vector Rotate Left Word */ \
883 V(evrlw, EVRLW, 0x10000228) \
884 /* Vector Rotate Left Word Immediate */ \
885 V(evrlwi, EVRLWI, 0x1000022A) \
886 /* Vector Round Word */ \
887 V(evrndw, EVRNDW, 0x1000020C) \
888 /* Vector Shift Left Word */ \
889 V(evslw, EVSLW, 0x10000224) \
890 /* Vector Shift Left Word Immediate */ \
891 V(evslwi, EVSLWI, 0x10000226) \
892 /* Vector Splat Fractional Immediate */ \
893 V(evsplatfi, EVSPLATFI, 0x1000022B) \
894 /* Vector Splat Immediate */ \
895 V(evsplati, EVSPLATI, 0x10000229) \
896 /* Vector Shift Right Word Immediate Signed */ \
897 V(evsrwis, EVSRWIS, 0x10000223) \
898 /* Vector Shift Right Word Immediate Unsigned */ \
899 V(evsrwiu, EVSRWIU, 0x10000222) \
900 /* Vector Shift Right Word Signed */ \
901 V(evsrws, EVSRWS, 0x10000221) \
902 /* Vector Shift Right Word Unsigned */ \
903 V(evsrwu, EVSRWU, 0x10000220) \
904 /* Vector Store Double of Double */ \
905 V(evstdd, EVSTDD, 0x10000321) \
906 /* Vector Store Double of Double Indexed */ \
907 V(evstddx, EVSTDDX, 0x10000320) \
908 /* Vector Store Double of Four Half Words */ \
909 V(evstdh, EVSTDH, 0x10000325) \
910 /* Vector Store Double of Four Half Words Indexed */ \
911 V(evstdhx, EVSTDHX, 0x10000324) \
912 /* Vector Store Double of Two Words */ \
913 V(evstdw, EVSTDW, 0x10000323) \
914 /* Vector Store Double of Two Words Indexed */ \
915 V(evstdwx, EVSTDWX, 0x10000322) \
916 /* Vector Store Word of Two Half Words from Even */ \
917 V(evstwhe, EVSTWHE, 0x10000331) \
918 /* Vector Store Word of Two Half Words from Even Indexed */ \
919 V(evstwhex, EVSTWHEX, 0x10000330) \
920 /* Vector Store Word of Two Half Words from Odd */ \
921 V(evstwho, EVSTWHO, 0x10000335) \
922 /* Vector Store Word of Two Half Words from Odd Indexed */ \
923 V(evstwhox, EVSTWHOX, 0x10000334) \
924 /* Vector Store Word of Word from Even */ \
925 V(evstwwe, EVSTWWE, 0x10000339) \
926 /* Vector Store Word of Word from Even Indexed */ \
927 V(evstwwex, EVSTWWEX, 0x10000338) \
928 /* Vector Store Word of Word from Odd */ \
929 V(evstwwo, EVSTWWO, 0x1000033D) \
930 /* Vector Store Word of Word from Odd Indexed */ \
931 V(evstwwox, EVSTWWOX, 0x1000033C) \
932 /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \
933 V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \
934 /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \
935 V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \
936 /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \
937 V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \
938 /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \
939 V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \
940 /* Vector Subtract from Word */ \
941 V(evsubfw, EVSUBFW, 0x10000204) \
942 /* Vector Subtract Immediate from Word */ \
943 V(evsubifw, EVSUBIFW, 0x10000206) \
944 /* Vector XOR */ \
945 V(evxor, EVXOR, 0x10000216) \
946 /* Floating-Point Double-Precision Absolute Value */ \
947 V(efdabs, EFDABS, 0x100002E4) \
948 /* Floating-Point Double-Precision Add */ \
949 V(efdadd, EFDADD, 0x100002E0) \
950 /* Floating-Point Double-Precision Convert from Single-Precision */ \
951 V(efdcfs, EFDCFS, 0x100002EF) \
952 /* Convert Floating-Point Double-Precision from Signed Fraction */ \
953 V(efdcfsf, EFDCFSF, 0x100002F3) \
954 /* Convert Floating-Point Double-Precision from Signed Integer */ \
955 V(efdcfsi, EFDCFSI, 0x100002F1) \
956 /* Convert Floating-Point Double-Precision from Signed Integer */ \
957 /* Doubleword */ \
958 V(efdcfsid, EFDCFSID, 0x100002E3) \
959 /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \
960 V(efdcfuf, EFDCFUF, 0x100002F2) \
961 /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
962 V(efdcfui, EFDCFUI, 0x100002F0) \
  /* Convert Floating-Point Double-Precision from Unsigned Integer */      \
964 /* Doubleword */ \
965 V(efdcfuid, EFDCFUID, 0x100002E2) \
966 /* Floating-Point Double-Precision Compare Equal */ \
967 V(efdcmpeq, EFDCMPEQ, 0x100002EE) \
968 /* Floating-Point Double-Precision Compare Greater Than */ \
969 V(efdcmpgt, EFDCMPGT, 0x100002EC) \
970 /* Floating-Point Double-Precision Compare Less Than */ \
971 V(efdcmplt, EFDCMPLT, 0x100002ED) \
972 /* Convert Floating-Point Double-Precision to Signed Fraction */ \
973 V(efdctsf, EFDCTSF, 0x100002F7) \
974 /* Convert Floating-Point Double-Precision to Signed Integer */ \
975 V(efdctsi, EFDCTSI, 0x100002F5) \
976 /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \
977 /* with Round toward Zero */ \
978 V(efdctsidz, EFDCTSIDZ, 0x100002EB) \
979 /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \
980 /* toward Zero */ \
981 V(efdctsiz, EFDCTSIZ, 0x100002FA) \
982 /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \
983 V(efdctuf, EFDCTUF, 0x100002F6) \
984 /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
985 V(efdctui, EFDCTUI, 0x100002F4) \
986 /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
987 /* Doubleword with Round toward Zero */ \
988 V(efdctuidz, EFDCTUIDZ, 0x100002EA) \
989 /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \
990 /* Round toward Zero */ \
991 V(efdctuiz, EFDCTUIZ, 0x100002F8) \
992 /* Floating-Point Double-Precision Divide */ \
993 V(efddiv, EFDDIV, 0x100002E9) \
994 /* Floating-Point Double-Precision Multiply */ \
995 V(efdmul, EFDMUL, 0x100002E8) \
996 /* Floating-Point Double-Precision Negative Absolute Value */ \
997 V(efdnabs, EFDNABS, 0x100002E5) \
998 /* Floating-Point Double-Precision Negate */ \
999 V(efdneg, EFDNEG, 0x100002E6) \
1000 /* Floating-Point Double-Precision Subtract */ \
1001 V(efdsub, EFDSUB, 0x100002E1) \
1002 /* Floating-Point Double-Precision Test Equal */ \
1003 V(efdtsteq, EFDTSTEQ, 0x100002FE) \
1004 /* Floating-Point Double-Precision Test Greater Than */ \
1005 V(efdtstgt, EFDTSTGT, 0x100002FC) \
1006 /* Floating-Point Double-Precision Test Less Than */ \
1007 V(efdtstlt, EFDTSTLT, 0x100002FD) \
1008 /* Floating-Point Single-Precision Convert from Double-Precision */ \
1009 V(efscfd, EFSCFD, 0x100002CF) \
1010 /* Floating-Point Absolute Value */ \
1011 V(efsabs, EFSABS, 0x100002C4) \
1012 /* Floating-Point Add */ \
1013 V(efsadd, EFSADD, 0x100002C0) \
1014 /* Convert Floating-Point from Signed Fraction */ \
1015 V(efscfsf, EFSCFSF, 0x100002D3) \
1016 /* Convert Floating-Point from Signed Integer */ \
1017 V(efscfsi, EFSCFSI, 0x100002D1) \
1018 /* Convert Floating-Point from Unsigned Fraction */ \
1019 V(efscfuf, EFSCFUF, 0x100002D2) \
1020 /* Convert Floating-Point from Unsigned Integer */ \
1021 V(efscfui, EFSCFUI, 0x100002D0) \
1022 /* Floating-Point Compare Equal */ \
1023 V(efscmpeq, EFSCMPEQ, 0x100002CE) \
1024 /* Floating-Point Compare Greater Than */ \
1025 V(efscmpgt, EFSCMPGT, 0x100002CC) \
1026 /* Floating-Point Compare Less Than */ \
1027 V(efscmplt, EFSCMPLT, 0x100002CD) \
1028 /* Convert Floating-Point to Signed Fraction */ \
1029 V(efsctsf, EFSCTSF, 0x100002D7) \
1030 /* Convert Floating-Point to Signed Integer */ \
1031 V(efsctsi, EFSCTSI, 0x100002D5) \
1032 /* Convert Floating-Point to Signed Integer with Round toward Zero */ \
1033 V(efsctsiz, EFSCTSIZ, 0x100002DA) \
1034 /* Convert Floating-Point to Unsigned Fraction */ \
1035 V(efsctuf, EFSCTUF, 0x100002D6) \
1036 /* Convert Floating-Point to Unsigned Integer */ \
1037 V(efsctui, EFSCTUI, 0x100002D4) \
1038 /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \
1039 V(efsctuiz, EFSCTUIZ, 0x100002D8) \
1040 /* Floating-Point Divide */ \
1041 V(efsdiv, EFSDIV, 0x100002C9) \
1042 /* Floating-Point Multiply */ \
1043 V(efsmul, EFSMUL, 0x100002C8) \
1044 /* Floating-Point Negative Absolute Value */ \
1045 V(efsnabs, EFSNABS, 0x100002C5) \
1046 /* Floating-Point Negate */ \
1047 V(efsneg, EFSNEG, 0x100002C6) \
1048 /* Floating-Point Subtract */ \
1049 V(efssub, EFSSUB, 0x100002C1) \
1050 /* Floating-Point Test Equal */ \
1051 V(efststeq, EFSTSTEQ, 0x100002DE) \
1052 /* Floating-Point Test Greater Than */ \
1053 V(efststgt, EFSTSTGT, 0x100002DC) \
1054 /* Floating-Point Test Less Than */ \
1055 V(efststlt, EFSTSTLT, 0x100002DD) \
1056 /* Vector Floating-Point Absolute Value */ \
1057 V(evfsabs, EVFSABS, 0x10000284) \
1058 /* Vector Floating-Point Add */ \
1059 V(evfsadd, EVFSADD, 0x10000280) \
1060 /* Vector Convert Floating-Point from Signed Fraction */ \
1061 V(evfscfsf, EVFSCFSF, 0x10000293) \
1062 /* Vector Convert Floating-Point from Signed Integer */ \
1063 V(evfscfsi, EVFSCFSI, 0x10000291) \
1064 /* Vector Convert Floating-Point from Unsigned Fraction */ \
1065 V(evfscfuf, EVFSCFUF, 0x10000292) \
1066 /* Vector Convert Floating-Point from Unsigned Integer */ \
1067 V(evfscfui, EVFSCFUI, 0x10000290) \
1068 /* Vector Floating-Point Compare Equal */ \
1069 V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \
1070 /* Vector Floating-Point Compare Greater Than */ \
1071 V(evfscmpgt, EVFSCMPGT, 0x1000028C) \
1072 /* Vector Floating-Point Compare Less Than */ \
1073 V(evfscmplt, EVFSCMPLT, 0x1000028D) \
1074 /* Vector Convert Floating-Point to Signed Fraction */ \
1075 V(evfsctsf, EVFSCTSF, 0x10000297) \
1076 /* Vector Convert Floating-Point to Signed Integer */ \
1077 V(evfsctsi, EVFSCTSI, 0x10000295) \
1078 /* Vector Convert Floating-Point to Signed Integer with Round toward */ \
1079 /* Zero */ \
1080 V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \
1081 /* Vector Convert Floating-Point to Unsigned Fraction */ \
1082 V(evfsctuf, EVFSCTUF, 0x10000296) \
1083 /* Vector Convert Floating-Point to Unsigned Integer */ \
1084 V(evfsctui, EVFSCTUI, 0x10000294) \
1085 /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \
1086 /* Zero */ \
1087 V(evfsctuiz, EVFSCTUIZ, 0x10000298) \
1088 /* Vector Floating-Point Divide */ \
1089 V(evfsdiv, EVFSDIV, 0x10000289) \
1090 /* Vector Floating-Point Multiply */ \
1091 V(evfsmul, EVFSMUL, 0x10000288) \
1092 /* Vector Floating-Point Negative Absolute Value */ \
1093 V(evfsnabs, EVFSNABS, 0x10000285) \
1094 /* Vector Floating-Point Negate */ \
1095 V(evfsneg, EVFSNEG, 0x10000286) \
1096 /* Vector Floating-Point Subtract */ \
1097 V(evfssub, EVFSSUB, 0x10000281) \
1098 /* Vector Floating-Point Test Equal */ \
1099 V(evfststeq, EVFSTSTEQ, 0x1000029E) \
1100 /* Vector Floating-Point Test Greater Than */ \
1101 V(evfststgt, EVFSTSTGT, 0x1000029C) \
1102 /* Vector Floating-Point Test Less Than */ \
1103 V(evfststlt, EVFSTSTLT, 0x1000029D)
1104
// PowerPC VC-form (vector compare) opcodes.
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_VC_OPCODE_LIST(V)                                                  \
  V(vcmpbfp, VCMPBFP, 0x100003C6) /* Vector Compare Bounds Single-Precision */ \
  V(vcmpeqfp, VCMPEQFP, 0x100000C6) /* Vector Compare Equal To Single-Precision */ \
  V(vcmpequb, VCMPEQUB, 0x10000006) /* Vector Compare Equal To Unsigned Byte */ \
  V(vcmpequd, VCMPEQUD, 0x100000C7) /* Vector Compare Equal To Unsigned Doubleword */ \
  V(vcmpequh, VCMPEQUH, 0x10000046) /* Vector Compare Equal To Unsigned Halfword */ \
  V(vcmpequw, VCMPEQUW, 0x10000086) /* Vector Compare Equal To Unsigned Word */ \
  V(vcmpgefp, VCMPGEFP, 0x100001C6) /* Vector Compare Greater Than or Equal To Single-Precision */ \
  V(vcmpgtfp, VCMPGTFP, 0x100002C6) /* Vector Compare Greater Than Single-Precision */ \
  V(vcmpgtsb, VCMPGTSB, 0x10000306) /* Vector Compare Greater Than Signed Byte */ \
  V(vcmpgtsd, VCMPGTSD, 0x100003C7) /* Vector Compare Greater Than Signed Doubleword */ \
  V(vcmpgtsh, VCMPGTSH, 0x10000346) /* Vector Compare Greater Than Signed Halfword */ \
  V(vcmpgtsw, VCMPGTSW, 0x10000386) /* Vector Compare Greater Than Signed Word */ \
  V(vcmpgtub, VCMPGTUB, 0x10000206) /* Vector Compare Greater Than Unsigned Byte */ \
  V(vcmpgtud, VCMPGTUD, 0x100002C7) /* Vector Compare Greater Than Unsigned Doubleword */ \
  V(vcmpgtuh, VCMPGTUH, 0x10000246) /* Vector Compare Greater Than Unsigned Halfword */ \
  V(vcmpgtuw, VCMPGTUW, 0x10000286) /* Vector Compare Greater Than Unsigned Word */
1138
// PowerPC X-form modulo opcodes (A operand layout).
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_A_FORM_LIST(V)                       \
  V(modsd, MODSD, 0x7C000612) /* Modulo Signed Dword */   \
  V(modud, MODUD, 0x7C000212) /* Modulo Unsigned Dword */ \
  V(modsw, MODSW, 0x7C000616) /* Modulo Signed Word */    \
  V(moduw, MODUW, 0x7C000216) /* Modulo Unsigned Word */
1148
// PowerPC X-form logical and shift opcodes (B operand layout).
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_B_FORM_LIST(V)                                \
  V(xor_, XORX, 0x7C000278) /* XOR */                              \
  V(and_, ANDX, 0x7C000038) /* AND */                              \
  V(andc, ANDCX, 0x7C000078) /* AND with Complement */             \
  V(orx, ORX, 0x7C000378) /* OR */                                 \
  V(orc, ORC, 0x7C000338) /* OR with Complement */                 \
  V(nor, NORX, 0x7C0000F8) /* NOR */                               \
  V(srw, SRWX, 0x7C000430) /* Shift Right Word */                  \
  V(slw, SLWX, 0x7C000030) /* Shift Left Word */                   \
  V(sraw, SRAW, 0x7C000630) /* Shift Right Algebraic Word */       \
  V(sld, SLDX, 0x7C000036) /* Shift Left Doubleword */             \
  V(srad, SRAD, 0x7C000634) /* Shift Right Algebraic Doubleword */ \
  V(srd, SRDX, 0x7C000436) /* Shift Right Doubleword */
1174
// PowerPC X-form count-zeros / population-count / sign-extension opcodes.
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_C_FORM_LIST(V)     \
  /* Count Leading Zeros Word */        \
  V(cntlzw, CNTLZWX, 0x7C000034)        \
  /* Count Leading Zeros Doubleword */  \
  V(cntlzd, CNTLZDX, 0x7C000074)        \
  /* Count Trailing Zeros Word */       \
  V(cnttzw, CNTTZWX, 0x7C000434)        \
  /* Count Trailing Zeros Doubleword */ \
  V(cnttzd, CNTTZDX, 0x7C000474)        \
  /* Population Count Byte-wise */      \
  V(popcntb, POPCNTB, 0x7C0000F4)       \
  /* Population Count Words */          \
  V(popcntw, POPCNTW, 0x7C0002F4)       \
  /* Population Count Doubleword */     \
  V(popcntd, POPCNTD, 0x7C0003F4)       \
  /* Extend Sign Byte */                \
  V(extsb, EXTSB, 0x7C000774)           \
  /* Extend Sign Halfword */            \
  V(extsh, EXTSH, 0x7C000734)
1194
// PowerPC X-form indexed load/store opcodes (D operand layout).
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_D_FORM_LIST(V)                                            \
  V(lhbrx, LHBRX, 0x7C00062C) /* Load Halfword Byte-Reverse Indexed */         \
  V(lwbrx, LWBRX, 0x7C00042C) /* Load Word Byte-Reverse Indexed */             \
  V(ldbrx, LDBRX, 0x7C000428) /* Load Doubleword Byte-Reverse Indexed */       \
  V(lbzx, LBZX, 0x7C0000AE) /* Load Byte and Zero Indexed */                   \
  V(lbzux, LBZUX, 0x7C0000EE) /* Load Byte and Zero with Update Indexed */     \
  V(lhzx, LHZX, 0x7C00022E) /* Load Halfword and Zero Indexed */               \
  V(lhzux, LHZUX, 0x7C00026E) /* Load Halfword and Zero with Update Indexed */ \
  V(lhax, LHAX, 0x7C0002AE) /* Load Halfword Algebraic Indexed */              \
  V(lwzx, LWZX, 0x7C00002E) /* Load Word and Zero Indexed */                   \
  V(lwzux, LWZUX, 0x7C00006E) /* Load Word and Zero with Update Indexed */     \
  V(ldx, LDX, 0x7C00002A) /* Load Doubleword Indexed */                        \
  V(ldux, LDUX, 0x7C00006A) /* Load Doubleword with Update Indexed */          \
  V(lfdx, LFDX, 0x7C0004AE) /* Load Floating-Point Double Indexed */           \
  V(lfsx, LFSX, 0x7C00042E) /* Load Floating-Point Single Indexed */           \
  V(lfdux, LFDUX, 0x7C0004EE) /* Load Floating-Point Double with Update Indexed */ \
  V(lfsux, LFSUX, 0x7C00046E) /* Load Floating-Point Single with Update Indexed */ \
  V(stbux, STBUX, 0x7C0001EE) /* Store Byte with Update Indexed */             \
  V(stbx, STBX, 0x7C0001AE) /* Store Byte Indexed */                           \
  V(sthux, STHUX, 0x7C00036E) /* Store Halfword with Update Indexed */         \
  V(sthx, STHX, 0x7C00032E) /* Store Halfword Indexed */                       \
  V(stwux, STWUX, 0x7C00016E) /* Store Word with Update Indexed */             \
  V(stwx, STWX, 0x7C00012E) /* Store Word Indexed */                           \
  V(stdux, STDUX, 0x7C00016A) /* Store Doubleword with Update Indexed */       \
  V(stdx, STDX, 0x7C00012A) /* Store Doubleword Indexed */                     \
  V(stfdux, STFDUX, 0x7C0005EE) /* Store Floating-Point Double with Update Indexed */ \
  V(stfdx, STFDX, 0x7C0005AE) /* Store Floating-Point Double Indexed */        \
  V(stfsux, STFSUX, 0x7C00056E) /* Store Floating-Point Single with Update Indexed */ \
  V(stfsx, STFSX, 0x7C00052E) /* Store Floating-Point Single Indexed */        \
  V(stdbrx, STDBRX, 0x7C000528) /* Store Doubleword Byte-Reverse Indexed */    \
  V(stwbrx, STWBRX, 0x7C00052C) /* Store Word Byte-Reverse Indexed */          \
  V(sthbrx, STHBRX, 0x7C00072C) /* Store Halfword Byte-Reverse Indexed */      \
  V(lvx, LVX, 0x7C0000CE) /* Load Vector Indexed */                            \
  V(stvx, STVX, 0x7C0001CE) /* Store Vector Indexed */
1262
// PowerPC X-form opcodes using the E operand layout.
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_E_FORM_LIST(V) \
  V(srawi, SRAWIX, 0x7C000670) /* Shift Right Algebraic Word Immediate */
1266
// PowerPC X-form compare opcodes (F operand layout).
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_F_FORM_LIST(V) \
  V(cmp, CMP, 0x7C000000) /* Compare */ \
  V(cmpl, CMPL, 0x7C000040) /* Compare Logical */
1272
// PowerPC X-form byte-reverse opcodes (G operand layout).
// Each entry is V(assembler mnemonic, opcode enum name, opcode bit pattern).
#define PPC_X_OPCODE_G_FORM_LIST(V)                   \
  V(brh, BRH, 0x7C0001B6) /* Byte-Reverse Halfword */ \
  V(brw, BRW, 0x7C000136) /* Byte-Reverse Word */     \
  V(brd, BRD, 0x7C000176) /* Byte-Reverse Doubleword */
1280
// PowerPC store-conditional opcodes (X-form variants carrying the EH hint
// field). Each V(mnemonic, enum name, opcode pattern) entry records the
// success/failure of the conditional store in CR0.
#define PPC_X_OPCODE_EH_S_FORM_LIST(V)                    \
  /* Store Byte Conditional Indexed & record CR0 */       \
  V(stbcx, STBCX, 0x7C00056D)                             \
  /* Store Halfword Conditional Indexed & record CR0 */   \
  V(sthcx, STHCX, 0x7C0005AD)                             \
  /* Store Word Conditional Indexed & record CR0 */       \
  V(stwcx, STWCX, 0x7C00012D)                             \
  /* Store Doubleword Conditional Indexed & record CR0 */ \
  V(stdcx, STDCX, 0x7C0001AD)
1290
// PowerPC load-and-reserve opcodes (X-form variants carrying the EH hint
// field). Each entry is V(assembler mnemonic, enum name, opcode pattern).
#define PPC_X_OPCODE_EH_L_FORM_LIST(V)      \
  /* Load Byte And Reserve Indexed */       \
  V(lbarx, LBARX, 0x7C000068)               \
  /* Load Halfword And Reserve Indexed */   \
  V(lharx, LHARX, 0x7C0000E8)               \
  /* Load Word And Reserve Indexed */       \
  V(lwarx, LWARX, 0x7C000028)               \
  /* Load Doubleword And Reserve Indexed */ \
  V(ldarx, LDARX, 0x7C0000A8)
1300
1301 #define PPC_X_OPCODE_UNUSED_LIST(V) \
1302 /* Bit Permute Doubleword */ \
1303 V(bpermd, BPERMD, 0x7C0001F8) \
1304 /* Extend Sign Word */ \
1305 V(extsw, EXTSW, 0x7C0007B4) \
1306 /* Load Word Algebraic with Update Indexed */ \
1307 V(lwaux, LWAUX, 0x7C0002EA) \
1308 /* Load Word Algebraic Indexed */ \
1309 V(lwax, LWAX, 0x7C0002AA) \
1310 /* Parity Doubleword */ \
1311 V(prtyd, PRTYD, 0x7C000174) \
1312 /* Trap Doubleword */ \
1313 V(td, TD, 0x7C000088) \
1314 /* Branch Conditional to Branch Target Address Register */ \
1315 V(bctar, BCTAR, 0x4C000460) \
1316 /* Compare Byte */ \
1317 V(cmpb, CMPB, 0x7C0003F8) \
1318 /* Data Cache Block Flush */ \
1319 V(dcbf, DCBF, 0x7C0000AC) \
1320 /* Data Cache Block Store */ \
1321 V(dcbst, DCBST, 0x7C00006C) \
1322 /* Data Cache Block Touch */ \
1323 V(dcbt, DCBT, 0x7C00022C) \
1324 /* Data Cache Block Touch for Store */ \
1325 V(dcbtst, DCBTST, 0x7C0001EC) \
1326 /* Data Cache Block Zero */ \
1327 V(dcbz, DCBZ, 0x7C0007EC) \
1328 /* Equivalent */ \
1329 V(eqv, EQV, 0x7C000238) \
1330 /* Instruction Cache Block Invalidate */ \
1331 V(icbi, ICBI, 0x7C0007AC) \
1332 /* NAND */ \
1333 V(nand, NAND, 0x7C0003B8) \
1334 /* Parity Word */ \
1335 V(prtyw, PRTYW, 0x7C000134) \
1336 /* Synchronize */ \
1337 V(sync, SYNC, 0x7C0004AC) \
1338 /* Trap Word */ \
1339 V(tw, TW, 0x7C000008) \
  /* Executed No Operation */                                              \
1341 V(xnop, XNOP, 0x68000000) \
1342 /* Convert Binary Coded Decimal To Declets */ \
1343 V(cbcdtd, CBCDTD, 0x7C000274) \
1344 /* Convert Declets To Binary Coded Decimal */ \
1345 V(cdtbcd, CDTBCD, 0x7C000234) \
1346 /* Decimal Floating Add */ \
1347 V(dadd, DADD, 0xEC000004) \
1348 /* Decimal Floating Add Quad */ \
1349 V(daddq, DADDQ, 0xFC000004) \
1350 /* Decimal Floating Convert From Fixed */ \
1351 V(dcffix, DCFFIX, 0xEC000644) \
1352 /* Decimal Floating Convert From Fixed Quad */ \
1353 V(dcffixq, DCFFIXQ, 0xFC000644) \
1354 /* Decimal Floating Compare Ordered */ \
1355 V(dcmpo, DCMPO, 0xEC000104) \
1356 /* Decimal Floating Compare Ordered Quad */ \
1357 V(dcmpoq, DCMPOQ, 0xFC000104) \
1358 /* Decimal Floating Compare Unordered */ \
1359 V(dcmpu, DCMPU, 0xEC000504) \
1360 /* Decimal Floating Compare Unordered Quad */ \
1361 V(dcmpuq, DCMPUQ, 0xFC000504) \
1362 /* Decimal Floating Convert To DFP Long */ \
1363 V(dctdp, DCTDP, 0xEC000204) \
1364 /* Decimal Floating Convert To Fixed */ \
1365 V(dctfix, DCTFIX, 0xEC000244) \
1366 /* Decimal Floating Convert To Fixed Quad */ \
1367 V(dctfixq, DCTFIXQ, 0xFC000244) \
1368 /* Decimal Floating Convert To DFP Extended */ \
1369 V(dctqpq, DCTQPQ, 0xFC000204) \
1370 /* Decimal Floating Decode DPD To BCD */ \
1371 V(ddedpd, DDEDPD, 0xEC000284) \
1372 /* Decimal Floating Decode DPD To BCD Quad */ \
1373 V(ddedpdq, DDEDPDQ, 0xFC000284) \
1374 /* Decimal Floating Divide */ \
1375 V(ddiv, DDIV, 0xEC000444) \
1376 /* Decimal Floating Divide Quad */ \
1377 V(ddivq, DDIVQ, 0xFC000444) \
1378 /* Decimal Floating Encode BCD To DPD */ \
1379 V(denbcd, DENBCD, 0xEC000684) \
1380 /* Decimal Floating Encode BCD To DPD Quad */ \
1381 V(denbcdq, DENBCDQ, 0xFC000684) \
1382 /* Decimal Floating Insert Exponent */ \
1383 V(diex, DIEX, 0xEC0006C4) \
1384 /* Decimal Floating Insert Exponent Quad */ \
1385 V(diexq, DIEXQ, 0xFC0006C4) \
1386 /* Decimal Floating Multiply */ \
1387 V(dmul, DMUL, 0xEC000044) \
1388 /* Decimal Floating Multiply Quad */ \
1389 V(dmulq, DMULQ, 0xFC000044) \
1390 /* Decimal Floating Round To DFP Long */ \
1391 V(drdpq, DRDPQ, 0xFC000604) \
1392 /* Decimal Floating Round To DFP Short */ \
1393 V(drsp, DRSP, 0xEC000604) \
1394 /* Decimal Floating Subtract */ \
1395 V(dsub, DSUB, 0xEC000404) \
1396 /* Decimal Floating Subtract Quad */ \
1397 V(dsubq, DSUBQ, 0xFC000404) \
1398 /* Decimal Floating Test Exponent */ \
1399 V(dtstex, DTSTEX, 0xEC000144) \
1400 /* Decimal Floating Test Exponent Quad */ \
1401 V(dtstexq, DTSTEXQ, 0xFC000144) \
1402 /* Decimal Floating Test Significance */ \
1403 V(dtstsf, DTSTSF, 0xEC000544) \
1404 /* Decimal Floating Test Significance Quad */ \
1405 V(dtstsfq, DTSTSFQ, 0xFC000544) \
1406 /* Decimal Floating Extract Exponent */ \
1407 V(dxex, DXEX, 0xEC0002C4) \
1408 /* Decimal Floating Extract Exponent Quad */ \
1409 V(dxexq, DXEXQ, 0xFC0002C4) \
1410 /* Decorated Storage Notify */ \
1411 V(dsn, DSN, 0x7C0003C6) \
1412 /* Load Byte with Decoration Indexed */ \
1413 V(lbdx, LBDX, 0x7C000406) \
1414 /* Load Doubleword with Decoration Indexed */ \
1415 V(lddx, LDDX, 0x7C0004C6) \
1416 /* Load Floating Doubleword with Decoration Indexed */ \
1417 V(lfddx, LFDDX, 0x7C000646) \
1418 /* Load Halfword with Decoration Indexed */ \
1419 V(lhdx, LHDX, 0x7C000446) \
1420 /* Load Word with Decoration Indexed */ \
1421 V(lwdx, LWDX, 0x7C000486) \
1422 /* Store Byte with Decoration Indexed */ \
1423 V(stbdx, STBDX, 0x7C000506) \
1424 /* Store Doubleword with Decoration Indexed */ \
1425 V(stddx, STDDX, 0x7C0005C6) \
1426 /* Store Floating Doubleword with Decoration Indexed */ \
1427 V(stfddx, STFDDX, 0x7C000746) \
1428 /* Store Halfword with Decoration Indexed */ \
1429 V(sthdx, STHDX, 0x7C000546) \
1430 /* Store Word with Decoration Indexed */ \
1431 V(stwdx, STWDX, 0x7C000586) \
1432 /* Data Cache Block Allocate */ \
1433 V(dcba, DCBA, 0x7C0005EC) \
1434 /* Data Cache Block Invalidate */ \
1435 V(dcbi, DCBI, 0x7C0003AC) \
1436 /* Instruction Cache Block Touch */ \
1437 V(icbt, ICBT, 0x7C00002C) \
1438 /* Move to Condition Register from XER */ \
1439 V(mcrxr, MCRXR, 0x7C000400) \
1440 /* TLB Invalidate Local Indexed */ \
1441 V(tlbilx, TLBILX, 0x7C000024) \
1442 /* TLB Invalidate Virtual Address Indexed */ \
1443 V(tlbivax, TLBIVAX, 0x7C000624) \
1444 /* TLB Read Entry */ \
1445 V(tlbre, TLBRE, 0x7C000764) \
1446 /* TLB Search Indexed */ \
1447 V(tlbsx, TLBSX, 0x7C000724) \
1448 /* TLB Write Entry */ \
1449 V(tlbwe, TLBWE, 0x7C0007A4) \
1450 /* Write External Enable */ \
1451 V(wrtee, WRTEE, 0x7C000106) \
1452 /* Write External Enable Immediate */ \
1453 V(wrteei, WRTEEI, 0x7C000146) \
1454 /* Data Cache Read */ \
1455 V(dcread, DCREAD, 0x7C00028C) \
1456 /* Instruction Cache Read */ \
1457 V(icread, ICREAD, 0x7C0007CC) \
1458 /* Data Cache Invalidate */ \
1459 V(dci, DCI, 0x7C00038C) \
1460 /* Instruction Cache Invalidate */ \
1461 V(ici, ICI, 0x7C00078C) \
1462 /* Move From Device Control Register User Mode Indexed */ \
1463 V(mfdcrux, MFDCRUX, 0x7C000246) \
1464 /* Move From Device Control Register Indexed */ \
1465 V(mfdcrx, MFDCRX, 0x7C000206) \
1466 /* Move To Device Control Register User Mode Indexed */ \
1467 V(mtdcrux, MTDCRUX, 0x7C000346) \
1468 /* Move To Device Control Register Indexed */ \
1469 V(mtdcrx, MTDCRX, 0x7C000306) \
1470 /* Return From Debug Interrupt */ \
1471 V(rfdi, RFDI, 0x4C00004E) \
1472 /* Data Cache Block Flush by External PID */ \
1473 V(dcbfep, DCBFEP, 0x7C0000FE) \
1474 /* Data Cache Block Store by External PID */ \
1475 V(dcbstep, DCBSTEP, 0x7C00007E) \
1476 /* Data Cache Block Touch by External PID */ \
1477 V(dcbtep, DCBTEP, 0x7C00027E) \
1478 /* Data Cache Block Touch for Store by External PID */ \
1479 V(dcbtstep, DCBTSTEP, 0x7C0001FE) \
1480 /* Data Cache Block Zero by External PID */ \
1481 V(dcbzep, DCBZEP, 0x7C0007FE) \
1482 /* Instruction Cache Block Invalidate by External PID */ \
1483 V(icbiep, ICBIEP, 0x7C0007BE) \
1484 /* Load Byte and Zero by External PID Indexed */ \
1485 V(lbepx, LBEPX, 0x7C0000BE) \
1486 /* Load Floating-Point Double by External PID Indexed */ \
1487 V(lfdepx, LFDEPX, 0x7C0004BE) \
1488 /* Load Halfword and Zero by External PID Indexed */ \
1489 V(lhepx, LHEPX, 0x7C00023E) \
1490 /* Load Vector by External PID Indexed */ \
1491 V(lvepx, LVEPX, 0x7C00024E) \
1492 /* Load Vector by External PID Indexed Last */ \
1493 V(lvepxl, LVEPXL, 0x7C00020E) \
1494 /* Load Word and Zero by External PID Indexed */ \
1495 V(lwepx, LWEPX, 0x7C00003E) \
1496 /* Store Byte by External PID Indexed */ \
1497 V(stbepx, STBEPX, 0x7C0001BE) \
1498 /* Store Floating-Point Double by External PID Indexed */ \
1499 V(stfdepx, STFDEPX, 0x7C0005BE) \
1500 /* Store Halfword by External PID Indexed */ \
1501 V(sthepx, STHEPX, 0x7C00033E) \
1502 /* Store Vector by External PID Indexed */ \
1503 V(stvepx, STVEPX, 0x7C00064E) \
1504 /* Store Vector by External PID Indexed Last */ \
1505 V(stvepxl, STVEPXL, 0x7C00060E) \
1506 /* Store Word by External PID Indexed */ \
1507 V(stwepx, STWEPX, 0x7C00013E) \
1508 /* Load Doubleword by External PID Indexed */ \
1509 V(ldepx, LDEPX, 0x7C00003A) \
1510 /* Store Doubleword by External PID Indexed */ \
1511 V(stdepx, STDEPX, 0x7C00013A) \
1512 /* TLB Search and Reserve Indexed */ \
1513 V(tlbsrx, TLBSRX, 0x7C0006A5) \
1514 /* External Control In Word Indexed */ \
1515 V(eciwx, ECIWX, 0x7C00026C) \
1516 /* External Control Out Word Indexed */ \
1517 V(ecowx, ECOWX, 0x7C00036C) \
1518 /* Data Cache Block Lock Clear */ \
1519 V(dcblc, DCBLC, 0x7C00030C) \
1520 /* Data Cache Block Lock Query */ \
1521 V(dcblq, DCBLQ, 0x7C00034D) \
1522 /* Data Cache Block Touch and Lock Set */ \
1523 V(dcbtls, DCBTLS, 0x7C00014C) \
1524 /* Data Cache Block Touch for Store and Lock Set */ \
1525 V(dcbtstls, DCBTSTLS, 0x7C00010C) \
1526 /* Instruction Cache Block Lock Clear */ \
1527 V(icblc, ICBLC, 0x7C0001CC) \
1528 /* Instruction Cache Block Lock Query */ \
1529 V(icblq, ICBLQ, 0x7C00018D) \
1530 /* Instruction Cache Block Touch and Lock Set */ \
1531 V(icbtls, ICBTLS, 0x7C0003CC) \
1532 /* Floating Compare Ordered */ \
1533 V(fcmpo, FCMPO, 0xFC000040) \
1534 /* Floating Compare Unordered */ \
1535 V(fcmpu, FCMPU, 0xFC000000) \
1536 /* Floating Test for software Divide */ \
1537 V(ftdiv, FTDIV, 0xFC000100) \
1538 /* Floating Test for software Square Root */ \
1539 V(ftsqrt, FTSQRT, 0xFC000140) \
1540 /* Load Floating-Point as Integer Word Algebraic Indexed */ \
1541 V(lfiwax, LFIWAX, 0x7C0006AE) \
1542 /* Load Floating-Point as Integer Word and Zero Indexed */ \
1543 V(lfiwzx, LFIWZX, 0x7C0006EE) \
1544 /* Move To Condition Register from FPSCR */ \
1545 V(mcrfs, MCRFS, 0xFC000080) \
1546 /* Store Floating-Point as Integer Word Indexed */ \
1547 V(stfiwx, STFIWX, 0x7C0007AE) \
1548 /* Load Floating-Point Double Pair Indexed */ \
1549 V(lfdpx, LFDPX, 0x7C00062E) \
1550 /* Store Floating-Point Double Pair Indexed */ \
1551 V(stfdpx, STFDPX, 0x7C00072E) \
1552 /* Floating Absolute Value */ \
1553 V(fabs, FABS, 0xFC000210) \
1554 /* Floating Convert From Integer Doubleword */ \
1555 V(fcfid, FCFID, 0xFC00069C) \
1556 /* Floating Convert From Integer Doubleword Single */ \
1557 V(fcfids, FCFIDS, 0xEC00069C) \
1558 /* Floating Convert From Integer Doubleword Unsigned */ \
1559 V(fcfidu, FCFIDU, 0xFC00079C) \
1560 /* Floating Convert From Integer Doubleword Unsigned Single */ \
1561 V(fcfidus, FCFIDUS, 0xEC00079C) \
1562 /* Floating Copy Sign */ \
1563 V(fcpsgn, FCPSGN, 0xFC000010) \
1564 /* Floating Convert To Integer Doubleword */ \
1565 V(fctid, FCTID, 0xFC00065C) \
1566 /* Floating Convert To Integer Doubleword Unsigned */ \
1567 V(fctidu, FCTIDU, 0xFC00075C) \
1568 /* Floating Convert To Integer Doubleword Unsigned with round toward */ \
1569 /* Zero */ \
1570 V(fctiduz, FCTIDUZ, 0xFC00075E) \
1571 /* Floating Convert To Integer Doubleword with round toward Zero */ \
1572 V(fctidz, FCTIDZ, 0xFC00065E) \
1573 /* Floating Convert To Integer Word */ \
1574 V(fctiw, FCTIW, 0xFC00001C) \
1575 /* Floating Convert To Integer Word Unsigned */ \
1576 V(fctiwu, FCTIWU, 0xFC00011C) \
1577 /* Floating Convert To Integer Word Unsigned with round toward Zero */ \
1578 V(fctiwuz, FCTIWUZ, 0xFC00011E) \
1579 /* Floating Convert To Integer Word with round to Zero */ \
1580 V(fctiwz, FCTIWZ, 0xFC00001E) \
1581 /* Floating Move Register */ \
1582 V(fmr, FMR, 0xFC000090) \
1583 /* Floating Negative Absolute Value */ \
1584 V(fnabs, FNABS, 0xFC000110) \
1585 /* Floating Negate */ \
1586 V(fneg, FNEG, 0xFC000050) \
1587 /* Floating Round to Single-Precision */ \
1588 V(frsp, FRSP, 0xFC000018) \
1589 /* Move From FPSCR */ \
1590 V(mffs, MFFS, 0xFC00048E) \
1591 /* Move To FPSCR Bit 0 */ \
1592 V(mtfsb0, MTFSB0, 0xFC00008C) \
1593 /* Move To FPSCR Bit 1 */ \
1594 V(mtfsb1, MTFSB1, 0xFC00004C) \
1595 /* Move To FPSCR Field Immediate */ \
1596 V(mtfsfi, MTFSFI, 0xFC00010C) \
1597 /* Floating Round To Integer Minus */ \
1598 V(frim, FRIM, 0xFC0003D0) \
1599 /* Floating Round To Integer Nearest */ \
1600 V(frin, FRIN, 0xFC000310) \
1601 /* Floating Round To Integer Plus */ \
1602 V(frip, FRIP, 0xFC000390) \
1603 /* Floating Round To Integer toward Zero */ \
1604 V(friz, FRIZ, 0xFC000350) \
1605 /* Multiply Cross Halfword to Word Signed */ \
1606 V(mulchw, MULCHW, 0x10000150) \
1607 /* Multiply Cross Halfword to Word Unsigned */ \
1608 V(mulchwu, MULCHWU, 0x10000110) \
1609 /* Multiply High Halfword to Word Signed */ \
1610 V(mulhhw, MULHHW, 0x10000050) \
1611 /* Multiply High Halfword to Word Unsigned */ \
1612 V(mulhhwu, MULHHWU, 0x10000010) \
1613 /* Multiply Low Halfword to Word Signed */ \
1614 V(mullhw, MULLHW, 0x10000350) \
1615 /* Multiply Low Halfword to Word Unsigned */ \
1616 V(mullhwu, MULLHWU, 0x10000310) \
1617 /* Determine Leftmost Zero Byte DQ 56 E0000000 P 58 LSQ lq Load Quadword */ \
1618 V(dlmzb, DLMZB, 0x7C00009C) \
1619 /* Load Quadword And Reserve Indexed */ \
1620 V(lqarx, LQARX, 0x7C000228) \
1621 /* Store Quadword Conditional Indexed and record CR0 */ \
1622 V(stqcx, STQCX, 0x7C00016D) \
1623 /* Load String Word Immediate */ \
1624 V(lswi, LSWI, 0x7C0004AA) \
1625 /* Load String Word Indexed */ \
1626 V(lswx, LSWX, 0x7C00042A) \
1627 /* Store String Word Immediate */ \
1628 V(stswi, STSWI, 0x7C0005AA) \
1629 /* Store String Word Indexed */ \
1630 V(stswx, STSWX, 0x7C00052A) \
1631 /* Clear BHRB */ \
1632 V(clrbhrb, CLRBHRB, 0x7C00035C) \
1633 /* Enforce In-order Execution of I/O */ \
1634 V(eieio, EIEIO, 0x7C0006AC) \
1635 /* Load Byte and Zero Caching Inhibited Indexed */ \
1636 V(lbzcix, LBZCIX, 0x7C0006AA) \
1637 /* Load Doubleword Caching Inhibited Indexed */ \
1638 V(ldcix, LDCIX, 0x7C0006EA) \
1639 /* Load Halfword and Zero Caching Inhibited Indexed */ \
1640 V(lhzcix, LHZCIX, 0x7C00066A) \
1641 /* Load Word and Zero Caching Inhibited Indexed */ \
1642 V(lwzcix, LWZCIX, 0x7C00062A) \
1643 /* Move From Segment Register */ \
1644 V(mfsr, MFSR, 0x7C0004A6) \
1645 /* Move From Segment Register Indirect */ \
1646 V(mfsrin, MFSRIN, 0x7C000526) \
1647 /* Move To Machine State Register Doubleword */ \
1648 V(mtmsrd, MTMSRD, 0x7C000164) \
1649 /* Move To Split Little Endian */ \
1650 V(mtsle, MTSLE, 0x7C000126) \
1651 /* Move To Segment Register */ \
1652 V(mtsr, MTSR, 0x7C0001A4) \
1653 /* Move To Segment Register Indirect */ \
1654 V(mtsrin, MTSRIN, 0x7C0001E4) \
1655 /* SLB Find Entry ESID */ \
1656 V(slbfee, SLBFEE, 0x7C0007A7) \
1657 /* SLB Invalidate All */ \
1658 V(slbia, SLBIA, 0x7C0003E4) \
1659 /* SLB Invalidate Entry */ \
1660 V(slbie, SLBIE, 0x7C000364) \
1661 /* SLB Move From Entry ESID */ \
1662 V(slbmfee, SLBMFEE, 0x7C000726) \
1663 /* SLB Move From Entry VSID */ \
1664 V(slbmfev, SLBMFEV, 0x7C0006A6) \
1665 /* SLB Move To Entry */ \
1666 V(slbmte, SLBMTE, 0x7C000324) \
1667 /* Store Byte Caching Inhibited Indexed */ \
1668 V(stbcix, STBCIX, 0x7C0007AA) \
1669 /* Store Doubleword Caching Inhibited Indexed */ \
1670 V(stdcix, STDCIX, 0x7C0007EA) \
1671 /* Store Halfword and Zero Caching Inhibited Indexed */ \
1672 V(sthcix, STHCIX, 0x7C00076A) \
1673 /* Store Word and Zero Caching Inhibited Indexed */ \
1674 V(stwcix, STWCIX, 0x7C00072A) \
1675 /* TLB Invalidate All */ \
1676 V(tlbia, TLBIA, 0x7C0002E4) \
1677 /* TLB Invalidate Entry */ \
1678 V(tlbie, TLBIE, 0x7C000264) \
1679 /* TLB Invalidate Entry Local */ \
1680 V(tlbiel, TLBIEL, 0x7C000224) \
1681 /* Message Clear Privileged */ \
1682 V(msgclrp, MSGCLRP, 0x7C00015C) \
1683 /* Message Send Privileged */ \
1684 V(msgsndp, MSGSNDP, 0x7C00011C) \
1685 /* Message Clear */ \
1686 V(msgclr, MSGCLR, 0x7C0001DC) \
1687 /* Message Send */ \
1688 V(msgsnd, MSGSND, 0x7C00019C) \
1689 /* Move From Machine State Register */ \
1690 V(mfmsr, MFMSR, 0x7C0000A6) \
1691 /* Move To Machine State Register */ \
1692 V(mtmsr, MTMSR, 0x7C000124) \
1693 /* TLB Synchronize */ \
1694 V(tlbsync, TLBSYNC, 0x7C00046C) \
1695 /* Transaction Abort */ \
1696 V(tabort, TABORT, 0x7C00071D) \
1697 /* Transaction Abort Doubleword Conditional */ \
1698 V(tabortdc, TABORTDC, 0x7C00065D) \
1699 /* Transaction Abort Doubleword Conditional Immediate */ \
1700 V(tabortdci, TABORTDCI, 0x7C0006DD) \
1701 /* Transaction Abort Word Conditional */ \
1702 V(tabortwc, TABORTWC, 0x7C00061D) \
1703 /* Transaction Abort Word Conditional Immediate */ \
1704 V(tabortwci, TABORTWCI, 0x7C00069D) \
1705 /* Transaction Begin */ \
1706 V(tbegin, TBEGIN, 0x7C00051D) \
1707 /* Transaction Check */ \
1708 V(tcheck, TCHECK, 0x7C00059C) \
1709 /* Transaction End */ \
1710 V(tend, TEND, 0x7C00055C) \
1711 /* Transaction Recheckpoint */ \
1712 V(trechkpt, TRECHKPT, 0x7C0007DD) \
1713 /* Transaction Reclaim */ \
1714 V(treclaim, TRECLAIM, 0x7C00075D) \
1715 /* Transaction Suspend or Resume */ \
1716 V(tsr, TSR, 0x7C0005DC) \
1717 /* Load Vector Element Byte Indexed */ \
1718 V(lvebx, LVEBX, 0x7C00000E) \
1719 /* Load Vector Element Halfword Indexed */ \
1720 V(lvehx, LVEHX, 0x7C00004E) \
1721 /* Load Vector Element Word Indexed */ \
1722 V(lvewx, LVEWX, 0x7C00008E) \
1723 /* Load Vector for Shift Left */ \
1724 V(lvsl, LVSL, 0x7C00000C) \
1725 /* Load Vector for Shift Right */ \
1726 V(lvsr, LVSR, 0x7C00004C) \
1727 /* Load Vector Indexed Last */ \
1728 V(lvxl, LVXL, 0x7C0002CE) \
1729 /* Store Vector Element Byte Indexed */ \
1730 V(stvebx, STVEBX, 0x7C00010E) \
1731 /* Store Vector Element Halfword Indexed */ \
1732 V(stvehx, STVEHX, 0x7C00014E) \
1733 /* Store Vector Element Word Indexed */ \
1734 V(stvewx, STVEWX, 0x7C00018E) \
1735 /* Store Vector Indexed Last */ \
1736 V(stvxl, STVXL, 0x7C0003CE) \
1737 /* Floating Merge Even Word */ \
1738 V(fmrgew, FMRGEW, 0xFC00078C) \
1739 /* Floating Merge Odd Word */ \
1740 V(fmrgow, FMRGOW, 0xFC00068C) \
1741 /* Wait for Interrupt */ \
1742 V(wait, WAIT, 0x7C00007C)
1743
// Aggregates every X-instruction-format sub-list (split above by operand
// form A..G and EH_L, plus the currently-unused entries) into one X-macro
// list for bulk enum/table generation.
#define PPC_X_OPCODE_LIST(V)     \
  PPC_X_OPCODE_A_FORM_LIST(V)    \
  PPC_X_OPCODE_B_FORM_LIST(V)    \
  PPC_X_OPCODE_C_FORM_LIST(V)    \
  PPC_X_OPCODE_D_FORM_LIST(V)    \
  PPC_X_OPCODE_E_FORM_LIST(V)    \
  PPC_X_OPCODE_F_FORM_LIST(V)    \
  PPC_X_OPCODE_G_FORM_LIST(V)    \
  PPC_X_OPCODE_EH_L_FORM_LIST(V) \
  PPC_X_OPCODE_UNUSED_LIST(V)
1754
// EVS-format opcodes. Each entry is V(assembler mnemonic, opcode enum name,
// 32-bit opcode bit pattern).
#define PPC_EVS_OPCODE_LIST(V) \
  /* Vector Select */          \
  V(evsel, EVSEL, 0x10000278)
1758
// DS-format opcodes (doubleword load/store family). Each entry is
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_DS_OPCODE_LIST(V)            \
  /* Load Doubleword */                  \
  V(ld, LD, 0xE8000000)                  \
  /* Load Doubleword with Update */      \
  V(ldu, LDU, 0xE8000001)                \
  /* Load Word Algebraic */              \
  V(lwa, LWA, 0xE8000002)                \
  /* Store Doubleword */                 \
  V(std, STD, 0xF8000000)                \
  /* Store Doubleword with Update */     \
  V(stdu, STDU, 0xF8000001)              \
  /* Load Floating-Point Double Pair */  \
  V(lfdp, LFDP, 0xE4000000)              \
  /* Store Floating-Point Double Pair */ \
  V(stfdp, STFDP, 0xF4000000)            \
  /* Store Quadword */                   \
  V(stq, STQ, 0xF8000002)
1776
1777 #define PPC_DQ_OPCODE_LIST(V) V(lsq, LSQ, 0xE0000000)
1778
// D-format opcodes (immediate-operand arithmetic/logic/compare and
// displacement-addressed load/store). Each entry is V(assembler mnemonic,
// opcode enum name, 32-bit opcode bit pattern).
#define PPC_D_OPCODE_LIST(V)                    \
  /* Trap Doubleword Immediate */               \
  V(tdi, TDI, 0x08000000)                       \
  /* Add Immediate */                           \
  V(addi, ADDI, 0x38000000)                     \
  /* Add Immediate Carrying */                  \
  V(addic, ADDIC, 0x30000000)                   \
  /* Add Immediate Carrying & record CR0 */     \
  V(addicx, ADDICx, 0x34000000)                 \
  /* Add Immediate Shifted */                   \
  V(addis, ADDIS, 0x3C000000)                   \
  /* AND Immediate & record CR0 */              \
  V(andix, ANDIx, 0x70000000)                   \
  /* AND Immediate Shifted & record CR0 */      \
  V(andisx, ANDISx, 0x74000000)                 \
  /* Compare Immediate */                       \
  V(cmpi, CMPI, 0x2C000000)                     \
  /* Compare Logical Immediate */               \
  V(cmpli, CMPLI, 0x28000000)                   \
  /* Load Byte and Zero */                      \
  V(lbz, LBZ, 0x88000000)                       \
  /* Load Byte and Zero with Update */          \
  V(lbzu, LBZU, 0x8C000000)                     \
  /* Load Halfword Algebraic */                 \
  V(lha, LHA, 0xA8000000)                       \
  /* Load Halfword Algebraic with Update */     \
  V(lhau, LHAU, 0xAC000000)                     \
  /* Load Halfword and Zero */                  \
  V(lhz, LHZ, 0xA0000000)                       \
  /* Load Halfword and Zero with Update */      \
  V(lhzu, LHZU, 0xA4000000)                     \
  /* Load Multiple Word */                      \
  V(lmw, LMW, 0xB8000000)                       \
  /* Load Word and Zero */                      \
  V(lwz, LWZ, 0x80000000)                       \
  /* Load Word and Zero with Update */          \
  V(lwzu, LWZU, 0x84000000)                     \
  /* Multiply Low Immediate */                  \
  V(mulli, MULLI, 0x1C000000)                   \
  /* OR Immediate */                            \
  V(ori, ORI, 0x60000000)                       \
  /* OR Immediate Shifted */                    \
  V(oris, ORIS, 0x64000000)                     \
  /* Store Byte */                              \
  V(stb, STB, 0x98000000)                       \
  /* Store Byte with Update */                  \
  V(stbu, STBU, 0x9C000000)                     \
  /* Store Halfword */                          \
  V(sth, STH, 0xB0000000)                       \
  /* Store Halfword with Update */              \
  V(sthu, STHU, 0xB4000000)                     \
  /* Store Multiple Word */                     \
  V(stmw, STMW, 0xBC000000)                     \
  /* Store Word */                              \
  V(stw, STW, 0x90000000)                       \
  /* Store Word with Update */                  \
  V(stwu, STWU, 0x94000000)                     \
  /* Subtract From Immediate Carrying */        \
  V(subfic, SUBFIC, 0x20000000)                 \
  /* Trap Word Immediate */                     \
  V(twi, TWI, 0x0C000000)                       \
  /* XOR Immediate */                           \
  V(xori, XORI, 0x68000000)                     \
  /* XOR Immediate Shifted */                   \
  V(xoris, XORIS, 0x6C000000)                   \
  /* Load Floating-Point Double */              \
  V(lfd, LFD, 0xC8000000)                       \
  /* Load Floating-Point Double with Update */  \
  V(lfdu, LFDU, 0xCC000000)                     \
  /* Load Floating-Point Single */              \
  V(lfs, LFS, 0xC0000000)                       \
  /* Load Floating-Point Single with Update */  \
  V(lfsu, LFSU, 0xC4000000)                     \
  /* Store Floating-Point Double */             \
  V(stfd, STFD, 0xD8000000)                     \
  /* Store Floating-Point Double with Update */ \
  V(stfdu, STFDU, 0xDC000000)                   \
  /* Store Floating-Point Single */             \
  V(stfs, STFS, 0xD0000000)                     \
  /* Store Floating-Point Single with Update */ \
  V(stfsu, STFSU, 0xD4000000)
1860
// XFL-format opcodes. Each entry is V(assembler mnemonic, opcode enum name,
// 32-bit opcode bit pattern).
#define PPC_XFL_OPCODE_LIST(V) \
  /* Move To FPSCR Fields */   \
  V(mtfsf, MTFSF, 0xFC00058E)
1864
// XFX-format opcodes (special-purpose/condition register moves). Each entry
// is V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XFX_OPCODE_LIST(V)                  \
  /* Move From Condition Register */            \
  V(mfcr, MFCR, 0x7C000026)                     \
  /* Move From One Condition Register Field */  \
  V(mfocrf, MFOCRF, 0x7C100026)                 \
  /* Move From Special Purpose Register */      \
  V(mfspr, MFSPR, 0x7C0002A6)                   \
  /* Move To Condition Register Fields */       \
  V(mtcrf, MTCRF, 0x7C000120)                   \
  /* Move To One Condition Register Field */    \
  V(mtocrf, MTOCRF, 0x7C100120)                 \
  /* Move To Special Purpose Register */        \
  V(mtspr, MTSPR, 0x7C0003A6)                   \
  /* Debugger Notify Halt */                    \
  V(dnh, DNH, 0x4C00018C)                       \
  /* Move From Device Control Register */       \
  V(mfdcr, MFDCR, 0x7C000286)                   \
  /* Move To Device Control Register */         \
  V(mtdcr, MTDCR, 0x7C000386)                   \
  /* Move from Performance Monitor Register */  \
  V(mfpmr, MFPMR, 0x7C00029C)                   \
  /* Move To Performance Monitor Register */    \
  V(mtpmr, MTPMR, 0x7C00039C)                   \
  /* Move From Branch History Rolling Buffer */ \
  V(mfbhrbe, MFBHRBE, 0x7C00025C)               \
  /* Move From Time Base */                     \
  V(mftb, MFTB, 0x7C0002E6)
1892
// MDS-format opcodes (doubleword rotates with register-specified shift).
// Each entry is V(assembler mnemonic, opcode enum name, bit pattern).
#define PPC_MDS_OPCODE_LIST(V)                  \
  /* Rotate Left Doubleword then Clear Left */  \
  V(rldcl, RLDCL, 0x78000010)                   \
  /* Rotate Left Doubleword then Clear Right */ \
  V(rldcr, RLDCR, 0x78000012)
1898
// A-format opcodes (four-operand, mostly floating-point fused/select ops;
// 0xFC... patterns are double-precision, 0xEC... single-precision variants).
// Each entry is V(assembler mnemonic, opcode enum name, bit pattern).
#define PPC_A_OPCODE_LIST(V)                            \
  /* Integer Select */                                  \
  V(isel, ISEL, 0x7C00001E)                             \
  /* Floating Add */                                    \
  V(fadd, FADD, 0xFC00002A)                             \
  /* Floating Add Single */                             \
  V(fadds, FADDS, 0xEC00002A)                           \
  /* Floating Divide */                                 \
  V(fdiv, FDIV, 0xFC000024)                             \
  /* Floating Divide Single */                          \
  V(fdivs, FDIVS, 0xEC000024)                           \
  /* Floating Multiply-Add */                           \
  V(fmadd, FMADD, 0xFC00003A)                           \
  /* Floating Multiply-Add Single */                    \
  V(fmadds, FMADDS, 0xEC00003A)                         \
  /* Floating Multiply-Subtract */                      \
  V(fmsub, FMSUB, 0xFC000038)                           \
  /* Floating Multiply-Subtract Single */               \
  V(fmsubs, FMSUBS, 0xEC000038)                         \
  /* Floating Multiply */                               \
  V(fmul, FMUL, 0xFC000032)                             \
  /* Floating Multiply Single */                        \
  V(fmuls, FMULS, 0xEC000032)                           \
  /* Floating Negative Multiply-Add */                  \
  V(fnmadd, FNMADD, 0xFC00003E)                         \
  /* Floating Negative Multiply-Add Single */           \
  V(fnmadds, FNMADDS, 0xEC00003E)                       \
  /* Floating Negative Multiply-Subtract */             \
  V(fnmsub, FNMSUB, 0xFC00003C)                         \
  /* Floating Negative Multiply-Subtract Single */      \
  V(fnmsubs, FNMSUBS, 0xEC00003C)                       \
  /* Floating Reciprocal Estimate Single */             \
  V(fres, FRES, 0xEC000030)                             \
  /* Floating Reciprocal Square Root Estimate */        \
  V(frsqrte, FRSQRTE, 0xFC000034)                       \
  /* Floating Select */                                 \
  V(fsel, FSEL, 0xFC00002E)                             \
  /* Floating Square Root */                            \
  V(fsqrt, FSQRT, 0xFC00002C)                           \
  /* Floating Square Root Single */                     \
  V(fsqrts, FSQRTS, 0xEC00002C)                         \
  /* Floating Subtract */                               \
  V(fsub, FSUB, 0xFC000028)                             \
  /* Floating Subtract Single */                        \
  V(fsubs, FSUBS, 0xEC000028)                           \
  /* Floating Reciprocal Estimate */                    \
  V(fre, FRE, 0xFC000030)                               \
  /* Floating Reciprocal Square Root Estimate Single */ \
  V(frsqrtes, FRSQRTES, 0xEC000034)
1948
// VA-format opcodes actually used by the assembler/simulator (the unused
// remainder lives in PPC_VA_OPCODE_UNUSED_LIST below). Each entry is
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_VA_OPCODE_A_FORM_LIST(V)                            \
  /* Vector Permute */                                          \
  V(vperm, VPERM, 0x1000002B)                                   \
  /* Vector Multiply-Low-Add Unsigned Halfword Modulo */        \
  V(vmladduhm, VMLADDUHM, 0x10000022)                           \
  /* Vector Select */                                           \
  V(vsel, VSEL, 0x1000002A)                                     \
  /* Vector Multiply-Sum Signed Halfword Modulo */              \
  V(vmsumshm, VMSUMSHM, 0x10000028)                             \
  /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
  V(vmhraddshs, VMHRADDSHS, 0x10000021)
1960
// VA-format opcodes that are declared for completeness but not currently
// emitted. Each entry is V(assembler mnemonic, opcode enum name, pattern).
#define PPC_VA_OPCODE_UNUSED_LIST(V)                            \
  /* Vector Add Extended & write Carry Unsigned Quadword */     \
  V(vaddecuq, VADDECUQ, 0x1000003D)                             \
  /* Vector Add Extended Unsigned Quadword Modulo */            \
  V(vaddeuqm, VADDEUQM, 0x1000003C)                             \
  /* Vector Multiply-Add Single-Precision */                    \
  V(vmaddfp, VMADDFP, 0x1000002E)                               \
  /* Vector Multiply-High-Add Signed Halfword Saturate */       \
  V(vmhaddshs, VMHADDSHS, 0x10000020)                           \
  /* Vector Multiply-Sum Mixed Byte Modulo */                   \
  V(vmsummbm, VMSUMMBM, 0x10000025)                             \
  /* Vector Multiply-Sum Signed Halfword Saturate */            \
  V(vmsumshs, VMSUMSHS, 0x10000029)                             \
  /* Vector Multiply-Sum Unsigned Byte Modulo */                \
  V(vmsumubm, VMSUMUBM, 0x10000024)                             \
  /* Vector Multiply-Sum Unsigned Halfword Modulo */            \
  V(vmsumuhm, VMSUMUHM, 0x10000026)                             \
  /* Vector Multiply-Sum Unsigned Halfword Saturate */          \
  V(vmsumuhs, VMSUMUHS, 0x10000027)                             \
  /* Vector Negative Multiply-Subtract Single-Precision */      \
  V(vnmsubfp, VNMSUBFP, 0x1000002F)                             \
  /* Vector Shift Left Double by Octet Immediate */             \
  V(vsldoi, VSLDOI, 0x1000002C)                                 \
  /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
  V(vsubecuq, VSUBECUQ, 0x1000003F)                             \
  /* Vector Subtract Extended Unsigned Quadword Modulo */       \
  V(vsubeuqm, VSUBEUQM, 0x1000003E)                             \
  /* Vector Permute and Exclusive-OR */                         \
  V(vpermxor, VPERMXOR, 0x1000002D)
1990
// Aggregates all VA-format opcodes (used + unused sub-lists).
#define PPC_VA_OPCODE_LIST(V)  \
  PPC_VA_OPCODE_A_FORM_LIST(V) \
  PPC_VA_OPCODE_UNUSED_LIST(V)
1994
// XX1-format opcodes (VSX scalar/vector loads, stores, and GPR<->VSR moves).
// Each entry is V(assembler mnemonic, opcode enum name, bit pattern).
#define PPC_XX1_OPCODE_LIST(V)                                 \
  /* Load VSR Scalar Doubleword Indexed */                     \
  V(lxsdx, LXSDX, 0x7C000498)                                  \
  /* Load VSX Scalar as Integer Word Algebraic Indexed */      \
  V(lxsiwax, LXSIWAX, 0x7C000098)                              \
  /* Load VSX Scalar as Integer Byte & Zero Indexed */         \
  V(lxsibzx, LXSIBZX, 0x7C00061A)                              \
  /* Load VSX Scalar as Integer Halfword & Zero Indexed */     \
  V(lxsihzx, LXSIHZX, 0x7C00065A)                              \
  /* Load VSX Scalar as Integer Word and Zero Indexed */       \
  V(lxsiwzx, LXSIWZX, 0x7C000018)                              \
  /* Load VSX Scalar Single-Precision Indexed */               \
  V(lxsspx, LXSSPX, 0x7C000418)                                \
  /* Load VSR Vector Doubleword*2 Indexed */                   \
  V(lxvd, LXVD, 0x7C000698)                                    \
  /* Load VSX Vector Indexed */                                \
  V(lxvx, LXVX, 0x7C000218)                                    \
  /* Load VSR Vector Doubleword & Splat Indexed */             \
  V(lxvdsx, LXVDSX, 0x7C000298)                                \
  /* Load VSR Vector Word*4 Indexed */                         \
  V(lxvw, LXVW, 0x7C000618)                                    \
  /* Move To VSR Doubleword */                                 \
  V(mtvsrd, MTVSRD, 0x7C000166)                                \
  /* Move To VSR Double Doubleword */                          \
  V(mtvsrdd, MTVSRDD, 0x7C000366)                              \
  /* Move To VSR Word Algebraic */                             \
  V(mtvsrwa, MTVSRWA, 0x7C0001A6)                              \
  /* Move To VSR Word and Zero */                              \
  V(mtvsrwz, MTVSRWZ, 0x7C0001E6)                              \
  /* Move From VSR Doubleword */                               \
  V(mfvsrd, MFVSRD, 0x7C000066)                                \
  /* Move From VSR Word and Zero */                            \
  V(mfvsrwz, MFVSRWZ, 0x7C0000E6)                              \
  /* Store VSR Scalar Doubleword Indexed */                    \
  V(stxsdx, STXSDX, 0x7C000598)                                \
  /* Store VSX Scalar as Integer Word Indexed */               \
  V(stxsiwx, STXSIWX, 0x7C000118)                              \
  /* Store VSX Scalar as Integer Halfword Indexed */           \
  V(stxsihx, STXSIHX, 0x7C00075A)                              \
  /* Store VSX Scalar as Integer Byte Indexed */               \
  V(stxsibx, STXSIBX, 0x7C00071A)                              \
  /* Store VSX Scalar Single-Precision Indexed */              \
  V(stxsspx, STXSSPX, 0x7C000518)                              \
  /* Store VSR Vector Doubleword*2 Indexed */                  \
  V(stxvd, STXVD, 0x7C000798)                                  \
  /* Store VSX Vector Indexed */                               \
  V(stxvx, STXVX, 0x7C000318)                                  \
  /* Store VSR Vector Word*4 Indexed */                        \
  V(stxvw, STXVW, 0x7C000718)
2044
// B-format opcodes (conditional branch with 14-bit displacement).
#define PPC_B_OPCODE_LIST(V) \
  /* Branch Conditional */   \
  V(bc, BCX, 0x40000000)
2048
// XO-format opcodes (integer arithmetic; the "o" variants additionally
// record overflow in XER[OV]). Each entry is V(assembler mnemonic, opcode
// enum name, 32-bit opcode bit pattern).
#define PPC_XO_OPCODE_LIST(V)                                               \
  /* Divide Doubleword */                                                   \
  V(divd, DIVD, 0x7C0003D2)                                                 \
  /* Divide Doubleword Extended */                                          \
  V(divde, DIVDE, 0x7C000352)                                               \
  /* Divide Doubleword Extended & record OV */                              \
  V(divdeo, DIVDEO, 0x7C000752)                                             \
  /* Divide Doubleword Extended Unsigned */                                 \
  V(divdeu, DIVDEU, 0x7C000312)                                             \
  /* Divide Doubleword Extended Unsigned & record OV */                     \
  V(divdeuo, DIVDEUO, 0x7C000712)                                           \
  /* Divide Doubleword & record OV */                                       \
  V(divdo, DIVDO, 0x7C0007D2)                                               \
  /* Divide Doubleword Unsigned */                                          \
  V(divdu, DIVDU, 0x7C000392)                                               \
  /* Divide Doubleword Unsigned & record OV */                              \
  V(divduo, DIVDUO, 0x7C000792)                                             \
  /* Multiply High Doubleword */                                            \
  V(mulhd, MULHD, 0x7C000092)                                               \
  /* Multiply High Doubleword Unsigned */                                   \
  V(mulhdu, MULHDU, 0x7C000012)                                             \
  /* Multiply Low Doubleword */                                             \
  V(mulld, MULLD, 0x7C0001D2)                                               \
  /* Multiply Low Doubleword & record OV */                                 \
  V(mulldo, MULLDO, 0x7C0005D2)                                             \
  /* Add */                                                                 \
  V(add, ADDX, 0x7C000214)                                                  \
  /* Add Carrying */                                                        \
  V(addc, ADDCX, 0x7C000014)                                                \
  /* Add Carrying & record OV */                                            \
  V(addco, ADDCO, 0x7C000414)                                               \
  /* Add Extended */                                                        \
  V(adde, ADDEX, 0x7C000114)                                                \
  /* Add Extended & record OV */                                            \
  V(addeo, ADDEO, 0x7C000514)                                               \
  /* Add to Minus One Extended */                                           \
  V(addme, ADDME, 0x7C0001D4)                                               \
  /* Add to Minus One Extended & record OV */                               \
  V(addmeo, ADDMEO, 0x7C0005D4)                                             \
  /* Add & record OV */                                                     \
  V(addo, ADDO, 0x7C000614)                                                 \
  /* Add to Zero Extended */                                                \
  V(addze, ADDZEX, 0x7C000194)                                              \
  /* Add to Zero Extended & record OV */                                    \
  V(addzeo, ADDZEO, 0x7C000594)                                             \
  /* Divide Word */                                                         \
  V(divw, DIVW, 0x7C0003D6)                                                 \
  /* Divide Word Extended */                                                \
  V(divwe, DIVWE, 0x7C000356)                                               \
  /* Divide Word Extended & record OV */                                    \
  V(divweo, DIVWEO, 0x7C000756)                                             \
  /* Divide Word Extended Unsigned */                                       \
  V(divweu, DIVWEU, 0x7C000316)                                             \
  /* Divide Word Extended Unsigned & record OV */                           \
  V(divweuo, DIVWEUO, 0x7C000716)                                           \
  /* Divide Word & record OV */                                             \
  V(divwo, DIVWO, 0x7C0007D6)                                               \
  /* Divide Word Unsigned */                                                \
  V(divwu, DIVWU, 0x7C000396)                                               \
  /* Divide Word Unsigned & record OV */                                    \
  V(divwuo, DIVWUO, 0x7C000796)                                             \
  /* Multiply High Word */                                                  \
  V(mulhw, MULHWX, 0x7C000096)                                              \
  /* Multiply High Word Unsigned */                                         \
  V(mulhwu, MULHWUX, 0x7C000016)                                            \
  /* Multiply Low Word */                                                   \
  V(mullw, MULLW, 0x7C0001D6)                                               \
  /* Multiply Low Word & record OV */                                       \
  V(mullwo, MULLWO, 0x7C0005D6)                                             \
  /* Negate */                                                              \
  V(neg, NEGX, 0x7C0000D0)                                                  \
  /* Negate & record OV */                                                  \
  V(nego, NEGO, 0x7C0004D0)                                                 \
  /* Subtract From */                                                       \
  V(subf, SUBFX, 0x7C000050)                                                \
  /* Subtract From Carrying */                                              \
  V(subfc, SUBFCX, 0x7C000010)                                              \
  /* Subtract From Carrying & record OV */                                  \
  V(subfco, SUBFCO, 0x7C000410)                                             \
  /* Subtract From Extended */                                              \
  V(subfe, SUBFEX, 0x7C000110)                                              \
  /* Subtract From Extended & record OV */                                  \
  V(subfeo, SUBFEO, 0x7C000510)                                             \
  /* Subtract From Minus One Extended */                                    \
  V(subfme, SUBFME, 0x7C0001D0)                                             \
  /* Subtract From Minus One Extended & record OV */                        \
  V(subfmeo, SUBFMEO, 0x7C0005D0)                                           \
  /* Subtract From & record OV */                                           \
  V(subfo, SUBFO, 0x7C000450)                                               \
  /* Subtract From Zero Extended */                                         \
  V(subfze, SUBFZE, 0x7C000190)                                             \
  /* Subtract From Zero Extended & record OV */                             \
  V(subfzeo, SUBFZEO, 0x7C000590)                                           \
  /* Add and Generate Sixes */                                              \
  V(addg, ADDG, 0x7C000094)                                                 \
  /* Multiply Accumulate Cross Halfword to Word Modulo Signed */            \
  V(macchw, MACCHW, 0x10000158)                                             \
  /* Multiply Accumulate Cross Halfword to Word Saturate Signed */          \
  V(macchws, MACCHWS, 0x100001D8)                                           \
  /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */        \
  V(macchwsu, MACCHWSU, 0x10000198)                                         \
  /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */          \
  V(macchwu, MACCHWU, 0x10000118)                                           \
  /* Multiply Accumulate High Halfword to Word Modulo Signed */             \
  V(machhw, MACHHW, 0x10000058)                                             \
  /* Multiply Accumulate High Halfword to Word Saturate Signed */           \
  V(machhws, MACHHWS, 0x100000D8)                                           \
  /* Multiply Accumulate High Halfword to Word Saturate Unsigned */         \
  V(machhwsu, MACHHWSU, 0x10000098)                                         \
  /* Multiply Accumulate High Halfword to Word Modulo Unsigned */           \
  V(machhwu, MACHHWU, 0x10000018)                                           \
  /* Multiply Accumulate Low Halfword to Word Modulo Signed */              \
  V(maclhw, MACLHW, 0x10000358)                                             \
  /* Multiply Accumulate Low Halfword to Word Saturate Signed */            \
  V(maclhws, MACLHWS, 0x100003D8)                                           \
  /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */          \
  V(maclhwsu, MACLHWSU, 0x10000398)                                         \
  /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */            \
  V(maclhwu, MACLHWU, 0x10000318)                                           \
  /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */   \
  V(nmacchw, NMACCHW, 0x1000015C)                                           \
  /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
  V(nmacchws, NMACCHWS, 0x100001DC)                                         \
  /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */    \
  V(nmachhw, NMACHHW, 0x1000005C)                                           \
  /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */  \
  V(nmachhws, NMACHHWS, 0x100000DC)                                         \
  /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */     \
  V(nmaclhw, NMACLHW, 0x1000035C)                                           \
  /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */   \
  V(nmaclhws, NMACLHWS, 0x100003DC)
2180
// XL-format opcodes (CR-field logic, CR-based branches, and interrupt
// return / power-management instructions). Each entry is
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_XL_OPCODE_LIST(V)                       \
  /* Branch Conditional to Count Register */        \
  V(bcctr, BCCTRX, 0x4C000420)                      \
  /* Branch Conditional to Link Register */         \
  V(bclr, BCLRX, 0x4C000020)                        \
  /* Condition Register AND */                      \
  V(crand, CRAND, 0x4C000202)                       \
  /* Condition Register AND with Complement */      \
  V(crandc, CRANDC, 0x4C000102)                     \
  /* Condition Register Equivalent */               \
  V(creqv, CREQV, 0x4C000242)                       \
  /* Condition Register NAND */                     \
  V(crnand, CRNAND, 0x4C0001C2)                     \
  /* Condition Register NOR */                      \
  V(crnor, CRNOR, 0x4C000042)                       \
  /* Condition Register OR */                       \
  V(cror, CROR, 0x4C000382)                         \
  /* Condition Register OR with Complement */       \
  V(crorc, CRORC, 0x4C000342)                       \
  /* Condition Register XOR */                      \
  V(crxor, CRXOR, 0x4C000182)                       \
  /* Instruction Synchronize */                     \
  V(isync, ISYNC, 0x4C00012C)                       \
  /* Move Condition Register Field */               \
  V(mcrf, MCRF, 0x4C000000)                         \
  /* Return From Critical Interrupt */              \
  V(rfci, RFCI, 0x4C000066)                         \
  /* Return From Interrupt */                       \
  V(rfi, RFI, 0x4C000064)                           \
  /* Return From Machine Check Interrupt */         \
  V(rfmci, RFMCI, 0x4C00004C)                       \
  /* Embedded Hypervisor Privilege */               \
  V(ehpriv, EHPRIV, 0x7C00021C)                     \
  /* Return From Guest Interrupt */                 \
  V(rfgi, RFGI, 0x4C0000CC)                         \
  /* Doze */                                        \
  V(doze, DOZE, 0x4C000324)                         \
  /* Return From Interrupt Doubleword Hypervisor */ \
  V(hrfid, HRFID, 0x4C000224)                       \
  /* Nap */                                         \
  V(nap, NAP, 0x4C000364)                           \
  /* Return from Event Based Branch */              \
  V(rfebb, RFEBB, 0x4C000124)                       \
  /* Return from Interrupt Doubleword */            \
  V(rfid, RFID, 0x4C000024)                         \
  /* Rip Van Winkle */                              \
  V(rvwinkle, RVWINKLE, 0x4C0003E4)                 \
  /* Sleep */                                       \
  V(sleep, SLEEP, 0x4C0003A4)
2230
// XX4-format opcodes. Each entry is V(assembler mnemonic, opcode enum name,
// 32-bit opcode bit pattern).
#define PPC_XX4_OPCODE_LIST(V) \
  /* VSX Select */             \
  V(xxsel, XXSEL, 0xF0000030)
2234
// I-format opcodes (unconditional branch with 24-bit displacement).
#define PPC_I_OPCODE_LIST(V) \
  /* Branch */               \
  V(b, BX, 0x48000000)
2238
// M-format opcodes (word rotate-and-mask instructions). Each entry is
// V(assembler mnemonic, opcode enum name, 32-bit opcode bit pattern).
#define PPC_M_OPCODE_LIST(V)                          \
  /* Rotate Left Word Immediate then Mask Insert */   \
  V(rlwimi, RLWIMIX, 0x50000000)                      \
  /* Rotate Left Word Immediate then AND with Mask */ \
  V(rlwinm, RLWINMX, 0x54000000)                      \
  /* Rotate Left Word then AND with Mask */           \
  V(rlwnm, RLWNMX, 0x5C000000)
2246
// VX-form opcodes whose UIM (element index) field sits in the middle of the
// encoding; decoded in Instruction::OpcodeBase with the 10..0 extended field.
#define PPC_VX_OPCODE_A_FORM_LIST(V)     \
  /* Vector Splat Byte */                \
  V(vspltb, VSPLTB, 0x1000020C)          \
  /* Vector Splat Word */                \
  V(vspltw, VSPLTW, 0x1000028C)          \
  /* Vector Splat Halfword */            \
  V(vsplth, VSPLTH, 0x1000024C)          \
  /* Vector Extract Unsigned Byte */     \
  V(vextractub, VEXTRACTUB, 0x1000020D)  \
  /* Vector Extract Unsigned Halfword */ \
  V(vextractuh, VEXTRACTUH, 0x1000024D)  \
  /* Vector Extract Unsigned Word */     \
  V(vextractuw, VEXTRACTUW, 0x1000028D)  \
  /* Vector Extract Doubleword */        \
  V(vextractd, VEXTRACTD, 0x100002CD)    \
  /* Vector Insert Byte */               \
  V(vinsertb, VINSERTB, 0x1000030D)      \
  /* Vector Insert Halfword */           \
  V(vinserth, VINSERTH, 0x1000034D)      \
  /* Vector Insert Word */               \
  V(vinsertw, VINSERTW, 0x1000038D)      \
  /* Vector Insert Doubleword */         \
  V(vinsertd, VINSERTD, 0x100003CD)
2270
// VX-form opcodes with the standard VRT,VRA,VRB three-register layout
// (extended opcode in bits 10..0).
#define PPC_VX_OPCODE_B_FORM_LIST(V)                       \
  /* Vector Logical OR */                                  \
  V(vor, VOR, 0x10000484)                                  \
  /* Vector Logical XOR */                                 \
  V(vxor, VXOR, 0x100004C4)                                \
  /* Vector Logical NOR */                                 \
  V(vnor, VNOR, 0x10000504)                                \
  /* Vector Shift Right by Octet */                        \
  V(vsro, VSRO, 0x1000044C)                                \
  /* Vector Shift Left by Octet */                         \
  V(vslo, VSLO, 0x1000040C)                                \
  /* Vector Add Unsigned Doubleword Modulo */              \
  V(vaddudm, VADDUDM, 0x100000C0)                          \
  /* Vector Add Unsigned Word Modulo */                    \
  V(vadduwm, VADDUWM, 0x10000080)                          \
  /* Vector Add Unsigned Halfword Modulo */                \
  V(vadduhm, VADDUHM, 0x10000040)                          \
  /* Vector Add Unsigned Byte Modulo */                    \
  V(vaddubm, VADDUBM, 0x10000000)                          \
  /* Vector Add Single-Precision */                        \
  V(vaddfp, VADDFP, 0x1000000A)                            \
  /* Vector Subtract Single-Precision */                   \
  V(vsubfp, VSUBFP, 0x1000004A)                            \
  /* Vector Subtract Unsigned Doubleword Modulo */         \
  V(vsubudm, VSUBUDM, 0x100004C0)                          \
  /* Vector Subtract Unsigned Word Modulo */               \
  V(vsubuwm, VSUBUWM, 0x10000480)                          \
  /* Vector Subtract Unsigned Halfword Modulo */           \
  V(vsubuhm, VSUBUHM, 0x10000440)                          \
  /* Vector Subtract Unsigned Byte Modulo */               \
  V(vsububm, VSUBUBM, 0x10000400)                          \
  /* Vector Multiply Unsigned Word Modulo */               \
  V(vmuluwm, VMULUWM, 0x10000089)                          \
  /* Vector Pack Unsigned Halfword Unsigned Modulo */      \
  V(vpkuhum, VPKUHUM, 0x1000000E)                          \
  /* Vector Multiply Even Signed Byte */                   \
  V(vmulesb, VMULESB, 0x10000308)                          \
  /* Vector Multiply Even Unsigned Byte */                 \
  V(vmuleub, VMULEUB, 0x10000208)                          \
  /* Vector Multiply Odd Signed Byte */                    \
  V(vmulosb, VMULOSB, 0x10000108)                          \
  /* Vector Multiply Odd Unsigned Byte */                  \
  V(vmuloub, VMULOUB, 0x10000008)                          \
  /* Vector Multiply Even Unsigned Halfword */             \
  V(vmuleuh, VMULEUH, 0x10000248)                          \
  /* Vector Multiply Even Signed Halfword */               \
  V(vmulesh, VMULESH, 0x10000348)                          \
  /* Vector Multiply Odd Unsigned Halfword */              \
  V(vmulouh, VMULOUH, 0x10000048)                          \
  /* Vector Multiply Odd Signed Halfword */                \
  V(vmulosh, VMULOSH, 0x10000148)                          \
  /* Vector Multiply Even Signed Word */                   \
  V(vmulesw, VMULESW, 0x10000388)                          \
  /* Vector Multiply Even Unsigned Word */                 \
  V(vmuleuw, VMULEUW, 0x10000288)                          \
  /* Vector Multiply Odd Signed Word */                    \
  V(vmulosw, VMULOSW, 0x10000188)                          \
  /* Vector Multiply Odd Unsigned Word */                  \
  V(vmulouw, VMULOUW, 0x10000088)                          \
  /* Vector Multiply Low Doubleword */                     \
  V(vmulld, VMULLD, 0x100001C9)                            \
  /* Vector Sum across Quarter Signed Halfword Saturate */ \
  V(vsum4shs, VSUM4SHS, 0x10000648)                        \
  /* Vector Pack Unsigned Word Unsigned Saturate */        \
  V(vpkuwus, VPKUWUS, 0x100000CE)                          \
  /* Vector Sum across Half Signed Word Saturate */        \
  V(vsum2sws, VSUM2SWS, 0x10000688)                        \
  /* Vector Pack Unsigned Doubleword Unsigned Modulo */    \
  V(vpkudum, VPKUDUM, 0x1000044E)                          \
  /* Vector Maximum Signed Byte */                         \
  V(vmaxsb, VMAXSB, 0x10000102)                            \
  /* Vector Maximum Unsigned Byte */                       \
  V(vmaxub, VMAXUB, 0x10000002)                            \
  /* Vector Maximum Signed Doubleword */                   \
  V(vmaxsd, VMAXSD, 0x100001C2)                            \
  /* Vector Maximum Unsigned Doubleword */                 \
  V(vmaxud, VMAXUD, 0x100000C2)                            \
  /* Vector Maximum Signed Halfword */                     \
  V(vmaxsh, VMAXSH, 0x10000142)                            \
  /* Vector Maximum Unsigned Halfword */                   \
  V(vmaxuh, VMAXUH, 0x10000042)                            \
  /* Vector Maximum Signed Word */                         \
  V(vmaxsw, VMAXSW, 0x10000182)                            \
  /* Vector Maximum Unsigned Word */                       \
  V(vmaxuw, VMAXUW, 0x10000082)                            \
  /* Vector Minimum Signed Byte */                         \
  V(vminsb, VMINSB, 0x10000302)                            \
  /* Vector Minimum Unsigned Byte */                       \
  V(vminub, VMINUB, 0x10000202)                            \
  /* Vector Minimum Signed Doubleword */                   \
  V(vminsd, VMINSD, 0x100003C2)                            \
  /* Vector Minimum Unsigned Doubleword */                 \
  V(vminud, VMINUD, 0x100002C2)                            \
  /* Vector Minimum Signed Halfword */                     \
  V(vminsh, VMINSH, 0x10000342)                            \
  /* Vector Minimum Unsigned Halfword */                   \
  V(vminuh, VMINUH, 0x10000242)                            \
  /* Vector Minimum Signed Word */                         \
  V(vminsw, VMINSW, 0x10000382)                            \
  /* Vector Minimum Unsigned Word */                       \
  V(vminuw, VMINUW, 0x10000282)                            \
  /* Vector Shift Left Byte */                             \
  V(vslb, VSLB, 0x10000104)                                \
  /* Vector Shift Left Word */                             \
  V(vslw, VSLW, 0x10000184)                                \
  /* Vector Shift Left Halfword */                         \
  V(vslh, VSLH, 0x10000144)                                \
  /* Vector Shift Left Doubleword */                       \
  V(vsld, VSLD, 0x100005C4)                                \
  /* Vector Shift Right Byte */                            \
  V(vsrb, VSRB, 0x10000204)                                \
  /* Vector Shift Right Word */                            \
  V(vsrw, VSRW, 0x10000284)                                \
  /* Vector Shift Right Halfword */                        \
  V(vsrh, VSRH, 0x10000244)                                \
  /* Vector Shift Right Doubleword */                      \
  V(vsrd, VSRD, 0x100006C4)                                \
  /* Vector Shift Right Algebraic Byte */                  \
  V(vsrab, VSRAB, 0x10000304)                              \
  /* Vector Shift Right Algebraic Word */                  \
  V(vsraw, VSRAW, 0x10000384)                              \
  /* Vector Shift Right Algebraic Halfword */              \
  V(vsrah, VSRAH, 0x10000344)                              \
  /* Vector Shift Right Algebraic Doubleword */            \
  V(vsrad, VSRAD, 0x100003C4)                              \
  /* Vector Logical AND */                                 \
  V(vand, VAND, 0x10000404)                                \
  /* Vector Pack Signed Word Signed Saturate */            \
  V(vpkswss, VPKSWSS, 0x100001CE)                          \
  /* Vector Pack Signed Word Unsigned Saturate */          \
  V(vpkswus, VPKSWUS, 0x1000014E)                          \
  /* Vector Pack Signed Halfword Signed Saturate */        \
  V(vpkshss, VPKSHSS, 0x1000018E)                          \
  /* Vector Pack Signed Halfword Unsigned Saturate */      \
  V(vpkshus, VPKSHUS, 0x1000010E)                          \
  /* Vector Add Signed Halfword Saturate */                \
  V(vaddshs, VADDSHS, 0x10000340)                          \
  /* Vector Subtract Signed Halfword Saturate */           \
  V(vsubshs, VSUBSHS, 0x10000740)                          \
  /* Vector Add Unsigned Halfword Saturate */              \
  V(vadduhs, VADDUHS, 0x10000240)                          \
  /* Vector Subtract Unsigned Halfword Saturate */         \
  V(vsubuhs, VSUBUHS, 0x10000640)                          \
  /* Vector Add Signed Byte Saturate */                    \
  V(vaddsbs, VADDSBS, 0x10000300)                          \
  /* Vector Subtract Signed Byte Saturate */               \
  V(vsubsbs, VSUBSBS, 0x10000700)                          \
  /* Vector Add Unsigned Byte Saturate */                  \
  V(vaddubs, VADDUBS, 0x10000200)                          \
  /* Vector Subtract Unsigned Byte Saturate */             \
  V(vsububs, VSUBUBS, 0x10000600)                          \
  /* Vector Average Unsigned Byte */                       \
  V(vavgub, VAVGUB, 0x10000402)                            \
  /* Vector Average Unsigned Halfword */                   \
  V(vavguh, VAVGUH, 0x10000442)                            \
  /* Vector Logical AND with Complement */                 \
  V(vandc, VANDC, 0x10000444)                              \
  /* Vector Minimum Single-Precision */                    \
  V(vminfp, VMINFP, 0x1000044A)                            \
  /* Vector Maximum Single-Precision */                    \
  V(vmaxfp, VMAXFP, 0x1000040A)                            \
  /* Vector Bit Permute Quadword */                        \
  V(vbpermq, VBPERMQ, 0x1000054C)                          \
  /* Vector Merge High Byte */                             \
  V(vmrghb, VMRGHB, 0x1000000C)                            \
  /* Vector Merge High Halfword */                         \
  V(vmrghh, VMRGHH, 0x1000004C)                            \
  /* Vector Merge High Word */                             \
  V(vmrghw, VMRGHW, 0x1000008C)                            \
  /* Vector Merge Low Byte */                              \
  V(vmrglb, VMRGLB, 0x1000010C)                            \
  /* Vector Merge Low Halfword */                          \
  V(vmrglh, VMRGLH, 0x1000014C)                            \
  /* Vector Merge Low Word */                              \
  V(vmrglw, VMRGLW, 0x1000018C)
2446
// VX-form unary opcodes (single VRB source operand; VRA field unused).
#define PPC_VX_OPCODE_C_FORM_LIST(V)     \
  /* Vector Unpack Low Signed Word */    \
  V(vupklsw, VUPKLSW, 0x100006CE)        \
  /* Vector Unpack High Signed Word */   \
  V(vupkhsw, VUPKHSW, 0x1000064E)        \
  /* Vector Unpack Low Signed Halfword */  \
  V(vupklsh, VUPKLSH, 0x100002CE)        \
  /* Vector Unpack High Signed Halfword */ \
  V(vupkhsh, VUPKHSH, 0x1000024E)        \
  /* Vector Unpack Low Signed Byte */    \
  V(vupklsb, VUPKLSB, 0x1000028E)        \
  /* Vector Unpack High Signed Byte */   \
  V(vupkhsb, VUPKHSB, 0x1000020E)        \
  /* Vector Population Count Byte */     \
  V(vpopcntb, VPOPCNTB, 0x10000703)
2462
// VX-form opcodes with a fixed sub-opcode baked into the VRA field
// (bits 20..16); decoded with that field included in the match.
#define PPC_VX_OPCODE_D_FORM_LIST(V) \
  /* Vector Negate Word */           \
  V(vnegw, VNEGW, 0x10060602)        \
  /* Vector Negate Doubleword */     \
  V(vnegd, VNEGD, 0x10070602)
2468
// VX-form splat-immediate opcodes (5-bit signed immediate in the VRA slot).
#define PPC_VX_OPCODE_E_FORM_LIST(V)          \
  /* Vector Splat Immediate Signed Byte */    \
  V(vspltisb, VSPLTISB, 0x1000030C)           \
  /* Vector Splat Immediate Signed Halfword */ \
  V(vspltish, VSPLTISH, 0x1000034C)           \
  /* Vector Splat Immediate Signed Word */    \
  V(vspltisw, VSPLTISW, 0x1000038C)
2476
// VX-form mask-extraction opcodes; like the D-form list, the element-size
// selector is hard-coded in bits 20..16 of the pattern.
#define PPC_VX_OPCODE_F_FORM_LIST(V)    \
  /* Vector Extract Byte Mask */        \
  V(vextractbm, VEXTRACTBM, 0x10080642) \
  /* Vector Extract Halfword Mask */    \
  V(vextracthm, VEXTRACTHM, 0x10090642) \
  /* Vector Extract Word Mask */        \
  V(vextractwm, VEXTRACTWM, 0x100A0642) \
  /* Vector Extract Doubleword Mask */  \
  V(vextractdm, VEXTRACTDM, 0x100B0642)
2486
// VX-form GPR-to-vector insert opcodes (immediate-indexed element insert).
#define PPC_VX_OPCODE_G_FORM_LIST(V)  \
  /* Vector Insert Word from GPR using \
immediate-specified index */           \
  V(vinsw, VINSW, 0x100000CF)          \
  /* Vector Insert Doubleword from GPR using \
immediate-specified index */           \
  V(vinsd, VINSD, 0x100001CF)
2494
// VX-form opcodes the assembler/simulator do not currently emit; kept so the
// disassembler can still name them.
// NOTE(review): bcdadd/bcdsub use an 0xF0000000 prefix unlike every other VX
// entry (0x10000000) — unused here, but verify against the Power ISA.
#define PPC_VX_OPCODE_UNUSED_LIST(V)                                      \
  /* Decimal Add Modulo */                                                \
  V(bcdadd, BCDADD, 0xF0000400)                                           \
  /* Decimal Subtract Modulo */                                           \
  V(bcdsub, BCDSUB, 0xF0000440)                                           \
  /* Move From Vector Status and Control Register */                      \
  V(mfvscr, MFVSCR, 0x10000604)                                           \
  /* Move To Vector Status and Control Register */                        \
  V(mtvscr, MTVSCR, 0x10000644)                                           \
  /* Vector Add & write Carry Unsigned Quadword */                        \
  V(vaddcuq, VADDCUQ, 0x10000140)                                         \
  /* Vector Add and Write Carry-Out Unsigned Word */                      \
  V(vaddcuw, VADDCUW, 0x10000180)                                         \
  /* Vector Add Signed Word Saturate */                                   \
  V(vaddsws, VADDSWS, 0x10000380)                                         \
  /* Vector Add Unsigned Quadword Modulo */                               \
  V(vadduqm, VADDUQM, 0x10000100)                                         \
  /* Vector Add Unsigned Word Saturate */                                 \
  V(vadduws, VADDUWS, 0x10000280)                                         \
  /* Vector Average Signed Byte */                                        \
  V(vavgsb, VAVGSB, 0x10000502)                                           \
  /* Vector Average Signed Halfword */                                    \
  V(vavgsh, VAVGSH, 0x10000542)                                           \
  /* Vector Average Signed Word */                                        \
  V(vavgsw, VAVGSW, 0x10000582)                                           \
  /* Vector Average Unsigned Word */                                      \
  V(vavguw, VAVGUW, 0x10000482)                                           \
  /* Vector Convert From Signed Fixed-Point Word To Single-Precision */   \
  V(vcfsx, VCFSX, 0x1000034A)                                             \
  /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
  V(vcfux, VCFUX, 0x1000030A)                                             \
  /* Vector Count Leading Zeros Byte */                                   \
  V(vclzb, VCLZB, 0x10000702)                                             \
  /* Vector Count Leading Zeros Doubleword */                             \
  V(vclzd, VCLZD, 0x100007C2)                                             \
  /* Vector Count Leading Zeros Halfword */                               \
  V(vclzh, VCLZH, 0x10000742)                                             \
  /* Vector Count Leading Zeros Word */                                   \
  V(vclzw, VCLZW, 0x10000782)                                             \
  /* Vector Convert From Single-Precision To Signed Fixed-Point Word */   \
  /* Saturate */                                                          \
  V(vctsxs, VCTSXS, 0x100003CA)                                           \
  /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
  /* Saturate */                                                          \
  V(vctuxs, VCTUXS, 0x1000038A)                                           \
  /* Vector Equivalence */                                                \
  V(veqv, VEQV, 0x10000684)                                               \
  /* Vector 2 Raised to the Exponent Estimate Single-Precision */         \
  V(vexptefp, VEXPTEFP, 0x1000018A)                                       \
  /* Vector Gather Bits by Byte by Doubleword */                          \
  V(vgbbd, VGBBD, 0x1000050C)                                             \
  /* Vector Log Base 2 Estimate Single-Precision */                       \
  V(vlogefp, VLOGEFP, 0x100001CA)                                         \
  /* Vector NAND */                                                       \
  V(vnand, VNAND, 0x10000584)                                             \
  /* Vector OR with Complement */                                         \
  V(vorc, VORC, 0x10000544)                                               \
  /* Vector Pack Pixel */                                                 \
  V(vpkpx, VPKPX, 0x1000030E)                                             \
  /* Vector Pack Signed Doubleword Signed Saturate */                     \
  V(vpksdss, VPKSDSS, 0x100005CE)                                         \
  /* Vector Pack Signed Doubleword Unsigned Saturate */                   \
  V(vpksdus, VPKSDUS, 0x1000054E)                                         \
  /* Vector Pack Unsigned Doubleword Unsigned Saturate */                 \
  V(vpkudus, VPKUDUS, 0x100004CE)                                         \
  /* Vector Pack Unsigned Halfword Unsigned Saturate */                   \
  V(vpkuhus, VPKUHUS, 0x1000008E)                                         \
  /* Vector Pack Unsigned Word Unsigned Modulo */                         \
  V(vpkuwum, VPKUWUM, 0x1000004E)                                         \
  /* Vector Polynomial Multiply-Sum Byte */                               \
  V(vpmsumb, VPMSUMB, 0x10000408)                                         \
  /* Vector Polynomial Multiply-Sum Doubleword */                         \
  V(vpmsumd, VPMSUMD, 0x100004C8)                                         \
  /* Vector Polynomial Multiply-Sum Halfword */                           \
  V(vpmsumh, VPMSUMH, 0x10000448)                                         \
  /* Vector Polynomial Multiply-Sum Word */                               \
  V(vpmsumw, VPMSUMW, 0x10000488)                                         \
  /* Vector Population Count Doubleword */                                \
  V(vpopcntd, VPOPCNTD, 0x100007C3)                                       \
  /* Vector Population Count Halfword */                                  \
  V(vpopcnth, VPOPCNTH, 0x10000743)                                       \
  /* Vector Population Count Word */                                      \
  V(vpopcntw, VPOPCNTW, 0x10000783)                                       \
  /* Vector Reciprocal Estimate Single-Precision */                       \
  V(vrefp, VREFP, 0x1000010A)                                             \
  /* Vector Round to Single-Precision Integer toward -Infinity */         \
  V(vrfim, VRFIM, 0x100002CA)                                             \
  /* Vector Round to Single-Precision Integer Nearest */                  \
  V(vrfin, VRFIN, 0x1000020A)                                             \
  /* Vector Round to Single-Precision Integer toward +Infinity */         \
  V(vrfip, VRFIP, 0x1000028A)                                             \
  /* Vector Round to Single-Precision Integer toward Zero */              \
  V(vrfiz, VRFIZ, 0x1000024A)                                             \
  /* Vector Rotate Left Byte */                                           \
  V(vrlb, VRLB, 0x10000004)                                               \
  /* Vector Rotate Left Doubleword */                                     \
  V(vrld, VRLD, 0x100000C4)                                               \
  /* Vector Rotate Left Halfword */                                       \
  V(vrlh, VRLH, 0x10000044)                                               \
  /* Vector Rotate Left Word */                                           \
  V(vrlw, VRLW, 0x10000084)                                               \
  /* Vector Reciprocal Square Root Estimate Single-Precision */           \
  V(vrsqrtefp, VRSQRTEFP, 0x1000014A)                                     \
  /* Vector Shift Left */                                                 \
  V(vsl, VSL, 0x100001C4)                                                 \
  /* Vector Shift Right */                                                \
  V(vsr, VSR, 0x100002C4)                                                 \
  /* Vector Subtract & write Carry Unsigned Quadword */                   \
  V(vsubcuq, VSUBCUQ, 0x10000540)                                         \
  /* Vector Subtract and Write Carry-Out Unsigned Word */                 \
  V(vsubcuw, VSUBCUW, 0x10000580)                                         \
  /* Vector Subtract Signed Word Saturate */                              \
  V(vsubsws, VSUBSWS, 0x10000780)                                         \
  /* Vector Subtract Unsigned Quadword Modulo */                          \
  V(vsubuqm, VSUBUQM, 0x10000500)                                         \
  /* Vector Subtract Unsigned Word Saturate */                            \
  V(vsubuws, VSUBUWS, 0x10000680)                                         \
  /* Vector Sum across Quarter Signed Byte Saturate */                    \
  V(vsum4sbs, VSUM4SBS, 0x10000708)                                       \
  /* Vector Sum across Quarter Unsigned Byte Saturate */                  \
  V(vsum4bus, VSUM4BUS, 0x10000608)                                       \
  /* Vector Sum across Signed Word Saturate */                            \
  V(vsumsws, VSUMSWS, 0x10000788)                                         \
  /* Vector Unpack High Pixel */                                          \
  V(vupkhpx, VUPKHPX, 0x1000034E)                                         \
  /* Vector Unpack Low Pixel */                                           \
  V(vupklpx, VUPKLPX, 0x100003CE)                                         \
  /* Vector AES Cipher */                                                 \
  V(vcipher, VCIPHER, 0x10000508)                                         \
  /* Vector AES Cipher Last */                                            \
  V(vcipherlast, VCIPHERLAST, 0x10000509)                                 \
  /* Vector AES Inverse Cipher */                                         \
  V(vncipher, VNCIPHER, 0x10000548)                                       \
  /* Vector AES Inverse Cipher Last */                                    \
  V(vncipherlast, VNCIPHERLAST, 0x10000549)                               \
  /* Vector AES S-Box */                                                  \
  V(vsbox, VSBOX, 0x100005C8)                                             \
  /* Vector SHA-512 Sigma Doubleword */                                   \
  V(vshasigmad, VSHASIGMAD, 0x100006C2)                                   \
  /* Vector SHA-256 Sigma Word */                                         \
  V(vshasigmaw, VSHASIGMAW, 0x10000682)                                   \
  /* Vector Merge Even Word */                                            \
  V(vmrgew, VMRGEW, 0x1000078C)                                           \
  /* Vector Merge Odd Word */                                             \
  V(vmrgow, VMRGOW, 0x1000068C)
2640
// Union of all VX-form sub-lists; this is the list PPC_OPCODE_LIST consumes.
#define PPC_VX_OPCODE_LIST(V)  \
  PPC_VX_OPCODE_A_FORM_LIST(V) \
  PPC_VX_OPCODE_B_FORM_LIST(V) \
  PPC_VX_OPCODE_C_FORM_LIST(V) \
  PPC_VX_OPCODE_D_FORM_LIST(V) \
  PPC_VX_OPCODE_E_FORM_LIST(V) \
  PPC_VX_OPCODE_F_FORM_LIST(V) \
  PPC_VX_OPCODE_G_FORM_LIST(V) \
  PPC_VX_OPCODE_UNUSED_LIST(V)
2650
// XS-form opcodes (6-bit shift amount split across the encoding).
#define PPC_XS_OPCODE_LIST(V)                      \
  /* Shift Right Algebraic Doubleword Immediate */ \
  V(sradi, SRADIX, 0x7C000674)
2654
// MD-form opcodes (doubleword rotate-and-mask with immediate shift/mask).
#define PPC_MD_OPCODE_LIST(V)                               \
  /* Rotate Left Doubleword Immediate then Clear */         \
  V(rldic, RLDIC, 0x78000008)                               \
  /* Rotate Left Doubleword Immediate then Clear Left */    \
  V(rldicl, RLDICL, 0x78000000)                             \
  /* Rotate Left Doubleword Immediate then Clear Right */   \
  V(rldicr, RLDICR, 0x78000004)                             \
  /* Rotate Left Doubleword Immediate then Mask Insert */   \
  V(rldimi, RLDIMI, 0x7800000C)
2664
// SC-form opcode (system call).
#define PPC_SC_OPCODE_LIST(V) \
  /* System Call */           \
  V(sc, SC, 0x44000002)
2668
// Master opcode list: concatenation of every per-form list above. Applied
// with DECLARE_INSTRUCTION below to populate enum Opcode, and with
// OPCODE_CASES inside Instruction::OpcodeBase to drive decoding.
#define PPC_OPCODE_LIST(V)          \
  PPC_X_OPCODE_LIST(V)              \
  PPC_X_OPCODE_EH_S_FORM_LIST(V)    \
  PPC_XO_OPCODE_LIST(V)             \
  PPC_DS_OPCODE_LIST(V)             \
  PPC_DQ_OPCODE_LIST(V)             \
  PPC_MDS_OPCODE_LIST(V)            \
  PPC_MD_OPCODE_LIST(V)             \
  PPC_XS_OPCODE_LIST(V)             \
  PPC_D_OPCODE_LIST(V)              \
  PPC_I_OPCODE_LIST(V)              \
  PPC_B_OPCODE_LIST(V)              \
  PPC_XL_OPCODE_LIST(V)             \
  PPC_A_OPCODE_LIST(V)              \
  PPC_XFX_OPCODE_LIST(V)            \
  PPC_M_OPCODE_LIST(V)              \
  PPC_SC_OPCODE_LIST(V)             \
  PPC_Z23_OPCODE_LIST(V)            \
  PPC_Z22_OPCODE_LIST(V)            \
  PPC_EVX_OPCODE_LIST(V)            \
  PPC_XFL_OPCODE_LIST(V)            \
  PPC_EVS_OPCODE_LIST(V)            \
  PPC_VX_OPCODE_LIST(V)             \
  PPC_VA_OPCODE_LIST(V)             \
  PPC_VC_OPCODE_LIST(V)             \
  PPC_XX1_OPCODE_LIST(V)            \
  PPC_XX2_OPCODE_LIST(V)            \
  PPC_XX3_OPCODE_VECTOR_LIST(V)     \
  PPC_XX3_OPCODE_SCALAR_LIST(V)     \
  PPC_XX4_OPCODE_LIST(V)
2699
// One enumerator per instruction, holding its full opcode bit pattern
// (primary opcode plus any extended-opcode bits, operand fields zero).
// The EXTn values name just the primary-opcode prefixes shared by the
// extended code sets.
enum Opcode : uint32_t {
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
  opcode_name = opcode_value,
  PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
      EXT0 = 0x10000000,  // Extended code set 0
  EXT1 = 0x4C000000,      // Extended code set 1
  EXT2 = 0x7C000000,      // Extended code set 2
  EXT3 = 0xEC000000,      // Extended code set 3
  EXT4 = 0xFC000000,      // Extended code set 4
  EXT5 = 0x78000000,      // Extended code set 5 - 64bit only
  EXT6 = 0xF0000000,      // Extended code set 6
};
2713
// Instruction encoding bits and masks.
enum {
  // Instruction encoding bit
  // (Bn = bit n of the 32-bit instruction word, LSB = bit 0. The list is not
  // sorted numerically: B6/B10/B11/B16/B17/B21 were appended after the rest.)
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B12 = 1 << 12,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,
  B28 = 1 << 28,
  B6 = 1 << 6,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B21 = 1 << 21,

  // Instruction bit masks
  // NOTE(review): kCondMask, kBOfieldMask, kBOMask and kTOMask are all the
  // same value (0x1F << 21) — they differ only in intent (CR condition, BO
  // field, trap-operand field).
  kCondMask = 0x1F << 21,
  kOff12Mask = (1 << 12) - 1,
  kImm24Mask = (1 << 24) - 1,
  kOff16Mask = (1 << 16) - 1,
  kImm16Mask = (1 << 16) - 1,
  kImm22Mask = (1 << 22) - 1,
  kImm26Mask = (1 << 26) - 1,
  kBOfieldMask = 0x1f << 21,
  kOpcodeMask = 0x3f << 26,    // primary opcode, bits 31..26
  kExt1OpcodeMask = 0x3ff << 1,
  kExt2OpcodeMask = 0x3ff << 1,
  kExt2OpcodeVariant2Mask = 0x1ff << 2,
  kExt5OpcodeMask = 0x3 << 2,
  kBOMask = 0x1f << 21,
  kBIMask = 0x1F << 16,
  // NOTE(review): 0x14 (0b10100) is not a contiguous mask; the B-form BD
  // displacement occupies bits 15..2 — confirm this value against the ISA.
  kBDMask = 0x14 << 2,
  kAAMask = 0x01 << 1,  // absolute-address bit
  kLKMask = 0x01,       // link bit
  kRCMask = 0x01,       // record bit
  kTOMask = 0x1f << 21
};
2765
2766 // -----------------------------------------------------------------------------
2767 // Addressing modes and instruction variants.
2768
// Overflow Exception
// OE bit (instruction bit 10) for XO-form arithmetic: when set, the
// instruction updates XER[SO,OV].
enum OEBit {
  SetOE = 1 << 10,   // Set overflow exception
  LeaveOE = 0 << 10  // No overflow exception
};
2774
// Record bit
// When set, the instruction records comparison results into CR0 (the "."
// mnemonic suffix, e.g. add. vs add).
enum RCBit {   // Bit 0
  SetRC = 1,   // LT,GT,EQ,SO
  LeaveRC = 0  // None
};
// Exclusive Access hint bit
// EH hint on larx-style load-and-reserve instructions (see the
// PPC_X_OPCODE_EH_S_FORM_LIST instructions decoded in OpcodeBase).
enum EHBit {   // Bit 0
  SetEH = 1,   // Exclusive Access
  LeaveEH = 0  // Atomic Update
};
2785
// Link bit
// When set on a branch, the address of the following instruction is placed
// in the link register (bl / bclrl / bcctrl variants).
enum LKBit {   // Bit 0
  SetLK = 1,   // Load effective address of next instruction
  LeaveLK = 0  // No action
};
2791
// BO field of conditional branches: selects how the condition bit (BI) and
// the count register combine to decide whether the branch is taken. Values
// are pre-shifted into bits 25..21.
enum BOfield {        // Bits 25-21
  DCBNZF = 0 << 21,   // Decrement CTR; branch if CTR != 0 and condition false
  DCBEZF = 2 << 21,   // Decrement CTR; branch if CTR == 0 and condition false
  BF = 4 << 21,       // Branch if condition false
  DCBNZT = 8 << 21,   // Decrement CTR; branch if CTR != 0 and condition true
  DCBEZT = 10 << 21,  // Decrement CTR; branch if CTR == 0 and condition true
  BT = 12 << 21,      // Branch if condition true
  DCBNZ = 16 << 21,   // Decrement CTR; branch if CTR != 0
  DCBEZ = 18 << 21,   // Decrement CTR; branch if CTR == 0
  BA = 20 << 21       // Branch always
};
2803
// On AIX these names collide with existing macros (presumably from system
// headers — confirm); drop them so the enumerators below can be defined.
#if V8_OS_AIX
#undef CR_LT
#undef CR_GT
#undef CR_EQ
#undef CR_SO
#endif

// Bit positions within one 4-bit condition-register field. CR_SO and CR_FU
// share bit 3: summary-overflow for integer compares, floating-point
// unordered for FP compares.
enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };

// Width in bits of a single condition-register field.
#define CRWIDTH 4
2814
// FPSCR exception bits. These are the documented bit positions biased down
// by 32 (the FPSCR is 64 bits wide; only the low word is addressed here).
enum FPSCRBit {
  VXSOFT = 21,  // 53: Software-Defined Condition
  VXSQRT = 22,  // 54: Invalid Square Root
  VXCVI = 23    // 55: Invalid Integer Convert
};
2821
2822 // -----------------------------------------------------------------------------
2823 // Supervisor Call (svc) specific support.
2824
2825 // Special Software Interrupt codes when used in the presence of the PPC
2826 // simulator.
2827 // svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
2828 // standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
2829 enum SoftwareInterruptCodes {
2830 // transition to C code
2831 kCallRtRedirected = 0x10,
2832 // break point
2833 kBreakpoint = 0x821008, // bits23-0 of 0x7d821008 = twge r2, r2
2834 // stop
2835 kStopCode = 1 << 23
2836 };
2837 const uint32_t kStopCodeMask = kStopCode - 1;
2838 const uint32_t kMaxStopCode = kStopCode - 1;
2839 const int32_t kDefaultStopCode = -1;
2840
// FP rounding modes (values match the 2-bit FPSCR RN field encoding).
enum FPRoundingMode {
  RN = 0,  // Round to Nearest.
  RZ = 1,  // Round towards zero.
  RP = 2,  // Round towards Plus Infinity.
  RM = 3,  // Round towards Minus Infinity.

  // Aliases.
  kRoundToNearest = RN,
  kRoundToZero = RZ,
  kRoundToPlusInf = RP,
  kRoundToMinusInf = RM
};

// Mask covering the two rounding-mode bits above.
const uint32_t kFPRoundingModeMask = 3;
2856
// Whether an FP->integer conversion should be checked for inexactness.
enum CheckForInexactConversion {
  kCheckForInexactConversion,
  kDontCheckForInexactConversion
};
2861
2862 // -----------------------------------------------------------------------------
2863 // Specific instructions, constants, and masks.
2864 // These constants are declared in assembler-arm.cc, as they use named registers
2865 // and other constants.
2866
2867 // add(sp, sp, 4) instruction (aka Pop())
2868 extern const Instr kPopInstruction;
2869
2870 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
2871 // register r is not encoded.
2872 extern const Instr kPushRegPattern;
2873
2874 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
2875 // register r is not encoded.
2876 extern const Instr kPopRegPattern;
2877
2878 // use TWI to indicate redirection call for simulation mode
2879 const Instr rtCallRedirInstr = TWI;
2880
2881 // -----------------------------------------------------------------------------
2882 // Instruction abstraction.
2883
2884 // The class Instruction enables access to individual fields defined in the PPC
2885 // architecture instruction set encoding.
2886 // Note that the Assembler uses typedef int32_t Instr.
2887 //
2888 // Example: Test whether the instruction at ptr does set the condition code
2889 // bits.
2890 //
2891 // bool InstructionSetsConditionCodes(byte* ptr) {
2892 // Instruction* instr = Instruction::At(ptr);
2893 // int type = instr->TypeValue();
2894 // return ((type == 0) || (type == 1)) && instr->HasS();
2895 // }
2896 //
2897
2898 constexpr uint8_t kInstrSize = 4;
2899 constexpr uint8_t kInstrSizeLog2 = 2;
2900 constexpr uint8_t kPcLoadDelta = 8;
2901
// Read-only view over a 4-byte instruction in the code stream. Never
// instantiated: obtain one via Instruction::At(pc), which simply
// reinterprets the pc pointer.
class Instruction {
 public:
  // Helper macro to define static accessors.
  // We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
  static inline return_type Name(Instr instr) {          \
    char* temp = reinterpret_cast<char*>(&instr);        \
    return reinterpret_cast<Instruction*>(temp)->Name(); \
  }

#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)

  // Get the raw instruction bits (reads the 4 bytes this object overlays).
  inline Instr InstructionBits() const {
    return *reinterpret_cast<const Instr*>(this);
  }

  // Set the raw instruction bits to value.
  inline void SetInstructionBits(Instr value) {
    *reinterpret_cast<Instr*>(this) = value;
  }

  // Read one particular bit out of the instruction bits.
  inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }

  // Read a bit field's value out of the instruction bits.
  // The [hi, lo] range is inclusive; (2 << (hi - lo)) - 1 builds a mask of
  // hi - lo + 1 ones.
  inline int Bits(int hi, int lo) const {
    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
  }

  // Read a bit field out of the instruction bits, leaving it in place
  // (not shifted down to bit 0).
  inline uint32_t BitField(int hi, int lo) const {
    return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
  }

  // Static support: same accessors operating on an explicit Instr value.

  // Read one particular bit out of the instruction bits.
  static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }

  // Read the value of a bit field out of the instruction bits.
  static inline int Bits(Instr instr, int hi, int lo) {
    return (instr >> lo) & ((2 << (hi - lo)) - 1);
  }

  // Read a bit field out of the instruction bits (unshifted).
  static inline uint32_t BitField(Instr instr, int hi, int lo) {
    return instr & (((2 << (hi - lo)) - 1) << lo);
  }

  // Register-field accessors. RS and RT name the same bit range (25..21);
  // which one applies depends on the instruction form.
  inline int RSValue() const { return Bits(25, 21); }
  inline int RTValue() const { return Bits(25, 21); }
  inline int RAValue() const { return Bits(20, 16); }
  DECLARE_STATIC_ACCESSOR(RAValue)
  inline int RBValue() const { return Bits(15, 11); }
  DECLARE_STATIC_ACCESSOR(RBValue)
  inline int RCValue() const { return Bits(10, 6); }
  DECLARE_STATIC_ACCESSOR(RCValue)

  // Primary (6-bit) opcode, bits 31..26. Note the cast to Opcode is
  // immediately narrowed back to int by the return type.
  inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
  // Primary opcode left in place (bits 31..26, unshifted).
  inline uint32_t OpcodeField() const {
    return static_cast<Opcode>(BitField(31, 26));
  }

#define OPCODE_CASES(name, opcode_name, opcode_value) case opcode_name:

  // Decode the full Opcode enumerator for this instruction.
  //
  // Strategy: first match the primary opcode alone (forms whose enumerator
  // is just the primary opcode). Then repeatedly OR the primary opcode
  // (cached in `extcode`; `opcode` is reused as scratch) with candidate
  // extended-opcode bit fields — one extraction per instruction form — and
  // match the result against that form's list. Each switch's cases fall
  // through to a single return, so a hit returns immediately. The order of
  // the trial extractions matters: wider/more-specific fields are tried
  // before narrower ones that could alias them.
  inline Opcode OpcodeBase() const {
    uint32_t opcode = OpcodeField();
    uint32_t extcode = OpcodeField();
    // Check for primary-opcode-only forms first.
    switch (opcode) {
      PPC_D_OPCODE_LIST(OPCODE_CASES)
      PPC_I_OPCODE_LIST(OPCODE_CASES)
      PPC_B_OPCODE_LIST(OPCODE_CASES)
      PPC_M_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(5, 0);
    switch (opcode) {
      PPC_VA_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // Some VX opcodes have integers hard coded in the middle, handle those
    // first.
    opcode = extcode | BitField(20, 16) | BitField(10, 0);
    switch (opcode) {
      PPC_VX_OPCODE_D_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_F_FORM_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 0);
    switch (opcode) {
      PPC_VX_OPCODE_A_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_B_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_C_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_E_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_G_FORM_LIST(OPCODE_CASES)
      PPC_VX_OPCODE_UNUSED_LIST(OPCODE_CASES)
      PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(9, 0);
    switch (opcode) {
      PPC_VC_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 1) | BitField(20, 20);
    switch (opcode) {
      PPC_XFX_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // Some XX2 opcodes have integers hard coded in the middle, handle those
    // first.
    opcode = extcode | BitField(20, 16) | BitField(10, 2);
    switch (opcode) {
      PPC_XX2_OPCODE_B_FORM_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 2);
    switch (opcode) {
      PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(OPCODE_CASES)
      PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(OPCODE_CASES)
      PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 1);
    switch (opcode) {
      PPC_X_OPCODE_LIST(OPCODE_CASES)
      PPC_XL_OPCODE_LIST(OPCODE_CASES)
      PPC_XFL_OPCODE_LIST(OPCODE_CASES)
      PPC_XX1_OPCODE_LIST(OPCODE_CASES)
      PPC_EVX_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(9, 1);
    switch (opcode) {
      PPC_XO_OPCODE_LIST(OPCODE_CASES)
      PPC_Z22_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 2);
    switch (opcode) {
      PPC_XS_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(10, 3);
    switch (opcode) {
      PPC_EVS_OPCODE_LIST(OPCODE_CASES)
      PPC_XX3_OPCODE_VECTOR_LIST(OPCODE_CASES)
      PPC_XX3_OPCODE_SCALAR_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(8, 1);
    switch (opcode) {
      PPC_Z23_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(5, 1);
    switch (opcode) {
      PPC_A_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(4, 1);
    switch (opcode) {
      PPC_MDS_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(4, 2);
    switch (opcode) {
      PPC_MD_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(5, 4);
    switch (opcode) {
      PPC_XX4_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(2, 0);
    switch (opcode) {
      PPC_DQ_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(1, 0);
    switch (opcode) {
      PPC_DS_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    opcode = extcode | BitField(1, 1);
    switch (opcode) {
      PPC_SC_OPCODE_LIST(OPCODE_CASES)
      return static_cast<Opcode>(opcode);
    }
    // No form matched: unknown/unsupported encoding.
    UNIMPLEMENTED();
    return static_cast<Opcode>(0);
  }

#undef OPCODE_CASES

  // Fields used in Software interrupt instructions
  inline SoftwareInterruptCodes SvcValue() const {
    return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
  }

  // Instructions are read out of a code stream. The only way to get a
  // reference to an instruction is to convert a pointer. There is no way
  // to allocate or create instances of class Instruction.
  // Use the At(pc) function to create references to Instruction.
  static Instruction* At(byte* pc) {
    return reinterpret_cast<Instruction*>(pc);
  }

 private:
  // We need to prevent the creation of instances of class Instruction.
  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
3116
// Helper functions for converting between register numbers and names.
class Registers {
 public:
  // Lookup the register number for the name provided.
  static int Number(const char* name);

 private:
  // Canonical register names, indexed by register number.
  static const char* names_[kNumRegisters];
};
3126
// Helper functions for converting between FP register numbers and names.
class DoubleRegisters {
 public:
  // Lookup the register number for the name provided.
  static int Number(const char* name);

 private:
  // Canonical FP register names, indexed by register number.
  static const char* names_[kNumDoubleRegisters];
};
3136 } // namespace internal
3137 } // namespace v8
3138
// DWARF register numbers for unwind/frame information.
// NOTE(review): these sit outside the v8::internal namespace, and `static
// constexpr` at namespace scope in a header gives each TU its own
// internal-linkage copy — confirm both placement and linkage are intended.
static constexpr int kR0DwarfCode = 0;
static constexpr int kFpDwarfCode = 31;  // frame-pointer
static constexpr int kLrDwarfCode = 65;  // return-address(lr)
static constexpr int kSpDwarfCode = 1;   // stack-pointer (sp)
3143
3144 #endif // V8_CODEGEN_PPC_CONSTANTS_PPC_H_
3145