//===-- VOP3Instructions.td - Vector Instruction Definitions --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3 Classes
//===----------------------------------------------------------------------===//

class getVOP3ModPat<VOPProfile P, SDPatternOperator node> {
  dag src0 = !if(P.HasOMod,
    (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
    (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp));

  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
          (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
          (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0)))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
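
// Illustrative sketch only (not an in-tree definition): for a hypothetical
// two-source f32 profile with OMod, getVOP3ModPat would yield roughly
//
//   [(set f32:$vdst,
//     (node (f32 (VOP3Mods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)),
//           (f32 (VOP3Mods f32:$src1, i32:$src1_modifiers))))]
//
// with "node" replaced by its divergent fragment where one exists.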

class getVOP3PModPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
                                    (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
                          (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3PMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3OpSelPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
                                    (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3OpSel P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
                          (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3OpSelModPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
                                    (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3OpSelMods P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp)),
                          (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3Pat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2))];
  list<dag> ret2 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1))];
  list<dag> ret1 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0))];
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3ClampPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, i1:$clamp))];
  list<dag> ret2 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, i1:$clamp))];
  list<dag> ret1 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, i1:$clamp))];
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3MAIPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2,
                                        imm:$cbsz, imm:$abid, imm:$blgp))];
}

class VOP3Inst<string OpName, VOPProfile P, SDPatternOperator node = null_frag, bit VOP3Only = 0> :
  VOP3_Pseudo<OpName, P,
    !if(P.HasOpSel,
        !if(P.HasModifiers,
            getVOP3OpSelModPat<P, node>.ret,
            getVOP3OpSelPat<P, node>.ret),
        !if(P.HasModifiers,
            getVOP3ModPat<P, node>.ret,
            !if(P.HasIntClamp,
                getVOP3ClampPat<P, node>.ret,
                !if (P.IsMAI,
                    getVOP3MAIPat<P, node>.ret,
                    getVOP3Pat<P, node>.ret)))),
    VOP3Only, 0, P.HasOpSel> {

  let IntClamp = P.HasIntClamp;
  let AsmMatchConverter =
    !if(P.HasOpSel,
        "cvtVOP3OpSel",
        !if(!or(P.HasModifiers, !or(P.HasOMod, P.HasIntClamp)),
            "cvtVOP3",
            ""));
}
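
// Illustrative sketch only (not a real instruction): with a modifier-capable,
// non-op_sel profile such as
//
//   def V_EXAMPLE_F32 : VOP3Inst<"v_example_f32", VOP3_Profile<VOP_F32_F32_F32>, fadd>;
//
// the class above selects getVOP3ModPat for the patterns and "cvtVOP3" as the
// AsmMatchConverter; a VOP3_OPSEL profile would instead pick
// getVOP3OpSelModPat and "cvtVOP3OpSel".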

// Special case for v_div_fmas_{f32|f64}, since these seem to be the only VOP
// instructions that implicitly read VCC.
let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod" in {
def VOP_F32_F32_F32_F32_VCC : VOPProfile<[f32, f32, f32, f32]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
}
def VOP_F64_F64_F64_F64_VCC : VOPProfile<[f64, f64, f64, f64]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
}
}

class VOP3Features<bit Clamp, bit OpSel, bit Packed, bit MAI> {
  bit HasClamp = Clamp;
  bit HasOpSel = OpSel;
  bit IsPacked = Packed;
  bit IsMAI = MAI;
}

def VOP3_REGULAR : VOP3Features<0, 0, 0, 0>;
def VOP3_CLAMP   : VOP3Features<1, 0, 0, 0>;
def VOP3_OPSEL   : VOP3Features<1, 1, 0, 0>;
def VOP3_PACKED  : VOP3Features<1, 1, 1, 0>;
def VOP3_MAI     : VOP3Features<0, 0, 0, 1>;

class VOP3_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR> : VOPProfile<P.ArgVT> {

  let HasClamp = !if(Features.HasClamp, 1, P.HasClamp);
  let HasOpSel = !if(Features.HasOpSel, 1, P.HasOpSel);
  let IsMAI    = !if(Features.IsMAI,    1, P.IsMAI);
  let IsPacked = !if(Features.IsPacked, 1, P.IsPacked);

  let HasModifiers = !if(Features.IsPacked, !if(Features.IsMAI, 0, 1), P.HasModifiers);

  // FIXME: Hack to stop printing _e64
  let Outs64 = (outs DstRC.RegClass:$vdst);
  let Asm64 =
    " " # !if(Features.HasOpSel,
              getAsmVOP3OpSel<NumSrcArgs,
                              HasIntClamp,
                              HasSrc0FloatMods,
                              HasSrc1FloatMods,
                              HasSrc2FloatMods>.ret,
              !if(Features.HasClamp,
                  getAsm64<HasDst, NumSrcArgs, HasIntClamp,
                           HasModifiers, HasOMod, DstVT>.ret,
                  P.Asm64));
  let NeedPatGen = P.NeedPatGen;
}
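
// Illustrative note: VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL> forces
// HasClamp and HasOpSel to 1 regardless of the base profile and switches Asm64
// to the getAsmVOP3OpSel form, while the default VOP3_REGULAR features leave
// the base profile's settings and asm string unchanged.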

class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> {
  // v_div_scale_{f32|f64} do not support input modifiers.
  let HasModifiers = 0;
  let HasClamp = 0;
  let HasOMod = 0;
  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = " $vdst, $sdst, $src0, $src1, $src2";
}

def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32> {
  // FIXME: Hack to stop printing _e64
  let DstRC = RegisterOperand<VGPR_32>;
}

def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64> {
  // FIXME: Hack to stop printing _e64
  let DstRC = RegisterOperand<VReg_64>;
}

def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> {
  let HasClamp = 1;

  // FIXME: Hack to stop printing _e64
  let DstRC = RegisterOperand<VReg_64>;

  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp";
}

//===----------------------------------------------------------------------===//
// VOP3 INTERP
//===----------------------------------------------------------------------===//

class VOP3Interp<string OpName, VOPProfile P, list<dag> pattern = []> :
                 VOP3_Pseudo<OpName, P, pattern> {
  let AsmMatchConverter = "cvtVOP3Interp";
}

def VOP3_INTERP : VOPProfile<[f32, f32, i32, untyped]> {
  let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                   Attr:$attr, AttrChan:$attrchan,
                   clampmod:$clamp, omod:$omod);

  let Asm64 = "$vdst, $src0_modifiers, $attr$attrchan$clamp$omod";
}

def VOP3_INTERP_MOV : VOPProfile<[f32, i32, i32, untyped]> {
  let Ins64 = (ins InterpSlot:$src0,
                   Attr:$attr, AttrChan:$attrchan,
                   clampmod:$clamp, omod:$omod);

  let Asm64 = "$vdst, $src0, $attr$attrchan$clamp$omod";

  let HasClamp = 1;
}

class getInterp16Asm <bit HasSrc2, bit HasOMod> {
  string src2 = !if(HasSrc2, ", $src2_modifiers", "");
  string omod = !if(HasOMod, "$omod", "");
  string ret =
    " $vdst, $src0_modifiers, $attr$attrchan"#src2#"$high$clamp"#omod;
}

class getInterp16Ins <bit HasSrc2, bit HasOMod,
                      Operand Src0Mod, Operand Src2Mod> {
  dag ret = !if(HasSrc2,
                !if(HasOMod,
                    (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                         Attr:$attr, AttrChan:$attrchan,
                         Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
                         highmod:$high, clampmod:$clamp, omod:$omod),
                    (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                         Attr:$attr, AttrChan:$attrchan,
                         Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
                         highmod:$high, clampmod:$clamp)
                ),
                (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                     Attr:$attr, AttrChan:$attrchan,
                     highmod:$high, clampmod:$clamp, omod:$omod)
            );
}
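
// Illustrative note: V_INTERP_P2_F16 below has an f16 destination, so
// HasOMod = 0 and it takes the HasSrc2/no-omod branch above, while
// V_INTERP_P1LL_F16 has no src2 and takes the final branch, which keeps omod.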

class VOP3_INTERP16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> {

  let HasOMod = !if(!eq(DstVT.Value, f16.Value), 0, 1);
  let HasHigh = 1;

  let Outs64 = (outs VGPR_32:$vdst);
  let Ins64 = getInterp16Ins<HasSrc2, HasOMod, Src0Mod, Src2Mod>.ret;
  let Asm64 = getInterp16Asm<HasSrc2, HasOMod>.ret;
}

//===----------------------------------------------------------------------===//
// VOP3 Instructions
//===----------------------------------------------------------------------===//

let isCommutable = 1 in {

def V_MAD_LEGACY_F32 : VOP3Inst <"v_mad_legacy_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
def V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
def V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
def V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
def V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fma>;
def V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_lerp>;

let SchedRW = [WriteDoubleAdd] in {
let FPDPRounding = 1 in {
def V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, fma>;
def V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, fadd, 1>;
def V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile<VOP_F64_F64_F64>, fmul, 1>;
} // End FPDPRounding = 1
def V_MIN_F64 : VOP3Inst <"v_min_f64", VOP3_Profile<VOP_F64_F64_F64>, fminnum_like, 1>;
def V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile<VOP_F64_F64_F64>, fmaxnum_like, 1>;
} // End SchedRW = [WriteDoubleAdd]

let SchedRW = [WriteQuarterRate32] in {
def V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", VOP3_Profile<VOP_I32_I32_I32>, mul>;
def V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", VOP3_Profile<VOP_I32_I32_I32>, mulhu>;
def V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", VOP3_Profile<VOP_I32_I32_I32>>;
def V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", VOP3_Profile<VOP_I32_I32_I32>, mulhs>;
} // End SchedRW = [WriteQuarterRate32]

let Uses = [VCC, EXEC] in {
// v_div_fmas_f32:
//   result = src0 * src1 + src2
//   if (vcc)
//     result *= 2^32
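//
//   For example (illustrative values only): with src0 = 1.5, src1 = 2.0,
//   src2 = 0.25 and vcc set, the result is (1.5 * 2.0 + 0.25) * 2^32.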
//
def V_DIV_FMAS_F32 : VOP3_Pseudo <"v_div_fmas_f32", VOP_F32_F32_F32_F32_VCC, []> {
  let SchedRW = [WriteFloatFMA];
}
// v_div_fmas_f64:
//   result = src0 * src1 + src2
//   if (vcc)
//     result *= 2^64
//
def V_DIV_FMAS_F64 : VOP3_Pseudo <"v_div_fmas_f64", VOP_F64_F64_F64_F64_VCC, []> {
  let SchedRW = [WriteDouble];
  let FPDPRounding = 1;
}
} // End Uses = [VCC, EXEC]

} // End isCommutable = 1

def V_CUBEID_F32 : VOP3Inst <"v_cubeid_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubeid>;
def V_CUBESC_F32 : VOP3Inst <"v_cubesc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubesc>;
def V_CUBETC_F32 : VOP3Inst <"v_cubetc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubetc>;
def V_CUBEMA_F32 : VOP3Inst <"v_cubema_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubema>;
def V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_u32>;
def V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_i32>;
def V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfi>;
def V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbit>;
def V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbyte>;
def V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>;
def V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>;
def V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>;
def V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmax3>;
def V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmax3>;
def V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumax3>;
def V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>;
def V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>;
def V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>;
def V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
def V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
def V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
def V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
def V_CVT_PK_U8_F32 : VOP3Inst<"v_cvt_pk_u8_f32", VOP3_Profile<VOP_I32_F32_I32_I32>, int_amdgcn_cvt_pk_u8_f32>;
def V_DIV_FIXUP_F32 : VOP3Inst <"v_div_fixup_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUdiv_fixup>;

let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in {
def V_DIV_FIXUP_F64 : VOP3Inst <"v_div_fixup_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, AMDGPUdiv_fixup>;
def V_LDEXP_F64 : VOP3Inst <"v_ldexp_f64", VOP3_Profile<VOP_F64_F64_I32>, AMDGPUldexp, 1>;
} // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1

def V_DIV_SCALE_F32 : VOP3_Pseudo <"v_div_scale_f32", VOP3b_F32_I1_F32_F32_F32, [], 1> {
  let SchedRW = [WriteFloatFMA, WriteSALU];
  let AsmMatchConverter = "";
}

// Double precision division pre-scale.
def V_DIV_SCALE_F64 : VOP3_Pseudo <"v_div_scale_f64", VOP3b_F64_I1_F64_F64_F64, [], 1> {
  let SchedRW = [WriteDouble, WriteSALU];
  let AsmMatchConverter = "";
  let FPDPRounding = 1;
}

def V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;

let Constraints = "@earlyclobber $vdst" in {
def V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
} // End Constraints = "@earlyclobber $vdst"

def V_TRIG_PREOP_F64 : VOP3Inst <"v_trig_preop_f64", VOP3_Profile<VOP_F64_F64_I32>, AMDGPUtrig_preop> {
  let SchedRW = [WriteDouble];
}

let SchedRW = [Write64Bit] in {
let SubtargetPredicate = isGFX6GFX7GFX10, Predicates = [isGFX6GFX7GFX10] in {
def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, shl>;
def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, srl>;
def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, sra>;
def V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
} // End SubtargetPredicate = isGFX6GFX7GFX10, Predicates = [isGFX6GFX7GFX10]

let SubtargetPredicate = isGFX8Plus in {
def V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshl_rev>;
def V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshr_rev>;
def V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>, ashr_rev>;
} // End SubtargetPredicate = isGFX8Plus
} // End SchedRW = [Write64Bit]

let Predicates = [isGFX8Plus] in {
def : GCNPat <
 (getDivergentFrag<shl>.ret i64:$x, i32:$y),
 (V_LSHLREV_B64 $y, $x)
>;
def : AMDGPUPat <
 (getDivergentFrag<srl>.ret i64:$x, i32:$y),
 (V_LSHRREV_B64 $y, $x)
>;
def : AMDGPUPat <
 (getDivergentFrag<sra>.ret i64:$x, i32:$y),
 (V_ASHRREV_I64 $y, $x)
>;
}


let SchedRW = [Write32Bit] in {
let SubtargetPredicate = isGFX8Plus in {
def V_PERM_B32 : VOP3Inst <"v_perm_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUperm>;
} // End SubtargetPredicate = isGFX8Plus
} // End SchedRW = [Write32Bit]

let SubtargetPredicate = isGFX7Plus in {

let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in {
def V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
def V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOP3_Profile<VOP_V4I32_I64_I32_V4I32, VOP3_CLAMP>>;
} // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32]

let isCommutable = 1 in {
let SchedRW = [WriteQuarterRate32, WriteSALU] in {
def V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
def V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
} // End SchedRW = [WriteQuarterRate32, WriteSALU]
} // End isCommutable = 1

} // End SubtargetPredicate = isGFX7Plus


def V_DIV_FIXUP_F16 : VOP3Inst <"v_div_fixup_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUdiv_fixup> {
  let Predicates = [Has16BitInsts, isGFX8Only];
  let FPDPRounding = 1;
}
def V_DIV_FIXUP_F16_gfx9 : VOP3Inst <"v_div_fixup_f16_gfx9",
                                      VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUdiv_fixup> {
  let renamedInGFX9 = 1;
  let Predicates = [Has16BitInsts, isGFX9Plus];
  let FPDPRounding = 1;
}

def V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fma> {
  let Predicates = [Has16BitInsts, isGFX8Only];
  let FPDPRounding = 1;
}
def V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, fma> {
  let renamedInGFX9 = 1;
  let Predicates = [Has16BitInsts, isGFX9Plus];
  let FPDPRounding = 1;
}

let SubtargetPredicate = Has16BitInsts, isCommutable = 1 in {

let renamedInGFX9 = 1 in {
def V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
def V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
let FPDPRounding = 1 in {
def V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>;
let Uses = [M0, EXEC] in {
def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>,
       [(set f16:$vdst, (AMDGPUinterp_p2_f16 f32:$src0, (i32 imm:$attrchan),
                                                        (i32 imm:$attr),
                                                        (i32 imm:$src0_modifiers),
                                                        (f32 VRegSrc_32:$src2),
                                                        (i32 imm:$src2_modifiers),
                                                        (i1 imm:$high),
                                                        (i1 imm:$clamp)))]>;
} // End Uses = [M0, EXEC]
} // End FPDPRounding = 1
} // End renamedInGFX9 = 1

let SubtargetPredicate = isGFX9Only in {
def V_MAD_F16_gfx9   : VOP3Inst <"v_mad_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>> {
  let FPDPRounding = 1;
}
} // End SubtargetPredicate = isGFX9Only

let SubtargetPredicate = isGFX9Plus in {
def V_MAD_U16_gfx9   : VOP3Inst <"v_mad_u16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>;
def V_MAD_I16_gfx9   : VOP3Inst <"v_mad_i16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>;
def V_INTERP_P2_F16_gfx9 : VOP3Interp <"v_interp_p2_f16_gfx9", VOP3_INTERP16<[f16, f32, i32, f32]>>;
} // End SubtargetPredicate = isGFX9Plus

let Uses = [M0, EXEC], FPDPRounding = 1 in {
def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32, i32, untyped]>,
       [(set f32:$vdst, (AMDGPUinterp_p1ll_f16 f32:$src0, (i32 imm:$attrchan),
                                                          (i32 imm:$attr),
                                                          (i32 imm:$src0_modifiers),
                                                          (i1 imm:$high),
                                                          (i1 imm:$clamp),
                                                          (i32 imm:$omod)))]>;
def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32, i32, f16]>,
       [(set f32:$vdst, (AMDGPUinterp_p1lv_f16 f32:$src0, (i32 imm:$attrchan),
                                                          (i32 imm:$attr),
                                                          (i32 imm:$src0_modifiers),
                                                          (f32 VRegSrc_32:$src2),
                                                          (i32 imm:$src2_modifiers),
                                                          (i1 imm:$high),
                                                          (i1 imm:$clamp),
                                                          (i32 imm:$omod)))]>;
} // End Uses = [M0, EXEC], FPDPRounding = 1

} // End SubtargetPredicate = Has16BitInsts, isCommutable = 1

let SubtargetPredicate = isGFX8GFX9 in {
def V_INTERP_P1_F32_e64  : VOP3Interp <"v_interp_p1_f32", VOP3_INTERP>;
def V_INTERP_P2_F32_e64  : VOP3Interp <"v_interp_p2_f32", VOP3_INTERP>;
def V_INTERP_MOV_F32_e64 : VOP3Interp <"v_interp_mov_f32", VOP3_INTERP_MOV>;
} // End SubtargetPredicate = isGFX8GFX9

let Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] in {

multiclass Ternary_i16_Pats <SDPatternOperator op1, SDPatternOperator op2,
                             Instruction inst, SDPatternOperator op3> {
def : GCNPat <
  (op2 (op1 i16:$src0, i16:$src1), i16:$src2),
  (inst i16:$src0, i16:$src1, i16:$src2, (i1 0))
>;

}

defm: Ternary_i16_Pats<mul, add, V_MAD_U16, zext>;
defm: Ternary_i16_Pats<mul, add, V_MAD_I16, sext>;

} // End Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9]

let Predicates = [Has16BitInsts, isGFX10Plus] in {

multiclass Ternary_i16_Pats_gfx9<SDPatternOperator op1, SDPatternOperator op2,
                                 Instruction inst, SDPatternOperator op3> {
def : GCNPat <
  (op2 (op1 i16:$src0, i16:$src1), i16:$src2),
  (inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE)
>;

}

defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_U16_gfx9, zext>;
defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_I16_gfx9, sext>;

} // End Predicates = [Has16BitInsts, isGFX10Plus]

class ThreeOpFrag<SDPatternOperator op1, SDPatternOperator op2> : PatFrag<
  (ops node:$x, node:$y, node:$z),
  // When the inner operation is used multiple times, selecting 3-op
  // instructions may still be beneficial -- if the other users can be
  // combined similarly. Let's be conservative for now.
  (op2 (HasOneUseBinOp<op1> node:$x, node:$y), node:$z),
  [{
    // Only use VALU ops when the result is divergent.
    if (!N->isDivergent())
      return false;

    // Check constant bus limitations.
    //
    // Note: Use !isDivergent as a conservative proxy for whether the value
    //       is in an SGPR (uniform values can end up in VGPRs as well).
    unsigned ConstantBusUses = 0;
    for (unsigned i = 0; i < 3; ++i) {
      if (!Operands[i]->isDivergent() &&
          !isInlineImmediate(Operands[i].getNode())) {
        ConstantBusUses++;
        // This uses AMDGPU::V_ADD3_U32, but all three-operand instructions
        // have the same constant bus limit.
        if (ConstantBusUses > Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32))
          return false;
      }
    }

    return true;
  }]
> {
  let PredicateCodeUsesOperands = 1;
}
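
// Illustrative sketch of the constant bus check above: with a constant bus
// limit of 1, a divergent (add (add sgpr_a, sgpr_b), vgpr_c) is rejected
// because it would need two non-inline-immediate uniform operands, whereas
// (add (add sgpr_a, vgpr_b), vgpr_c) or (add (add vgpr_a, 4), vgpr_c) (inline
// immediate) still matches.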

let SubtargetPredicate = isGFX9Plus in {
def V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
def V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
def V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
def V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
def V_LSHL_OR_B32 : VOP3Inst <"v_lshl_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
def V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
def V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;

def V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;

def V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmed3>;
def V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmed3>;
def V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumed3>;

def V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmin3>;
def V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmin3>;
def V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumin3>;

def V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmax3>;
def V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmax3>;
def V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumax3>;

def V_ADD_I16 : VOP3Inst <"v_add_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
def V_SUB_I16 : VOP3Inst <"v_sub_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;

def V_MAD_U32_U16 : VOP3Inst <"v_mad_u32_u16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
def V_MAD_I32_I16 : VOP3Inst <"v_mad_i32_i16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;

def V_CVT_PKNORM_I16_F16 : VOP3Inst <"v_cvt_pknorm_i16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
def V_CVT_PKNORM_U16_F16 : VOP3Inst <"v_cvt_pknorm_u16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;

def V_ADD_I32_gfx9 : VOP3Inst <"v_add_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32>>;
def V_SUB_I32_gfx9 : VOP3Inst <"v_sub_i32_gfx9", VOP3_Profile<VOP_I32_I32_I32>>;


class ThreeOp_i32_Pats <SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat <
  // This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions.
  (ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2),
  (inst i32:$src0, i32:$src1, i32:$src2)
>;

def : ThreeOp_i32_Pats<shl, add, V_LSHL_ADD_U32>;
def : ThreeOp_i32_Pats<add, shl, V_ADD_LSHL_U32>;
def : ThreeOp_i32_Pats<add, add, V_ADD3_U32>;
def : ThreeOp_i32_Pats<shl, or, V_LSHL_OR_B32>;
def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32>;
def : ThreeOp_i32_Pats<or, or, V_OR3_B32>;
def : ThreeOp_i32_Pats<xor, add, V_XAD_U32>;
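
// Illustrative example (assumed source expression, not an in-tree pattern): a
// divergent i32 (add (shl $x, $y), $z) whose inner shl has a single use and
// which passes the constant bus check in ThreeOpFrag is selected to
// V_LSHL_ADD_U32 by the first pattern above.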

} // End SubtargetPredicate = isGFX9Plus

def VOP3_PERMLANE_Profile : VOP3_Profile<VOPProfile <[i32, i32, i32, i32]>, VOP3_OPSEL> {
  let Src0RC64 = VRegSrc_32;
  let Src1RC64 = SCSrc_b32;
  let Src2RC64 = SCSrc_b32;
  let InsVOP3OpSel = (ins IntOpSelMods:$src0_modifiers, VRegSrc_32:$src0,
                          IntOpSelMods:$src1_modifiers, SCSrc_b32:$src1,
                          IntOpSelMods:$src2_modifiers, SCSrc_b32:$src2,
                          VGPR_32:$vdst_in, op_sel:$op_sel);
  let HasClamp = 0;
  let HasOMod = 0;
}

let SubtargetPredicate = isGFX10Plus in {
  def V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
  def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32>;

  let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
    def V_PERMLANE16_B32 : VOP3Inst <"v_permlane16_b32", VOP3_PERMLANE_Profile>;
    def V_PERMLANEX16_B32 : VOP3Inst <"v_permlanex16_b32", VOP3_PERMLANE_Profile>;
  } // End $vdst = $vdst_in, DisableEncoding $vdst_in

  def : GCNPat<
    (int_amdgcn_permlane16 i32:$vdst_in, i32:$src0, i32:$src1, i32:$src2, imm:$fi, imm:$bc),
    (V_PERMLANE16_B32 (as_i1imm $fi), $src0, (as_i1imm $bc), $src1, 0, $src2, $vdst_in)
  >;
  def : GCNPat<
    (int_amdgcn_permlanex16 i32:$vdst_in, i32:$src0, i32:$src1, i32:$src2, imm:$fi, imm:$bc),
    (V_PERMLANEX16_B32 (as_i1imm $fi), $src0, (as_i1imm $bc), $src1, 0, $src2, $vdst_in)
  >;
} // End SubtargetPredicate = isGFX10Plus

//===----------------------------------------------------------------------===//
// Integer Clamp Patterns
//===----------------------------------------------------------------------===//

class getClampPat<VOPProfile P, SDPatternOperator node> {
  dag ret3 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2));
  dag ret2 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1));
  dag ret1 = (P.DstVT (node P.Src0VT:$src0));
  dag ret = !if(!eq(P.NumSrcArgs, 3), ret3,
            !if(!eq(P.NumSrcArgs, 2), ret2,
            ret1));
}

class getClampRes<VOPProfile P, Instruction inst> {
  dag ret3 = (inst P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, (i1 0));
  dag ret2 = (inst P.Src0VT:$src0, P.Src1VT:$src1, (i1 0));
  dag ret1 = (inst P.Src0VT:$src0, (i1 0));
  dag ret = !if(!eq(P.NumSrcArgs, 3), ret3,
            !if(!eq(P.NumSrcArgs, 2), ret2,
            ret1));
}

class IntClampPat<VOP3Inst inst, SDPatternOperator node> : GCNPat<
  getClampPat<inst.Pfl, node>.ret,
  getClampRes<inst.Pfl, inst>.ret
>;
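
// Illustrative sketch: IntClampPat<V_MAD_I32_I24, AMDGPUmad_i24> below expands
// to roughly
//
//   (i32 (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2))
//     -> (V_MAD_I32_I24 i32:$src0, i32:$src1, i32:$src2, (i1 0))
//
// i.e. the un-clamped form of the node maps to the instruction with the clamp
// bit cleared.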

def : IntClampPat<V_MAD_I32_I24, AMDGPUmad_i24>;
def : IntClampPat<V_MAD_U32_U24, AMDGPUmad_u24>;

def : IntClampPat<V_SAD_U8, int_amdgcn_sad_u8>;
def : IntClampPat<V_SAD_HI_U8, int_amdgcn_sad_hi_u8>;
def : IntClampPat<V_SAD_U16, int_amdgcn_sad_u16>;

def : IntClampPat<V_MSAD_U8, int_amdgcn_msad_u8>;
def : IntClampPat<V_MQSAD_PK_U16_U8, int_amdgcn_mqsad_pk_u16_u8>;

def : IntClampPat<V_QSAD_PK_U16_U8, int_amdgcn_qsad_pk_u16_u8>;
def : IntClampPat<V_MQSAD_U32_U8, int_amdgcn_mqsad_u32_u8>;


//===----------------------------------------------------------------------===//
// Target-specific instruction encodings.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
  multiclass VOP3_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3_Real_gfx10_with_name<bits<10> op, string opName,
                                       string asmName> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(opName), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP3_Pseudo>(opName).Pfl> {
        VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName);
        let AsmString = asmName # ps.AsmOperands;
      }
  }
  multiclass VOP3be_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3be_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3Interp_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3Interp_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3OpSel_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3OpSel_Real_gfx10_with_name<bits<10> op, string opName,
                                            string asmName> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(opName), SIEncodingFamily.GFX10>,
      VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(opName).Pfl> {
        VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName);
        let AsmString = asmName # ps.AsmOperands;
      }
  }
} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"
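
// Illustrative note: each *_Real_gfx10 multiclass wraps an existing pseudo in
// its GFX10 encoding, so e.g. "defm V_ADD3_U32 : VOP3_Real_gfx10<0x36d>;"
// further below defines V_ADD3_U32_gfx10 with VOP3 opcode 0x36d while reusing
// the pseudo's profile and operands.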

defm V_READLANE_B32  : VOP3_Real_gfx10<0x360>;

let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VSrc_b32:$vdst_in) in {
  defm V_WRITELANE_B32 : VOP3_Real_gfx10<0x361>;
} // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VSrc_b32:$vdst_in)

defm V_XOR3_B32           : VOP3_Real_gfx10<0x178>;
defm V_LSHLREV_B64        : VOP3_Real_gfx10<0x2ff>;
defm V_LSHRREV_B64        : VOP3_Real_gfx10<0x300>;
defm V_ASHRREV_I64        : VOP3_Real_gfx10<0x301>;
defm V_PERM_B32           : VOP3_Real_gfx10<0x344>;
defm V_XAD_U32            : VOP3_Real_gfx10<0x345>;
defm V_LSHL_ADD_U32       : VOP3_Real_gfx10<0x346>;
defm V_ADD_LSHL_U32       : VOP3_Real_gfx10<0x347>;
defm V_ADD3_U32           : VOP3_Real_gfx10<0x36d>;
defm V_LSHL_OR_B32        : VOP3_Real_gfx10<0x36f>;
defm V_AND_OR_B32         : VOP3_Real_gfx10<0x371>;
defm V_OR3_B32            : VOP3_Real_gfx10<0x372>;

// TODO-GFX10: add MC tests for v_add/sub_nc_i16
defm V_ADD_NC_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x30d, "V_ADD_I16", "v_add_nc_i16">;
defm V_SUB_NC_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x30e, "V_SUB_I16", "v_sub_nc_i16">;
defm V_SUB_NC_I32 :
  VOP3_Real_gfx10_with_name<0x376, "V_SUB_I32_gfx9", "v_sub_nc_i32">;
defm V_ADD_NC_I32 :
  VOP3_Real_gfx10_with_name<0x37f, "V_ADD_I32_gfx9", "v_add_nc_i32">;

defm V_INTERP_P1LL_F16    : VOP3Interp_Real_gfx10<0x342>;
defm V_INTERP_P1LV_F16    : VOP3Interp_Real_gfx10<0x343>;
defm V_INTERP_P2_F16      : VOP3Interp_Real_gfx10<0x35a>;

defm V_PACK_B32_F16       : VOP3OpSel_Real_gfx10<0x311>;
defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx10<0x312>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx10<0x313>;

defm V_MIN3_F16           : VOP3OpSel_Real_gfx10<0x351>;
defm V_MIN3_I16           : VOP3OpSel_Real_gfx10<0x352>;
defm V_MIN3_U16           : VOP3OpSel_Real_gfx10<0x353>;
defm V_MAX3_F16           : VOP3OpSel_Real_gfx10<0x354>;
defm V_MAX3_I16           : VOP3OpSel_Real_gfx10<0x355>;
defm V_MAX3_U16           : VOP3OpSel_Real_gfx10<0x356>;
defm V_MED3_F16           : VOP3OpSel_Real_gfx10<0x357>;
defm V_MED3_I16           : VOP3OpSel_Real_gfx10<0x358>;
defm V_MED3_U16           : VOP3OpSel_Real_gfx10<0x359>;
defm V_MAD_U32_U16        : VOP3OpSel_Real_gfx10<0x373>;
defm V_MAD_I32_I16        : VOP3OpSel_Real_gfx10<0x375>;

defm V_MAD_U16 :
  VOP3OpSel_Real_gfx10_with_name<0x340, "V_MAD_U16_gfx9", "v_mad_u16">;
defm V_FMA_F16 :
  VOP3OpSel_Real_gfx10_with_name<0x34b, "V_FMA_F16_gfx9", "v_fma_f16">;
defm V_MAD_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x35e, "V_MAD_I16_gfx9", "v_mad_i16">;
defm V_DIV_FIXUP_F16 :
  VOP3OpSel_Real_gfx10_with_name<0x35f, "V_DIV_FIXUP_F16_gfx9", "v_div_fixup_f16">;

// FIXME-GFX10-OPSEL: Need to add "selective" opsel support to some of these
// (they do not support SDWA or DPP).
defm V_ADD_NC_U16      : VOP3_Real_gfx10_with_name<0x303, "V_ADD_U16_e64", "v_add_nc_u16">;
defm V_SUB_NC_U16      : VOP3_Real_gfx10_with_name<0x304, "V_SUB_U16_e64", "v_sub_nc_u16">;
defm V_MUL_LO_U16      : VOP3_Real_gfx10_with_name<0x305, "V_MUL_LO_U16_e64", "v_mul_lo_u16">;
defm V_LSHRREV_B16     : VOP3_Real_gfx10_with_name<0x307, "V_LSHRREV_B16_e64", "v_lshrrev_b16">;
defm V_ASHRREV_I16     : VOP3_Real_gfx10_with_name<0x308, "V_ASHRREV_I16_e64", "v_ashrrev_i16">;
defm V_MAX_U16         : VOP3_Real_gfx10_with_name<0x309, "V_MAX_U16_e64", "v_max_u16">;
defm V_MAX_I16         : VOP3_Real_gfx10_with_name<0x30a, "V_MAX_I16_e64", "v_max_i16">;
defm V_MIN_U16         : VOP3_Real_gfx10_with_name<0x30b, "V_MIN_U16_e64", "v_min_u16">;
defm V_MIN_I16         : VOP3_Real_gfx10_with_name<0x30c, "V_MIN_I16_e64", "v_min_i16">;
defm V_LSHLREV_B16     : VOP3_Real_gfx10_with_name<0x314, "V_LSHLREV_B16_e64", "v_lshlrev_b16">;
defm V_PERMLANE16_B32  : VOP3OpSel_Real_gfx10<0x377>;
defm V_PERMLANEX16_B32 : VOP3OpSel_Real_gfx10<0x378>;

//===----------------------------------------------------------------------===//
// GFX7, GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" in {
  multiclass VOP3_Real_gfx7<bits<10> op> {
    def _gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>,
      VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3be_Real_gfx7<bits<10> op> {
    def _gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>,
      VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7"

multiclass VOP3_Real_gfx7_gfx10<bits<10> op> :
  VOP3_Real_gfx7<op>, VOP3_Real_gfx10<op>;

multiclass VOP3be_Real_gfx7_gfx10<bits<10> op> :
  VOP3be_Real_gfx7<op>, VOP3be_Real_gfx10<op>;

defm V_QSAD_PK_U16_U8   : VOP3_Real_gfx7_gfx10<0x172>;
defm V_MQSAD_U32_U8     : VOP3_Real_gfx7_gfx10<0x175>;
defm V_MAD_U64_U32      : VOP3be_Real_gfx7_gfx10<0x176>;
defm V_MAD_I64_I32      : VOP3be_Real_gfx7_gfx10<0x177>;

//===----------------------------------------------------------------------===//
// GFX6, GFX7, GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
  multiclass VOP3_Real_gfx6_gfx7<bits<10> op> {
    def _gfx6_gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>,
      VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3be_Real_gfx6_gfx7<bits<10> op> {
    def _gfx6_gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.SI>,
      VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
} // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"

multiclass VOP3_Real_gfx6_gfx7_gfx10<bits<10> op> :
  VOP3_Real_gfx6_gfx7<op>, VOP3_Real_gfx10<op>;

multiclass VOP3be_Real_gfx6_gfx7_gfx10<bits<10> op> :
  VOP3be_Real_gfx6_gfx7<op>, VOP3be_Real_gfx10<op>;

defm V_LSHL_B64        : VOP3_Real_gfx6_gfx7<0x161>;
defm V_LSHR_B64        : VOP3_Real_gfx6_gfx7<0x162>;
defm V_ASHR_I64        : VOP3_Real_gfx6_gfx7<0x163>;

defm V_MAD_LEGACY_F32  : VOP3_Real_gfx6_gfx7_gfx10<0x140>;
defm V_MAD_F32         : VOP3_Real_gfx6_gfx7_gfx10<0x141>;
defm V_MAD_I32_I24     : VOP3_Real_gfx6_gfx7_gfx10<0x142>;
defm V_MAD_U32_U24     : VOP3_Real_gfx6_gfx7_gfx10<0x143>;
defm V_CUBEID_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x144>;
defm V_CUBESC_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x145>;
defm V_CUBETC_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x146>;
defm V_CUBEMA_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x147>;
defm V_BFE_U32         : VOP3_Real_gfx6_gfx7_gfx10<0x148>;
defm V_BFE_I32         : VOP3_Real_gfx6_gfx7_gfx10<0x149>;
defm V_BFI_B32         : VOP3_Real_gfx6_gfx7_gfx10<0x14a>;
defm V_FMA_F32         : VOP3_Real_gfx6_gfx7_gfx10<0x14b>;
defm V_FMA_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x14c>;
defm V_LERP_U8         : VOP3_Real_gfx6_gfx7_gfx10<0x14d>;
defm V_ALIGNBIT_B32    : VOP3_Real_gfx6_gfx7_gfx10<0x14e>;
defm V_ALIGNBYTE_B32   : VOP3_Real_gfx6_gfx7_gfx10<0x14f>;
defm V_MULLIT_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x150>;
defm V_MIN3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x151>;
defm V_MIN3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x152>;
defm V_MIN3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x153>;
defm V_MAX3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x154>;
defm V_MAX3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x155>;
defm V_MAX3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x156>;
defm V_MED3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x157>;
defm V_MED3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x158>;
defm V_MED3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x159>;
defm V_SAD_U8          : VOP3_Real_gfx6_gfx7_gfx10<0x15a>;
defm V_SAD_HI_U8       : VOP3_Real_gfx6_gfx7_gfx10<0x15b>;
defm V_SAD_U16         : VOP3_Real_gfx6_gfx7_gfx10<0x15c>;
defm V_SAD_U32         : VOP3_Real_gfx6_gfx7_gfx10<0x15d>;
defm V_CVT_PK_U8_F32   : VOP3_Real_gfx6_gfx7_gfx10<0x15e>;
defm V_DIV_FIXUP_F32   : VOP3_Real_gfx6_gfx7_gfx10<0x15f>;
defm V_DIV_FIXUP_F64   : VOP3_Real_gfx6_gfx7_gfx10<0x160>;
defm V_ADD_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x164>;
defm V_MUL_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x165>;
defm V_MIN_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x166>;
defm V_MAX_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x167>;
defm V_LDEXP_F64       : VOP3_Real_gfx6_gfx7_gfx10<0x168>;
defm V_MUL_LO_U32      : VOP3_Real_gfx6_gfx7_gfx10<0x169>;
defm V_MUL_HI_U32      : VOP3_Real_gfx6_gfx7_gfx10<0x16a>;
defm V_MUL_LO_I32      : VOP3_Real_gfx6_gfx7_gfx10<0x16b>;
defm V_MUL_HI_I32      : VOP3_Real_gfx6_gfx7_gfx10<0x16c>;
defm V_DIV_FMAS_F32    : VOP3_Real_gfx6_gfx7_gfx10<0x16f>;
defm V_DIV_FMAS_F64    : VOP3_Real_gfx6_gfx7_gfx10<0x170>;
defm V_MSAD_U8         : VOP3_Real_gfx6_gfx7_gfx10<0x171>;
defm V_MQSAD_PK_U16_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x173>;
defm V_TRIG_PREOP_F64  : VOP3_Real_gfx6_gfx7_gfx10<0x174>;
defm V_DIV_SCALE_F32   : VOP3be_Real_gfx6_gfx7_gfx10<0x16d>;
defm V_DIV_SCALE_F64   : VOP3be_Real_gfx6_gfx7_gfx10<0x16e>;

//===----------------------------------------------------------------------===//
// GFX8, GFX9 (VI).
//===----------------------------------------------------------------------===//

let AssemblerPredicates = [isGFX8GFX9], DecoderNamespace = "GFX8" in {

multiclass VOP3_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

multiclass VOP3be_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3be_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

multiclass VOP3OpSel_Real_gfx9<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3OpSel_gfx9 <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

multiclass VOP3Interp_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Interp_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

} // End AssemblerPredicates = [isGFX8GFX9], DecoderNamespace = "GFX8"

let AssemblerPredicates = [isGFX8Only], DecoderNamespace = "GFX8" in {

multiclass VOP3_F16_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>;
}

multiclass VOP3Interp_F16_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Interp_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>;
}

} // End AssemblerPredicates = [isGFX8Only], DecoderNamespace = "GFX8"

let AssemblerPredicates = [isGFX9Only], DecoderNamespace = "GFX9" in {

multiclass VOP3_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName), SIEncodingFamily.GFX9>,
            VOP3e_vi <op, !cast<VOP3_Pseudo>(OpName).Pfl> {
              VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName);
              let AsmString = AsmName # ps.AsmOperands;
            }
}

multiclass VOP3OpSel_F16_Real_gfx9<bits<10> op, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX9>,
            VOP3OpSel_gfx9 <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
              VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME);
              let AsmString = AsmName # ps.AsmOperands;
            }
}

multiclass VOP3Interp_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName), SIEncodingFamily.GFX9>,
            VOP3Interp_vi <op, !cast<VOP3_Pseudo>(OpName).Pfl> {
              VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName);
              let AsmString = AsmName # ps.AsmOperands;
            }
}

multiclass VOP3_Real_gfx9<bits<10> op, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.GFX9>,
              VOP3e_vi <op, !cast<VOP_Pseudo>(NAME).Pfl> {
              VOP_Pseudo ps = !cast<VOP_Pseudo>(NAME);
              let AsmString = AsmName # ps.AsmOperands;
            }
}

} // End AssemblerPredicates = [isGFX9Only], DecoderNamespace = "GFX9"

defm V_MAD_U64_U32      : VOP3be_Real_vi <0x1E8>;
defm V_MAD_I64_I32      : VOP3be_Real_vi <0x1E9>;

defm V_MAD_LEGACY_F32   : VOP3_Real_vi <0x1c0>;
defm V_MAD_F32          : VOP3_Real_vi <0x1c1>;
defm V_MAD_I32_I24      : VOP3_Real_vi <0x1c2>;
defm V_MAD_U32_U24      : VOP3_Real_vi <0x1c3>;
defm V_CUBEID_F32       : VOP3_Real_vi <0x1c4>;
defm V_CUBESC_F32       : VOP3_Real_vi <0x1c5>;
defm V_CUBETC_F32       : VOP3_Real_vi <0x1c6>;
defm V_CUBEMA_F32       : VOP3_Real_vi <0x1c7>;
defm V_BFE_U32          : VOP3_Real_vi <0x1c8>;
defm V_BFE_I32          : VOP3_Real_vi <0x1c9>;
defm V_BFI_B32          : VOP3_Real_vi <0x1ca>;
defm V_FMA_F32          : VOP3_Real_vi <0x1cb>;
defm V_FMA_F64          : VOP3_Real_vi <0x1cc>;
defm V_LERP_U8          : VOP3_Real_vi <0x1cd>;
defm V_ALIGNBIT_B32     : VOP3_Real_vi <0x1ce>;
defm V_ALIGNBYTE_B32    : VOP3_Real_vi <0x1cf>;
defm V_MIN3_F32         : VOP3_Real_vi <0x1d0>;
defm V_MIN3_I32         : VOP3_Real_vi <0x1d1>;
defm V_MIN3_U32         : VOP3_Real_vi <0x1d2>;
defm V_MAX3_F32         : VOP3_Real_vi <0x1d3>;
defm V_MAX3_I32         : VOP3_Real_vi <0x1d4>;
defm V_MAX3_U32         : VOP3_Real_vi <0x1d5>;
defm V_MED3_F32         : VOP3_Real_vi <0x1d6>;
defm V_MED3_I32         : VOP3_Real_vi <0x1d7>;
defm V_MED3_U32         : VOP3_Real_vi <0x1d8>;
defm V_SAD_U8           : VOP3_Real_vi <0x1d9>;
defm V_SAD_HI_U8        : VOP3_Real_vi <0x1da>;
defm V_SAD_U16          : VOP3_Real_vi <0x1db>;
defm V_SAD_U32          : VOP3_Real_vi <0x1dc>;
defm V_CVT_PK_U8_F32    : VOP3_Real_vi <0x1dd>;
defm V_DIV_FIXUP_F32    : VOP3_Real_vi <0x1de>;
defm V_DIV_FIXUP_F64    : VOP3_Real_vi <0x1df>;
defm V_DIV_SCALE_F32    : VOP3be_Real_vi <0x1e0>;
defm V_DIV_SCALE_F64    : VOP3be_Real_vi <0x1e1>;
defm V_DIV_FMAS_F32     : VOP3_Real_vi <0x1e2>;
defm V_DIV_FMAS_F64     : VOP3_Real_vi <0x1e3>;
defm V_MSAD_U8          : VOP3_Real_vi <0x1e4>;
defm V_QSAD_PK_U16_U8   : VOP3_Real_vi <0x1e5>;
defm V_MQSAD_PK_U16_U8  : VOP3_Real_vi <0x1e6>;
defm V_MQSAD_U32_U8     : VOP3_Real_vi <0x1e7>;

defm V_PERM_B32         : VOP3_Real_vi <0x1ed>;

defm V_MAD_F16          : VOP3_F16_Real_vi <0x1ea>;
defm V_MAD_U16          : VOP3_F16_Real_vi <0x1eb>;
defm V_MAD_I16          : VOP3_F16_Real_vi <0x1ec>;
defm V_FMA_F16          : VOP3_F16_Real_vi <0x1ee>;
defm V_DIV_FIXUP_F16    : VOP3_F16_Real_vi <0x1ef>;
defm V_INTERP_P2_F16    : VOP3Interp_F16_Real_vi <0x276>;

let FPDPRounding = 1 in {
defm V_MAD_LEGACY_F16       : VOP3_F16_Real_gfx9 <0x1ea, "V_MAD_F16",       "v_mad_legacy_f16">;
defm V_FMA_LEGACY_F16       : VOP3_F16_Real_gfx9 <0x1ee, "V_FMA_F16",       "v_fma_legacy_f16">;
defm V_DIV_FIXUP_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ef, "V_DIV_FIXUP_F16", "v_div_fixup_legacy_f16">;
defm V_INTERP_P2_LEGACY_F16 : VOP3Interp_F16_Real_gfx9 <0x276, "V_INTERP_P2_F16", "v_interp_p2_legacy_f16">;
} // End FPDPRounding = 1

defm V_MAD_LEGACY_U16       : VOP3_F16_Real_gfx9 <0x1eb, "V_MAD_U16",       "v_mad_legacy_u16">;
defm V_MAD_LEGACY_I16       : VOP3_F16_Real_gfx9 <0x1ec, "V_MAD_I16",       "v_mad_legacy_i16">;

defm V_MAD_F16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x203, "v_mad_f16">;
defm V_MAD_U16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x204, "v_mad_u16">;
defm V_MAD_I16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x205, "v_mad_i16">;
defm V_FMA_F16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x206, "v_fma_f16">;
defm V_DIV_FIXUP_F16_gfx9   : VOP3OpSel_F16_Real_gfx9 <0x207, "v_div_fixup_f16">;
defm V_INTERP_P2_F16_gfx9   : VOP3Interp_F16_Real_gfx9 <0x277, "V_INTERP_P2_F16_gfx9", "v_interp_p2_f16">;

defm V_ADD_I32_gfx9         : VOP3_Real_gfx9 <0x29c, "v_add_i32">;
defm V_SUB_I32_gfx9         : VOP3_Real_gfx9 <0x29d, "v_sub_i32">;

defm V_INTERP_P1_F32_e64  : VOP3Interp_Real_vi <0x270>;
defm V_INTERP_P2_F32_e64  : VOP3Interp_Real_vi <0x271>;
defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_vi <0x272>;

defm V_INTERP_P1LL_F16  : VOP3Interp_Real_vi <0x274>;
defm V_INTERP_P1LV_F16  : VOP3Interp_Real_vi <0x275>;
defm V_ADD_F64          : VOP3_Real_vi <0x280>;
defm V_MUL_F64          : VOP3_Real_vi <0x281>;
defm V_MIN_F64          : VOP3_Real_vi <0x282>;
defm V_MAX_F64          : VOP3_Real_vi <0x283>;
defm V_LDEXP_F64        : VOP3_Real_vi <0x284>;
defm V_MUL_LO_U32       : VOP3_Real_vi <0x285>;

// Removed from VI as it is identical to V_MUL_LO_U32.
let isAsmParserOnly = 1 in {
defm V_MUL_LO_I32       : VOP3_Real_vi <0x285>;
}

defm V_MUL_HI_U32       : VOP3_Real_vi <0x286>;
defm V_MUL_HI_I32       : VOP3_Real_vi <0x287>;

defm V_READLANE_B32     : VOP3_Real_vi <0x289>;
defm V_WRITELANE_B32    : VOP3_Real_vi <0x28a>;

defm V_LSHLREV_B64      : VOP3_Real_vi <0x28f>;
defm V_LSHRREV_B64      : VOP3_Real_vi <0x290>;
defm V_ASHRREV_I64      : VOP3_Real_vi <0x291>;
defm V_TRIG_PREOP_F64   : VOP3_Real_vi <0x292>;

defm V_LSHL_ADD_U32 : VOP3_Real_vi <0x1fd>;
defm V_ADD_LSHL_U32 : VOP3_Real_vi <0x1fe>;
defm V_ADD3_U32 : VOP3_Real_vi <0x1ff>;
defm V_LSHL_OR_B32 : VOP3_Real_vi <0x200>;
defm V_AND_OR_B32 : VOP3_Real_vi <0x201>;
defm V_OR3_B32 : VOP3_Real_vi <0x202>;
defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx9 <0x2a0>;

defm V_XAD_U32 : VOP3_Real_vi <0x1f3>;

defm V_MIN3_F16 : VOP3OpSel_Real_gfx9 <0x1f4>;
defm V_MIN3_I16 : VOP3OpSel_Real_gfx9 <0x1f5>;
defm V_MIN3_U16 : VOP3OpSel_Real_gfx9 <0x1f6>;

defm V_MAX3_F16 : VOP3OpSel_Real_gfx9 <0x1f7>;
defm V_MAX3_I16 : VOP3OpSel_Real_gfx9 <0x1f8>;
defm V_MAX3_U16 : VOP3OpSel_Real_gfx9 <0x1f9>;

defm V_MED3_F16 : VOP3OpSel_Real_gfx9 <0x1fa>;
defm V_MED3_I16 : VOP3OpSel_Real_gfx9 <0x1fb>;
defm V_MED3_U16 : VOP3OpSel_Real_gfx9 <0x1fc>;

defm V_ADD_I16  : VOP3OpSel_Real_gfx9 <0x29e>;
defm V_SUB_I16  : VOP3OpSel_Real_gfx9 <0x29f>;

defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx9 <0x1f1>;
defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx9 <0x1f2>;

defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx9 <0x299>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx9 <0x29a>;
