1//===-- VOP3Instructions.td - Vector Instruction Definitions --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9//===----------------------------------------------------------------------===//
10// VOP3 Classes
11//===----------------------------------------------------------------------===//
12
// Builds ISel patterns for a VOP3 instruction whose sources carry the
// standard VOP3 input modifiers (VOP3Mods), selected by source arity.
class getVOP3ModPat<VOPProfile P, SDPatternOperator node> {
  // src0 also carries the clamp operand (and $omod, but only for
  // profiles that actually have an output modifier) via VOP3Mods0.
  dag src0 = !if(P.HasOMod,
    (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
    (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp));

  // Three-source form.
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
          (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))];

  // Two-source form.
  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
          (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))];

  // Single-source form.
  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0)))];

  // Pick the pattern list matching the profile's source count.
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
34
// Builds ISel patterns for packed (VOP3P) instructions, whose sources
// use the VOP3PMods complex pattern. When HasExplicitClamp is set, the
// clamp immediate is appended as a trailing operand of the node itself
// instead of being folded into a source operand.
class getVOP3PModPat<VOPProfile P, SDPatternOperator node, bit HasExplicitClamp> {
  dag src0_dag = (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers));
  dag src1_dag = (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers));
  dag src2_dag = (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers));
  dag clamp_dag = (i1 timm:$clamp);

  // Three-source form, with or without the explicit clamp operand.
  list<dag> ret3 = [(set P.DstVT:$vdst,
    !if(HasExplicitClamp,
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, src2_dag, clamp_dag),
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, src2_dag)))];

  // Two-source form.
  list<dag> ret2 = [(set P.DstVT:$vdst,
    !if(HasExplicitClamp,
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, clamp_dag),
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag)))];

  // Single-source form.
  list<dag> ret1 = [(set P.DstVT:$vdst,
    !if(HasExplicitClamp,
        (DivergentFragOrOp<node, P>.ret src0_dag, clamp_dag),
        (DivergentFragOrOp<node, P>.ret src0_dag)))];

  // Pick the pattern list matching the profile's source count.
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
60
// Builds ISel patterns for VOP3 instructions that have op_sel, using
// the VOP3OpSel complex pattern on each source.
class getVOP3OpSelPat<VOPProfile P, SDPatternOperator node> {
  // Three-source form.
  list<dag> ret3 = [(set P.DstVT:$vdst,
        (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers)),
          (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3OpSel P.Src2VT:$src2, i32:$src2_modifiers))))];

  // Two-source form.
  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers)),
                                    (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers))))];

  // Single-source form.
  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))))];

  // Pick the pattern list matching the profile's source count.
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
78
// Builds ISel patterns for VOP3 instructions that have op_sel plus
// source modifiers, using the VOP3OpSelMods complex pattern on each
// source.
//
// Note: the previous version wrapped src0 in !if(P.HasClamp, X, Y)
// where X and Y were token-identical in both ret3 and ret2; that dead
// conditional has been removed — the emitted patterns are unchanged.
class getVOP3OpSelModPat<VOPProfile P, SDPatternOperator node> {
  // Three-source form.
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers)),
          (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3OpSelMods P.Src2VT:$src2, i32:$src2_modifiers))))];

  // Two-source form.
  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers)),
          (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers))))];

  // Single-source form.
  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))))];

  // Pick the pattern list matching the profile's source count.
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
98
// Builds plain ISel patterns (no source modifiers, no clamp operand)
// selected by the profile's source arity.
class getVOP3Pat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2))];
  list<dag> ret2 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1))];
  list<dag> ret1 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0))];
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
107
// Builds ISel patterns for instructions with an integer clamp: the
// node itself takes the $clamp immediate as its last operand. Note the
// node is used directly, not via DivergentFragOrOp.
class getVOP3ClampPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, i1:$clamp))];
  list<dag> ret2 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, i1:$clamp))];
  list<dag> ret1 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, i1:$clamp))];
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}
116
// Builds the ISel pattern for MAI instructions: always three sources
// plus the cbsz/abid/blgp immediate control operands.
class getVOP3MAIPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2,
                                        timm:$cbsz, timm:$abid, timm:$blgp))];
}
121
// Consistently gives instructions a _e64 suffix.
// Thin wrapper that defines a single VOP3_Pseudo with an explicit
// pattern list (used by instructions that need hand-written patterns,
// e.g. the div_fmas/div_scale family).
multiclass VOP3Inst_Pseudo_Wrapper<string opName, VOPProfile P, list<dag> pattern = []> {
  def _e64 : VOP3_Pseudo<opName, P, pattern>;
}
126
// Base class for VOP3 pseudo instructions. Chooses the pattern
// generator from the profile's features, in priority order:
// op_sel (+/- modifiers), plain modifiers, integer clamp, MAI, plain.
class VOP3InstBase<string OpName, VOPProfile P, SDPatternOperator node = null_frag> :
  VOP3_Pseudo<OpName, P,
  !if(P.HasOpSel,
      !if(P.HasModifiers,
          getVOP3OpSelModPat<P, node>.ret,
          getVOP3OpSelPat<P, node>.ret),
      !if(P.HasModifiers,
          getVOP3ModPat<P, node>.ret,
          !if(P.HasIntClamp,
              getVOP3ClampPat<P, node>.ret,
              !if (P.IsMAI,
                  getVOP3MAIPat<P, node>.ret,
                  getVOP3Pat<P, node>.ret)))),
  0, P.HasOpSel> {

  let IntClamp = P.HasIntClamp;
  // op_sel instructions need cvtVOP3OpSel; anything with modifiers,
  // omod or an int clamp needs cvtVOP3; otherwise no converter.
  let AsmMatchConverter =
  !if(P.HasOpSel,
      "cvtVOP3OpSel",
      !if(!or(P.HasModifiers, P.HasOMod, P.HasIntClamp),
          "cvtVOP3",
          ""));
}
150
// Standard entry point for defining a VOP3 instruction; gives the
// pseudo a consistent _e64 suffix.
multiclass VOP3Inst<string OpName, VOPProfile P, SDPatternOperator node = null_frag> {
  def _e64 : VOP3InstBase<OpName, P, node>;
}
154
// Special case for v_div_fmas_{f32|f64}, since it seems to be the
// only VOP instruction that implicitly reads VCC.
let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod" in {
def VOP_F32_F32_F32_F32_VCC : VOPProfile<[f32, f32, f32, f32]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
}
def VOP_F64_F64_F64_F64_VCC : VOPProfile<[f64, f64, f64, f64]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
}
} // End Asm64
165
// Feature flag bundle used by VOP3_Profile to force features on top of
// a base VOPProfile.
class VOP3Features<bit Clamp, bit OpSel, bit Packed, bit MAI> {
  bit HasClamp = Clamp;   // Instruction has a clamp bit.
  bit HasOpSel = OpSel;   // Instruction has op_sel operand selection.
  bit IsPacked = Packed;  // Packed (VOP3P-style) math instruction.
  bit IsMAI = MAI;        // Matrix (MAI) instruction.
}
172
// Canonical feature combinations:       Clamp, OpSel, Packed, MAI
def VOP3_REGULAR : VOP3Features<0, 0, 0, 0>;
def VOP3_CLAMP   : VOP3Features<1, 0, 0, 0>;
def VOP3_OPSEL   : VOP3Features<1, 1, 0, 0>;
def VOP3_PACKED  : VOP3Features<1, 1, 1, 0>;
def VOP3_MAI     : VOP3Features<0, 0, 0, 1>;
178
// Wraps a base VOPProfile, forcing on any features requested via the
// VOP3Features bundle (features already set in P are preserved).
class VOP3_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR> : VOPProfile<P.ArgVT> {

  let HasClamp = !if(Features.HasClamp, 1, P.HasClamp);
  let HasOpSel = !if(Features.HasOpSel, 1, P.HasOpSel);
  let IsMAI    = !if(Features.IsMAI,    1, P.IsMAI);
  let IsPacked = !if(Features.IsPacked, 1, P.IsPacked);

  // MAI instructions never take source modifiers; packed ones always do.
  let HasModifiers = !if(Features.IsMAI, 0, !or(Features.IsPacked, P.HasModifiers));
  let IsSingle = 1;
}
189
// Profile for VOP3b instructions: vector result in $vdst plus a second
// scalar (SGPR/VCC) result in $sdst.
class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> {
  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = "$vdst, $sdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod";
  let IsSingle = 1;
}
195
// VOP3b profiles for v_div_scale_{f32|f64}.
def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32>;
def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64>;
198
// Profile for v_mad_{u64_u32|i64_i32}: 64-bit result plus a scalar
// second destination, with a clamp bit but no source modifiers/omod.
def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> {
  let HasClamp = 1;
  let IsSingle = 1;

  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = "$vdst, $sdst, $src0, $src1, $src2$clamp";
}
206
207//===----------------------------------------------------------------------===//
208// VOP3 INTERP
209//===----------------------------------------------------------------------===//
210
// VOP3 pseudo for interpolation instructions. Uses the dedicated
// cvtVOP3Interp asm-match converter and never raises FP exceptions.
class VOP3Interp<string OpName, VOPProfile P, list<dag> pattern = []> :
                 VOP3_Pseudo<OpName, P, pattern> {
  let AsmMatchConverter = "cvtVOP3Interp";
  let mayRaiseFPException = 0;
}
216
// Profile for v_interp_p1/p2_f32 (e64 encodings): a VGPR src0 with
// modifiers plus the attribute/channel immediates.
def VOP3_INTERP : VOPProfile<[f32, f32, i32, untyped]> {
  let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                   Attr:$attr, AttrChan:$attrchan,
                   clampmod0:$clamp, omod0:$omod);

  let Asm64 = "$vdst, $src0_modifiers, $attr$attrchan$clamp$omod";
}
224
// Profile for v_interp_mov_f32: src0 is an interpolation-slot operand
// rather than a VGPR, so there are no src0 modifiers.
def VOP3_INTERP_MOV : VOPProfile<[f32, i32, i32, untyped]> {
  let Ins64 = (ins InterpSlot:$src0,
                   Attr:$attr, AttrChan:$attrchan,
                   clampmod0:$clamp, omod0:$omod);

  let Asm64 = "$vdst, $src0, $attr$attrchan$clamp$omod";

  let HasClamp = 1;
  let HasSrc0Mods = 0;
}
235
// Assembles the asm string for 16-bit interpolation instructions; the
// src2 and omod fragments are only included when present.
class getInterp16Asm <bit HasSrc2, bit HasOMod> {
  string src2 = !if(HasSrc2, ", $src2_modifiers", "");
  string omod = !if(HasOMod, "$omod", "");
  string ret =
    " $vdst, $src0_modifiers, $attr$attrchan"#src2#"$high$clamp"#omod;
}
242
// Builds the input operand list for 16-bit interpolation instructions.
// src2 and omod are both optional; note the no-src2 case always
// includes omod (matching the profiles that use it).
class getInterp16Ins <bit HasSrc2, bit HasOMod,
                      Operand Src0Mod, Operand Src2Mod> {
  dag ret = !if(HasSrc2,
                !if(HasOMod,
                    (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                         Attr:$attr, AttrChan:$attrchan,
                         Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
                         highmod:$high, clampmod0:$clamp, omod0:$omod),
                    (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                         Attr:$attr, AttrChan:$attrchan,
                         Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
                         highmod:$high, clampmod0:$clamp)
                ),
                (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                     Attr:$attr, AttrChan:$attrchan,
                     highmod:$high, clampmod0:$clamp, omod0:$omod)
            );
}
261
// Profile for the 16-bit interpolation instructions. Variants with an
// f16 destination have no output modifier (HasOMod cleared below).
class VOP3_INTERP16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> {

  let HasOMod = !ne(DstVT.Value, f16.Value);
  let HasHigh = 1;

  let Outs64 = (outs DstRC.RegClass:$vdst);
  let Ins64 = getInterp16Ins<HasSrc2, HasOMod, Src0Mod, Src2Mod>.ret;
  let Asm64 = getInterp16Asm<HasSrc2, HasOMod>.ret;
}
271
272//===----------------------------------------------------------------------===//
273// VOP3 Instructions
274//===----------------------------------------------------------------------===//
275
let isCommutable = 1 in {

let isReMaterializable = 1 in {
let mayRaiseFPException = 0 in {
let SubtargetPredicate = HasMadMacF32Insts in {
defm V_MAD_LEGACY_F32 : VOP3Inst <"v_mad_legacy_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
defm V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
} // End SubtargetPredicate = HasMadMacF32Insts

let SubtargetPredicate = HasFmaLegacy32 in
defm V_FMA_LEGACY_F32 : VOP3Inst <"v_fma_legacy_f32",
                                 VOP3_Profile<VOP_F32_F32_F32_F32>,
                                 int_amdgcn_fma_legacy>;
} // End mayRaiseFPException = 0

defm V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, any_fma>;
defm V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_lerp>;

let SchedRW = [WriteDoubleAdd] in {
let FPDPRounding = 1 in {
defm V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, any_fma>;
defm V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, any_fadd>;
defm V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile<VOP_F64_F64_F64>, fmul>;
} // End FPDPRounding = 1
defm V_MIN_F64 : VOP3Inst <"v_min_f64", VOP3_Profile<VOP_F64_F64_F64>, fminnum_like>;
defm V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile<VOP_F64_F64_F64>, fmaxnum_like>;
} // End SchedRW = [WriteDoubleAdd]

let SchedRW = [WriteIntMul] in {
defm V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", VOP3_Profile<VOP_I32_I32_I32>, DivergentBinFrag<mul>>;
defm V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", VOP3_Profile<VOP_I32_I32_I32>, mulhu>;
defm V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", VOP3_Profile<VOP_I32_I32_I32>>;
defm V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", VOP3_Profile<VOP_I32_I32_I32>, mulhs>;
} // End SchedRW = [WriteIntMul]
} // End isReMaterializable = 1

let Uses = [MODE, VCC, EXEC] in {
// v_div_fmas_f32:
//   result = src0 * src1 + src2
//   if (vcc)
//     result *= 2^32
//
let SchedRW = [WriteFloatFMA] in
defm V_DIV_FMAS_F32 : VOP3Inst_Pseudo_Wrapper <"v_div_fmas_f32", VOP_F32_F32_F32_F32_VCC, []>;
// v_div_fmas_f64:
//   result = src0 * src1 + src2
//   if (vcc)
//     result *= 2^64
//
let SchedRW = [WriteDouble], FPDPRounding = 1 in
defm V_DIV_FMAS_F64 : VOP3Inst_Pseudo_Wrapper  <"v_div_fmas_f64", VOP_F64_F64_F64_F64_VCC, []>;
} // End Uses = [MODE, VCC, EXEC]

} // End isCommutable = 1
332
let isReMaterializable = 1 in {
let mayRaiseFPException = 0 in {
defm V_CUBEID_F32 : VOP3Inst <"v_cubeid_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubeid>;
defm V_CUBESC_F32 : VOP3Inst <"v_cubesc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubesc>;
defm V_CUBETC_F32 : VOP3Inst <"v_cubetc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubetc>;
defm V_CUBEMA_F32 : VOP3Inst <"v_cubema_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubema>;
} // End mayRaiseFPException = 0

defm V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_u32>;
defm V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_i32>;
defm V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfi>;
defm V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, fshr>;
defm V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbyte>;

// XXX - No FPException seems suspect but manual doesn't say it does
let mayRaiseFPException = 0 in {
  let isCommutable = 1 in {
    defm V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>;
    defm V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>;
    defm V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmax3>;
    defm V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumax3>;
    defm V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>;
    defm V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>;
  } // End isCommutable = 1
  defm V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>;
  defm V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmax3>;
  defm V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>;
} // End mayRaiseFPException = 0

let isCommutable = 1 in {
  defm V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
  defm V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
  defm V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
  defm V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
} // End isCommutable = 1
defm V_CVT_PK_U8_F32 : VOP3Inst<"v_cvt_pk_u8_f32", VOP3_Profile<VOP_I32_F32_I32_I32>, int_amdgcn_cvt_pk_u8_f32>;

defm V_DIV_FIXUP_F32 : VOP3Inst <"v_div_fixup_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUdiv_fixup>;

let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in {
  defm V_DIV_FIXUP_F64 : VOP3Inst <"v_div_fixup_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, AMDGPUdiv_fixup>;
  defm V_LDEXP_F64 : VOP3Inst <"v_ldexp_f64", VOP3_Profile<VOP_F64_F64_I32>, AMDGPUldexp>;
} // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1
} // End isReMaterializable = 1
377
378
let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does.
  let SchedRW = [WriteFloatFMA, WriteSALU] in
  defm V_DIV_SCALE_F32 : VOP3Inst_Pseudo_Wrapper <"v_div_scale_f32", VOP3b_F32_I1_F32_F32_F32> ;

  // Double precision division pre-scale.
  let SchedRW = [WriteDouble, WriteSALU], FPDPRounding = 1 in
  defm V_DIV_SCALE_F64 : VOP3Inst_Pseudo_Wrapper <"v_div_scale_f64", VOP3b_F64_I1_F64_F64_F64>;
} // End mayRaiseFPException = 0

let isReMaterializable = 1 in
defm V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;

// The result may not alias the sources (see earlyclobber constraint).
let Constraints = "@earlyclobber $vdst" in {
defm V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
} // End Constraints = "@earlyclobber $vdst"
394
395
let isReMaterializable = 1 in {
let SchedRW = [WriteDouble] in {
defm V_TRIG_PREOP_F64 : VOP3Inst <"v_trig_preop_f64", VOP3_Profile<VOP_F64_F64_I32>, int_amdgcn_trig_preop>;
} // End SchedRW = [WriteDouble]

let SchedRW = [Write64Bit] in {
  // Pre-GFX8 64-bit shifts take the shift amount in the second source.
  let SubtargetPredicate = isGFX6GFX7 in {
  defm V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_I64_I64_I32>, cshl_64>;
  defm V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_I64_I64_I32>, csrl_64>;
  defm V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, csra_64>;
  } // End SubtargetPredicate = isGFX6GFX7

  // GFX8+ uses the "rev" forms with the shift amount in src0.
  let SubtargetPredicate = isGFX8Plus in {
  defm V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, clshl_rev_64>;
  defm V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile<VOP_I64_I32_I64>, clshr_rev_64>;
  defm V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>, cashr_rev_64>;
  } // End SubtargetPredicate = isGFX8Plus
} // End SchedRW = [Write64Bit]
} // End isReMaterializable = 1
415
// Select divergent (sext i16) -> i32 as a 16-bit signed bitfield
// extract starting at bit 0.
def : GCNPat<
  (i32 (getDivergentFrag<sext>.ret i16:$src)),
  (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10))))
>;
420
let isReMaterializable = 1 in {
let SubtargetPredicate = isGFX6GFX7GFX10 in {
defm V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
} // End SubtargetPredicate = isGFX6GFX7GFX10

let SchedRW = [Write32Bit] in {
let SubtargetPredicate = isGFX8Plus in {
defm V_PERM_B32 : VOP3Inst <"v_perm_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUperm>;
} // End SubtargetPredicate = isGFX8Plus
} // End SchedRW = [Write32Bit]
} // End isReMaterializable = 1
432
let SubtargetPredicate = isGFX7Plus in {

// The result may not alias the sources (see earlyclobber constraint).
let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in {
defm V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
defm V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOP3_Profile<VOP_V4I32_I64_I32_V4I32, VOP3_CLAMP>>;
} // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32]

let isCommutable = 1 in {
let SchedRW = [WriteIntMul, WriteSALU] in {
defm V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
defm V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
} // End SchedRW = [WriteIntMul, WriteSALU]
} // End isCommutable = 1

} // End SubtargetPredicate = isGFX7Plus
448
let FPDPRounding = 1 in {
  let Predicates = [Has16BitInsts, isGFX8Only] in {
    defm V_DIV_FIXUP_F16 : VOP3Inst <"v_div_fixup_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUdiv_fixup>;
    defm V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fma>;
  } // End Predicates = [Has16BitInsts, isGFX8Only]

  // GFX9 re-encodes these with op_sel support under new opcodes.
  let renamedInGFX9 = 1, Predicates = [Has16BitInsts, isGFX9Plus] in {
    defm V_DIV_FIXUP_F16_gfx9 : VOP3Inst <"v_div_fixup_f16_gfx9",
                                          VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUdiv_fixup>;
    defm V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, any_fma>;
  } // End renamedInGFX9 = 1, Predicates = [Has16BitInsts, isGFX9Plus]
} // End FPDPRounding = 1
461
let SubtargetPredicate = Has16BitInsts, isCommutable = 1 in {

let renamedInGFX9 = 1 in {
  defm V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
  defm V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
  let FPDPRounding = 1 in {
    defm V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>;
    let Uses = [MODE, M0, EXEC] in {
    let OtherPredicates = [isNotGFX90APlus] in
    // For some reason the intrinsic operands are in a different order
    // from the instruction operands.
    def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>,
           [(set f16:$vdst,
             (int_amdgcn_interp_p2_f16 (VOP3Mods f32:$src2, i32:$src2_modifiers),
                                       (VOP3Mods f32:$src0, i32:$src0_modifiers),
                                       (i32 timm:$attrchan),
                                       (i32 timm:$attr),
                                       (i1 timm:$high),
                                       M0))]>;
    } // End Uses = [MODE, M0, EXEC]
  } // End FPDPRounding = 1
} // End renamedInGFX9 = 1

let SubtargetPredicate = isGFX9Only, FPDPRounding = 1 in {
  defm V_MAD_F16_gfx9   : VOP3Inst <"v_mad_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>> ;
} // End SubtargetPredicate = isGFX9Only, FPDPRounding = 1

let SubtargetPredicate = isGFX9Plus in {
defm V_MAD_U16_gfx9   : VOP3Inst <"v_mad_u16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>;
defm V_MAD_I16_gfx9   : VOP3Inst <"v_mad_i16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>;
let OtherPredicates = [isNotGFX90APlus] in
def V_INTERP_P2_F16_gfx9 : VOP3Interp <"v_interp_p2_f16_gfx9", VOP3_INTERP16<[f16, f32, i32, f32]>>;
} // End SubtargetPredicate = isGFX9Plus

// This predicate should only apply to the selection pattern. The
// instruction still exists and should decode on subtargets with
// other bank counts.
let OtherPredicates = [isNotGFX90APlus, has32BankLDS], Uses = [MODE, M0, EXEC], FPDPRounding = 1 in {
def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32, i32, untyped]>,
       [(set f32:$vdst, (int_amdgcn_interp_p1_f16 (VOP3Mods f32:$src0, i32:$src0_modifiers),
                                                  (i32 timm:$attrchan),
                                                  (i32 timm:$attr),
                                                  (i1 timm:$high), M0))]>;
} // End OtherPredicates = [isNotGFX90APlus, has32BankLDS], Uses = [MODE, M0, EXEC], FPDPRounding = 1

let OtherPredicates = [isNotGFX90APlus], Uses = [MODE, M0, EXEC], FPDPRounding = 1 in {
def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32, i32, f16]>>;
} // End OtherPredicates = [isNotGFX90APlus], Uses = [MODE, M0, EXEC], FPDPRounding = 1

} // End SubtargetPredicate = Has16BitInsts, isCommutable = 1
512
// Select divergent (sext i16) -> i64: low 32 bits via a 16-bit signed
// BFE, high 32 bits by arithmetic-shifting the extracted value right
// by 31 to replicate the sign bit.
def : GCNPat<
  (i64 (getDivergentFrag<sext>.ret i16:$src)),
    (REG_SEQUENCE VReg_64,
      (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))), sub0,
      (i32 (COPY_TO_REGCLASS
         (V_ASHRREV_I32_e32 (S_MOV_B32 (i32 0x1f)), (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10))))
      ), VGPR_32)), sub1)
>;
521
// VOP3 (e64) encodings of the f32 interpolation instructions.
let SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC], OtherPredicates = [isNotGFX90APlus] in {
def V_INTERP_P1_F32_e64  : VOP3Interp <"v_interp_p1_f32", VOP3_INTERP>;
def V_INTERP_P2_F32_e64  : VOP3Interp <"v_interp_p2_f32", VOP3_INTERP>;
def V_INTERP_MOV_F32_e64 : VOP3Interp <"v_interp_mov_f32", VOP3_INTERP_MOV>;
} // End SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC], OtherPredicates = [isNotGFX90APlus]
527
let Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] in {

// Matches (op2 (op1 a, b), c) on i16 to a three-source instruction,
// with the clamp bit forced to 0.
multiclass Ternary_i16_Pats <SDPatternOperator op1, SDPatternOperator op2,
                             Instruction inst> {
def : GCNPat <
  (op2 (op1 i16:$src0, i16:$src1), i16:$src2),
  (inst i16:$src0, i16:$src1, i16:$src2, (i1 0))
>;

}

defm: Ternary_i16_Pats<mul, add, V_MAD_U16_e64>;
defm: Ternary_i16_Pats<mul, add, V_MAD_I16_e64>;

} // End Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9]
543
let Predicates = [Has16BitInsts, isGFX10Plus] in {

// GFX9+-encoded variant: the instruction takes explicit source
// modifier operands and a clamp operand, all set to their defaults.
multiclass Ternary_i16_Pats_gfx9<SDPatternOperator op1, SDPatternOperator op2,
                                 Instruction inst> {
def : GCNPat <
  (op2 (op1 i16:$src0, i16:$src1), i16:$src2),
  (inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE)
>;

}

defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_U16_gfx9_e64>;
defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_I16_gfx9_e64>;

} // End Predicates = [Has16BitInsts, isGFX10Plus]
559
// PatFrag matching (op2 (op1 x, y), z) for three-operand VALU
// instructions (v_add3_u32 etc.). The embedded C++ predicates gate the
// match on divergence and the constant-bus limit.
class ThreeOpFrag<SDPatternOperator op1, SDPatternOperator op2> : PatFrag<
  (ops node:$x, node:$y, node:$z),
  // When the inner operation is used multiple times, selecting 3-op
  // instructions may still be beneficial -- if the other users can be
  // combined similarly. Let's be conservative for now.
  (op2 (HasOneUseBinOp<op1> node:$x, node:$y), node:$z),
  [{
    // Only use VALU ops when the result is divergent.
    if (!N->isDivergent())
      return false;

    // Check constant bus limitations.
    //
    // Note: Use !isDivergent as a conservative proxy for whether the value
    //       is in an SGPR (uniform values can end up in VGPRs as well).
    unsigned ConstantBusUses = 0;
    for (unsigned i = 0; i < 3; ++i) {
      if (!Operands[i]->isDivergent() &&
          !isInlineImmediate(Operands[i].getNode())) {
        ConstantBusUses++;
        // This uses AMDGPU::V_ADD3_U32_e64, but all three operand instructions
        // have the same constant bus limit.
        if (ConstantBusUses > Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32_e64))
          return false;
      }
    }

    return true;
  }]> {
  let PredicateCodeUsesOperands = 1;

  // The divergence predicate is irrelevant in GlobalISel, as we have
  // proper register bank checks. We just need to verify the constant
  // bus restriction when all the sources are considered.
  //
  // FIXME: With unlucky SGPR operands, we could penalize code by
  // blocking folding SGPR->VGPR copies later.
  // FIXME: There's no register bank verifier
  let GISelPredicateCode = [{
    const int ConstantBusLimit = Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32_e64);
    int ConstantBusUses = 0;
    for (unsigned i = 0; i < 3; ++i) {
      const RegisterBank *RegBank = RBI.getRegBank(Operands[i]->getReg(), MRI, TRI);
      if (RegBank->getID() == AMDGPU::SGPRRegBankID) {
        if (++ConstantBusUses > ConstantBusLimit)
          return false;
      }
    }
    return true;
  }];
}
611
612let SubtargetPredicate = isGFX9Plus in {
613let isCommutable = 1, isReMaterializable = 1 in {
614  defm V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
615  defm V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
616  defm V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
617  defm V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
618  defm V_ADD_I32 : VOP3Inst <"v_add_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
619  defm V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
620} // End isCommutable = 1, isReMaterializable = 1
621// TODO src0 contains the opsel bit for dst, so if we commute, need to mask and swap this
622// to the new src0.
623defm V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmed3>;
624defm V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmed3>;
625defm V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumed3>;
626
627defm V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmin3>;
628defm V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmin3>;
629defm V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumin3>;
630
631defm V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmax3>;
632defm V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmax3>;
633defm V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumax3>;
634
635defm V_ADD_I16 : VOP3Inst <"v_add_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
636defm V_SUB_I16 : VOP3Inst <"v_sub_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
637
638defm V_MAD_U32_U16 : VOP3Inst <"v_mad_u32_u16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
639defm V_MAD_I32_I16 : VOP3Inst <"v_mad_i32_i16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
640
641defm V_CVT_PKNORM_I16_F16 : VOP3Inst <"v_cvt_pknorm_i16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
642defm V_CVT_PKNORM_U16_F16 : VOP3Inst <"v_cvt_pknorm_u16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
643
644defm V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
645
646let isReMaterializable = 1 in {
647defm V_SUB_I32 : VOP3Inst <"v_sub_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
648defm V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
649defm V_LSHL_OR_B32 : VOP3Inst <"v_lshl_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
650} // End isReMaterializable = 1
651
652
653class ThreeOp_i32_Pats <SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat <
654  // This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions.
655  (ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2),
656  (inst VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2)
657>;
658
// Fold shift/add/and/or/xor pairs into the fused three-source
// instructions. The operator order mirrors the instruction name
// (e.g. lshl+add -> v_lshl_add_u32).
def : ThreeOp_i32_Pats<cshl_32, add, V_LSHL_ADD_U32_e64>;
def : ThreeOp_i32_Pats<add, cshl_32, V_ADD_LSHL_U32_e64>;
def : ThreeOp_i32_Pats<add, add, V_ADD3_U32_e64>;
def : ThreeOp_i32_Pats<cshl_32, or, V_LSHL_OR_B32_e64>;
def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32_e64>;
def : ThreeOp_i32_Pats<or, or, V_OR3_B32_e64>;
def : ThreeOp_i32_Pats<xor, add, V_XAD_U32_e64>;

// i32 saturating add/sub select to the clamped v_add_i32/v_sub_i32.
def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;
669
// Divergent 64-bit (or (or a, b), c): perform it as two 32-bit
// v_or3_b32 ops on the sub0/sub1 halves and reassemble the i64 result
// with REG_SEQUENCE. The inner `or` must have one use so no partial
// result is lost by the fusion.
def : GCNPat<(getDivergentFrag<or>.ret (or_oneuse i64:$src0, i64:$src1), i64:$src2),
             (REG_SEQUENCE VReg_64,
               (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)),
                              (i32 (EXTRACT_SUBREG $src1, sub0)),
                              (i32 (EXTRACT_SUBREG $src2, sub0))), sub0,
               (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)),
                              (i32 (EXTRACT_SUBREG $src1, sub1)),
                              (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>;
678
// FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
// Selects a 16-bit binary node with opsel source modifiers to `inst`,
// forcing the clamp bit on (DSTCLAMP.ENABLE) and the final immediate
// operand to 0.
class OpSelBinOpClampPat<SDPatternOperator node,
                         Instruction inst> : GCNPat<
 (node (i16 (VOP3OpSel i16:$src0, i32:$src0_modifiers)),
       (i16 (VOP3OpSel i16:$src1, i32:$src1_modifiers))),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE, 0)
>;

// i16 saturating add/sub are the clamped forms of v_add_i16/v_sub_i16.
def : OpSelBinOpClampPat<saddsat, V_ADD_I16_e64>;
def : OpSelBinOpClampPat<ssubsat, V_SUB_I16_e64>;
} // End SubtargetPredicate = isGFX9Plus
690
// Operand profile for v_permlane16_b32 / v_permlanex16_b32: a VGPR
// data source, two scalar lane-select sources, and an extra
// $vdst_in input (tied to $vdst where the instructions are
// instantiated below). No clamp bit.
def VOP3_PERMLANE_Profile : VOP3_Profile<VOPProfile <[i32, i32, i32, i32]>, VOP3_OPSEL> {
  let Src0RC64 = VRegSrc_32;
  let Src1RC64 = SCSrc_b32;
  let Src2RC64 = SCSrc_b32;
  let InsVOP3OpSel = (ins IntOpSelMods:$src0_modifiers, VRegSrc_32:$src0,
                          IntOpSelMods:$src1_modifiers, SCSrc_b32:$src1,
                          IntOpSelMods:$src2_modifiers, SCSrc_b32:$src2,
                          VGPR_32:$vdst_in, op_sel0:$op_sel);
  let HasClamp = 0;
}
701
// Maps a permlane intrinsic call onto the instruction operand order:
// the $fi and $bc timm operands are converted to i1 immediates and
// placed in the src0/src1 modifier slots; the src2 modifier slot is a
// constant 0.
class PermlanePat<SDPatternOperator permlane,
  Instruction inst> : GCNPat<
  (permlane i32:$vdst_in, i32:$src0, i32:$src1, i32:$src2,
            timm:$fi, timm:$bc),
  (inst (as_i1timm $fi), VGPR_32:$src0, (as_i1timm $bc),
        SCSrc_b32:$src1, 0, SCSrc_b32:$src2, VGPR_32:$vdst_in)
>;
709
// Matches a permlane intrinsic only when at least one of its two
// control fields — fetch-invalid ($fi) or bound-control ($bc) — is a
// non-zero constant.
class BoundControlOrFetchInvalidPermlane<SDPatternOperator permlane> :
  PatFrag<(ops node:$vdst_in, node:$src0, node:$src1, node:$src2,
               node:$fi, node:$bc),
          (permlane node:$vdst_in, node:$src0, node:$src1, node:$src2,
                    node:$fi, node:$bc)> {
  // SelectionDAG form: $fi and $bc are constant operands 5 and 6.
  let PredicateCode = [{
    return N->getConstantOperandVal(5) != 0 ||
           N->getConstantOperandVal(6) != 0;
  }];
  // GlobalISel form: the same check, with operand indices offset by
  // one relative to the DAG node.
  let GISelPredicateCode = [{
    return MI.getOperand(6).getImm() != 0 ||
           MI.getOperand(7).getImm() != 0;
  }];
}
724
// Drop the input value if it won't be read.
// Same operand mapping as PermlanePat, but matches when the old
// destination value is `srcvalue` (ignored) and feeds the tied
// $vdst_in slot with IMPLICIT_DEF instead of a live register.
class PermlaneDiscardVDstIn<SDPatternOperator permlane,
                            Instruction inst> : GCNPat<
  (permlane srcvalue, i32:$src0, i32:$src1, i32:$src2,
            timm:$fi, timm:$bc),
  (inst (as_i1timm $fi), VGPR_32:$src0, (as_i1timm $bc),
        SCSrc_b32:$src1, 0, SCSrc_b32:$src2,
        (IMPLICIT_DEF))
>;
734
735
let SubtargetPredicate = isGFX10Plus in {
  // Three-way XOR, introduced on GFX10.
  let isCommutable = 1, isReMaterializable = 1 in {
    defm V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
  } // End isCommutable = 1, isReMaterializable = 1
  def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32_e64>;

  // $vdst_in is tied to $vdst and carries no encoding bits of its own.
  let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
    defm V_PERMLANE16_B32 : VOP3Inst<"v_permlane16_b32", VOP3_PERMLANE_Profile>;
    defm V_PERMLANEX16_B32 : VOP3Inst<"v_permlanex16_b32", VOP3_PERMLANE_Profile>;
  } // End $vdst = $vdst_in, DisableEncoding $vdst_in

  // General selection patterns for the permlane intrinsics.
  def : PermlanePat<int_amdgcn_permlane16, V_PERMLANE16_B32_e64>;
  def : PermlanePat<int_amdgcn_permlanex16, V_PERMLANEX16_B32_e64>;

  // When fi or bc is a non-zero constant, the old destination value is
  // not needed, so substitute IMPLICIT_DEF for the tied input.
  def : PermlaneDiscardVDstIn<
    BoundControlOrFetchInvalidPermlane<int_amdgcn_permlane16>,
    V_PERMLANE16_B32_e64>;
  def : PermlaneDiscardVDstIn<
    BoundControlOrFetchInvalidPermlane<int_amdgcn_permlanex16>,
    V_PERMLANEX16_B32_e64>;
} // End SubtargetPredicate = isGFX10Plus
757
// Selects AMDGPUdiv_fmas with source modifiers on all three operands.
// The pattern requires the i1 condition to be the given register;
// that register does not appear in the selected operand list
// (presumably an implicit use of the instruction — see the wave-size
// specific instantiations below).
class DivFmasPat<ValueType vt, Instruction inst, Register CondReg> : GCNPat<
  (AMDGPUdiv_fmas (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)),
                  (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)),
                  (vt (VOP3Mods vt:$src2, i32:$src2_modifiers)),
                  (i1 CondReg)),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, $src2_modifiers, $src2)
>;
765
// The condition register depends on wave size: full VCC for wave64,
// VCC_LO for wave32.
let WaveSizePredicate = isWave64 in {
def : DivFmasPat<f32, V_DIV_FMAS_F32_e64, VCC>;
def : DivFmasPat<f64, V_DIV_FMAS_F64_e64, VCC>;
}

let WaveSizePredicate = isWave32 in {
def : DivFmasPat<f32, V_DIV_FMAS_F32_e64, VCC_LO>;
def : DivFmasPat<f64, V_DIV_FMAS_F64_e64, VCC_LO>;
}
775
776//===----------------------------------------------------------------------===//
777// Integer Clamp Patterns
778//===----------------------------------------------------------------------===//
779
// Builds the source dag of an integer-clamp pattern: `node` applied to
// one, two or three plain (unmodified) sources, chosen by the
// profile's NumSrcArgs.
class getClampPat<VOPProfile P, SDPatternOperator node> {
  dag ret3 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2));
  dag ret2 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1));
  dag ret1 = (P.DstVT (node P.Src0VT:$src0));
  dag ret = !if(!eq(P.NumSrcArgs, 3), ret3,
            !if(!eq(P.NumSrcArgs, 2), ret2,
            ret1));
}
788
// Builds the matching result dag: `inst` with the same sources plus a
// trailing (i1 0) immediate. Arity again follows NumSrcArgs, mirroring
// getClampPat above.
class getClampRes<VOPProfile P, Instruction inst> {
  dag ret3 = (inst P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, (i1 0));
  dag ret2 = (inst P.Src0VT:$src0, P.Src1VT:$src1, (i1 0));
  dag ret1 = (inst P.Src0VT:$src0, (i1 0));
  dag ret = !if(!eq(P.NumSrcArgs, 3), ret3,
            !if(!eq(P.NumSrcArgs, 2), ret2,
            ret1));
}
797
// Ties getClampPat's source dag to getClampRes's result dag for the
// instruction's own profile (inst.Pfl).
class IntClampPat<VOP3InstBase inst, SDPatternOperator node> : GCNPat<
  getClampPat<inst.Pfl, node>.ret,
  getClampRes<inst.Pfl, inst>.ret
>;
802
// Integer clamp patterns for the 24-bit MADs and the SAD family.
def : IntClampPat<V_MAD_I32_I24_e64, AMDGPUmad_i24>;
def : IntClampPat<V_MAD_U32_U24_e64, AMDGPUmad_u24>;

def : IntClampPat<V_SAD_U8_e64, int_amdgcn_sad_u8>;
def : IntClampPat<V_SAD_HI_U8_e64, int_amdgcn_sad_hi_u8>;
def : IntClampPat<V_SAD_U16_e64, int_amdgcn_sad_u16>;

def : IntClampPat<V_MSAD_U8_e64, int_amdgcn_msad_u8>;
def : IntClampPat<V_MQSAD_PK_U16_U8_e64, int_amdgcn_mqsad_pk_u16_u8>;

def : IntClampPat<V_QSAD_PK_U16_U8_e64, int_amdgcn_qsad_pk_u16_u8>;
def : IntClampPat<V_MQSAD_U32_U8_e64, int_amdgcn_mqsad_u32_u8>;
815
816
817//===----------------------------------------------------------------------===//
818// Target-specific instruction encodings.
819//===----------------------------------------------------------------------===//
820
821//===----------------------------------------------------------------------===//
822// GFX10.
823//===----------------------------------------------------------------------===//
824
let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
  // Real (encoded) GFX10 form of a pseudo named NAME#"_e64".
  multiclass VOP3_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP_Pseudo>(NAME#"_e64").Pfl>;
  }
  // Same, for pseudos whose name carries no "_e64" suffix
  // (e.g. V_READLANE_B32).
  multiclass VOP3_Real_No_Suffix_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP_Pseudo>(NAME).Pfl>;
  }
  // Encodes pseudo `opName` but assembles/prints as `asmName`
  // (used when the GFX10 mnemonic differs from the pseudo's).
  multiclass VOP3_Real_gfx10_with_name<bits<10> op, string opName,
                                       string asmName> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(opName#"_e64"), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP3_Pseudo>(opName#"_e64").Pfl> {
        VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName#"_e64");
        let AsmString = asmName # ps.AsmOperands;
        let IsSingle = 1;
      }
  }
  // VOP3b encoding variant (extra SGPR destination operand).
  multiclass VOP3be_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
      VOP3be_gfx10<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  // Interpolation-instruction encoding (no "_e64" suffix on the pseudo).
  multiclass VOP3Interp_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3Interp_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  // Opsel-capable VOP3 encoding.
  multiclass VOP3OpSel_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
      VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  // Opsel encoding with a renamed assembly mnemonic.
  multiclass VOP3OpSel_Real_gfx10_with_name<bits<10> op, string opName,
                                            string asmName> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(opName#"_e64"), SIEncodingFamily.GFX10>,
      VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(opName#"_e64").Pfl> {
        VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName#"_e64");
        let AsmString = asmName # ps.AsmOperands;
      }
  }
} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"
871
defm V_READLANE_B32  : VOP3_Real_No_Suffix_gfx10<0x360>;

// Writelane's GFX10 form allows an LDS-direct source for src0.
let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in {
  defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>;
} // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in)

// This encoding only exists on GFX10.0; see the GFX6/GFX7 real below.
let SubtargetPredicate = isGFX10Before1030 in {
  defm V_MUL_LO_I32      : VOP3_Real_gfx10<0x16b>;
}

defm V_XOR3_B32           : VOP3_Real_gfx10<0x178>;
defm V_LSHLREV_B64        : VOP3_Real_gfx10<0x2ff>;
defm V_LSHRREV_B64        : VOP3_Real_gfx10<0x300>;
defm V_ASHRREV_I64        : VOP3_Real_gfx10<0x301>;
defm V_PERM_B32           : VOP3_Real_gfx10<0x344>;
defm V_XAD_U32            : VOP3_Real_gfx10<0x345>;
defm V_LSHL_ADD_U32       : VOP3_Real_gfx10<0x346>;
defm V_ADD_LSHL_U32       : VOP3_Real_gfx10<0x347>;
defm V_ADD3_U32           : VOP3_Real_gfx10<0x36d>;
defm V_LSHL_OR_B32        : VOP3_Real_gfx10<0x36f>;
defm V_AND_OR_B32         : VOP3_Real_gfx10<0x371>;
defm V_OR3_B32            : VOP3_Real_gfx10<0x372>;

// GFX10 renames the GFX9 v_add/sub_i* mnemonics to v_*_nc_* ("no carry").
// TODO-GFX10: add MC tests for v_add/sub_nc_i16
defm V_ADD_NC_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x30d, "V_ADD_I16", "v_add_nc_i16">;
defm V_SUB_NC_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x30e, "V_SUB_I16", "v_sub_nc_i16">;
defm V_SUB_NC_I32 :
  VOP3_Real_gfx10_with_name<0x376, "V_SUB_I32", "v_sub_nc_i32">;
defm V_ADD_NC_I32 :
  VOP3_Real_gfx10_with_name<0x37f, "V_ADD_I32", "v_add_nc_i32">;
904
// Interpolation instructions.
defm V_INTERP_P1_F32_e64  : VOP3Interp_Real_gfx10<0x200>;
defm V_INTERP_P2_F32_e64  : VOP3Interp_Real_gfx10<0x201>;
defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_gfx10<0x202>;

defm V_INTERP_P1LL_F16    : VOP3Interp_Real_gfx10<0x342>;
defm V_INTERP_P1LV_F16    : VOP3Interp_Real_gfx10<0x343>;
defm V_INTERP_P2_F16      : VOP3Interp_Real_gfx10<0x35a>;

// Opsel-encoded pack/convert and 16-bit three-operand instructions.
defm V_PACK_B32_F16       : VOP3OpSel_Real_gfx10<0x311>;
defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx10<0x312>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx10<0x313>;

defm V_MIN3_F16           : VOP3OpSel_Real_gfx10<0x351>;
defm V_MIN3_I16           : VOP3OpSel_Real_gfx10<0x352>;
defm V_MIN3_U16           : VOP3OpSel_Real_gfx10<0x353>;
defm V_MAX3_F16           : VOP3OpSel_Real_gfx10<0x354>;
defm V_MAX3_I16           : VOP3OpSel_Real_gfx10<0x355>;
defm V_MAX3_U16           : VOP3OpSel_Real_gfx10<0x356>;
defm V_MED3_F16           : VOP3OpSel_Real_gfx10<0x357>;
defm V_MED3_I16           : VOP3OpSel_Real_gfx10<0x358>;
defm V_MED3_U16           : VOP3OpSel_Real_gfx10<0x359>;
defm V_MAD_U32_U16        : VOP3OpSel_Real_gfx10<0x373>;
defm V_MAD_I32_I16        : VOP3OpSel_Real_gfx10<0x375>;

// These encode the GFX9-suffixed pseudos under their plain mnemonics.
defm V_MAD_U16 :
  VOP3OpSel_Real_gfx10_with_name<0x340, "V_MAD_U16_gfx9", "v_mad_u16">;
defm V_FMA_F16 :
  VOP3OpSel_Real_gfx10_with_name<0x34b, "V_FMA_F16_gfx9", "v_fma_f16">;
defm V_MAD_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x35e, "V_MAD_I16_gfx9", "v_mad_i16">;
defm V_DIV_FIXUP_F16 :
  VOP3OpSel_Real_gfx10_with_name<0x35f, "V_DIV_FIXUP_F16_gfx9", "v_div_fixup_f16">;

// FIXME-GFX10-OPSEL: Need to add "selective" opsel support to some of these
// (they do not support SDWA or DPP).
defm V_ADD_NC_U16      : VOP3_Real_gfx10_with_name<0x303, "V_ADD_U16", "v_add_nc_u16">;
defm V_SUB_NC_U16      : VOP3_Real_gfx10_with_name<0x304, "V_SUB_U16", "v_sub_nc_u16">;
defm V_MUL_LO_U16      : VOP3_Real_gfx10_with_name<0x305, "V_MUL_LO_U16", "v_mul_lo_u16">;
defm V_LSHRREV_B16     : VOP3_Real_gfx10_with_name<0x307, "V_LSHRREV_B16", "v_lshrrev_b16">;
defm V_ASHRREV_I16     : VOP3_Real_gfx10_with_name<0x308, "V_ASHRREV_I16", "v_ashrrev_i16">;
defm V_MAX_U16         : VOP3_Real_gfx10_with_name<0x309, "V_MAX_U16", "v_max_u16">;
defm V_MAX_I16         : VOP3_Real_gfx10_with_name<0x30a, "V_MAX_I16", "v_max_i16">;
defm V_MIN_U16         : VOP3_Real_gfx10_with_name<0x30b, "V_MIN_U16", "v_min_u16">;
defm V_MIN_I16         : VOP3_Real_gfx10_with_name<0x30c, "V_MIN_I16", "v_min_i16">;
defm V_LSHLREV_B16     : VOP3_Real_gfx10_with_name<0x314, "V_LSHLREV_B16", "v_lshlrev_b16">;
defm V_PERMLANE16_B32  : VOP3OpSel_Real_gfx10<0x377>;
defm V_PERMLANEX16_B32 : VOP3OpSel_Real_gfx10<0x378>;
952
953//===----------------------------------------------------------------------===//
954// GFX7, GFX10.
955//===----------------------------------------------------------------------===//
956
let AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" in {
  // GFX7 real using the SI encoding family; only the low 9 opcode bits
  // are encoded (op{8-0}).
  multiclass VOP3_Real_gfx7<bits<10> op> {
    def _gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  // VOP3b variant (extra SGPR destination).
  multiclass VOP3be_Real_gfx7<bits<10> op> {
    def _gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
} // End AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7"

// Instructions present on GFX7 and GFX10 (same 10-bit opcode).
multiclass VOP3_Real_gfx7_gfx10<bits<10> op> :
  VOP3_Real_gfx7<op>, VOP3_Real_gfx10<op>;

multiclass VOP3be_Real_gfx7_gfx10<bits<10> op> :
  VOP3be_Real_gfx7<op>, VOP3be_Real_gfx10<op>;

defm V_QSAD_PK_U16_U8   : VOP3_Real_gfx7_gfx10<0x172>;
defm V_MQSAD_U32_U8     : VOP3_Real_gfx7_gfx10<0x175>;
defm V_MAD_U64_U32      : VOP3be_Real_gfx7_gfx10<0x176>;
defm V_MAD_I64_I32      : VOP3be_Real_gfx7_gfx10<0x177>;
980
981//===----------------------------------------------------------------------===//
982// GFX6, GFX7, GFX10.
983//===----------------------------------------------------------------------===//
984
let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
  // GFX6/GFX7 real (SI encoding family, 9-bit opcode field).
  multiclass VOP3_Real_gfx6_gfx7<bits<10> op> {
    def _gfx6_gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  // VOP3b variant (extra SGPR destination).
  multiclass VOP3be_Real_gfx6_gfx7<bits<10> op> {
    def _gfx6_gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
} // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"

// Instructions sharing one opcode across GFX6, GFX7 and GFX10.
multiclass VOP3_Real_gfx6_gfx7_gfx10<bits<10> op> :
  VOP3_Real_gfx6_gfx7<op>, VOP3_Real_gfx10<op>;

multiclass VOP3be_Real_gfx6_gfx7_gfx10<bits<10> op> :
  VOP3be_Real_gfx6_gfx7<op>, VOP3be_Real_gfx10<op>;

// GFX6/GFX7-only encodings.
defm V_LSHL_B64        : VOP3_Real_gfx6_gfx7<0x161>;
defm V_LSHR_B64        : VOP3_Real_gfx6_gfx7<0x162>;
defm V_ASHR_I64        : VOP3_Real_gfx6_gfx7<0x163>;
defm V_MUL_LO_I32      : VOP3_Real_gfx6_gfx7<0x16b>;

defm V_MAD_LEGACY_F32  : VOP3_Real_gfx6_gfx7_gfx10<0x140>;
defm V_MAD_F32         : VOP3_Real_gfx6_gfx7_gfx10<0x141>;
defm V_MAD_I32_I24     : VOP3_Real_gfx6_gfx7_gfx10<0x142>;
defm V_MAD_U32_U24     : VOP3_Real_gfx6_gfx7_gfx10<0x143>;
defm V_CUBEID_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x144>;
defm V_CUBESC_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x145>;
defm V_CUBETC_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x146>;
defm V_CUBEMA_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x147>;
defm V_BFE_U32         : VOP3_Real_gfx6_gfx7_gfx10<0x148>;
defm V_BFE_I32         : VOP3_Real_gfx6_gfx7_gfx10<0x149>;
defm V_BFI_B32         : VOP3_Real_gfx6_gfx7_gfx10<0x14a>;
defm V_FMA_F32         : VOP3_Real_gfx6_gfx7_gfx10<0x14b>;
defm V_FMA_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x14c>;
defm V_LERP_U8         : VOP3_Real_gfx6_gfx7_gfx10<0x14d>;
defm V_ALIGNBIT_B32    : VOP3_Real_gfx6_gfx7_gfx10<0x14e>;
defm V_ALIGNBYTE_B32   : VOP3_Real_gfx6_gfx7_gfx10<0x14f>;
defm V_MULLIT_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x150>;
defm V_MIN3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x151>;
defm V_MIN3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x152>;
defm V_MIN3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x153>;
defm V_MAX3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x154>;
defm V_MAX3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x155>;
defm V_MAX3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x156>;
defm V_MED3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x157>;
defm V_MED3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x158>;
defm V_MED3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x159>;
defm V_SAD_U8          : VOP3_Real_gfx6_gfx7_gfx10<0x15a>;
defm V_SAD_HI_U8       : VOP3_Real_gfx6_gfx7_gfx10<0x15b>;
defm V_SAD_U16         : VOP3_Real_gfx6_gfx7_gfx10<0x15c>;
defm V_SAD_U32         : VOP3_Real_gfx6_gfx7_gfx10<0x15d>;
defm V_CVT_PK_U8_F32   : VOP3_Real_gfx6_gfx7_gfx10<0x15e>;
defm V_DIV_FIXUP_F32   : VOP3_Real_gfx6_gfx7_gfx10<0x15f>;
defm V_DIV_FIXUP_F64   : VOP3_Real_gfx6_gfx7_gfx10<0x160>;
defm V_ADD_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x164>;
defm V_MUL_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x165>;
defm V_MIN_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x166>;
defm V_MAX_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x167>;
defm V_LDEXP_F64       : VOP3_Real_gfx6_gfx7_gfx10<0x168>;
defm V_MUL_LO_U32      : VOP3_Real_gfx6_gfx7_gfx10<0x169>;
defm V_MUL_HI_U32      : VOP3_Real_gfx6_gfx7_gfx10<0x16a>;
defm V_MUL_HI_I32      : VOP3_Real_gfx6_gfx7_gfx10<0x16c>;
defm V_DIV_FMAS_F32    : VOP3_Real_gfx6_gfx7_gfx10<0x16f>;
defm V_DIV_FMAS_F64    : VOP3_Real_gfx6_gfx7_gfx10<0x170>;
defm V_MSAD_U8         : VOP3_Real_gfx6_gfx7_gfx10<0x171>;
defm V_MQSAD_PK_U16_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x173>;
defm V_TRIG_PREOP_F64  : VOP3_Real_gfx6_gfx7_gfx10<0x174>;
defm V_DIV_SCALE_F32   : VOP3be_Real_gfx6_gfx7_gfx10<0x16d>;
defm V_DIV_SCALE_F64   : VOP3be_Real_gfx6_gfx7_gfx10<0x16e>;

// NB: Same opcode as v_mad_legacy_f32
let DecoderNamespace = "GFX10_B" in
defm V_FMA_LEGACY_F32  : VOP3_Real_gfx10<0x140>;
1061
1062//===----------------------------------------------------------------------===//
1063// GFX8, GFX9 (VI).
1064//===----------------------------------------------------------------------===//
1065
let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in {

// VI (GFX8/GFX9) real of a pseudo named NAME#"_e64".
multiclass VOP3_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP_Pseudo>(NAME#"_e64").Pfl>;
}
// Same, for pseudos with no "_e64" suffix (readlane/writelane).
multiclass VOP3_Real_No_Suffix_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

// VOP3b variant (extra SGPR destination).
multiclass VOP3be_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3be_vi <op, !cast<VOP_Pseudo>(NAME#"_e64").Pfl>;
}

// GFX9 opsel encoding (still within the VI encoding family).
multiclass VOP3OpSel_Real_gfx9<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3OpSel_gfx9 <op, !cast<VOP_Pseudo>(NAME#"_e64").Pfl>;
}

// Interpolation-instruction encoding (no "_e64" suffix).
multiclass VOP3Interp_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Interp_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

} // End AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8"

let AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" in {

// GFX8-only f16 VOP3 real.
multiclass VOP3_F16_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

// GFX8-only f16 interpolation real.
multiclass VOP3Interp_F16_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Interp_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>;
}

} // End AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8"
1107
let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {

// GFX9-only f16 real that encodes pseudo `OpName` but assembles as
// `AsmName` (used for the "legacy" mnemonic renames below).
multiclass VOP3_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName#"_e64"), SIEncodingFamily.GFX9>,
            VOP3e_vi <op, !cast<VOP3_Pseudo>(OpName#"_e64").Pfl> {
              VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName#"_e64");
              let AsmString = AsmName # ps.AsmOperands;
            }
}

// Opsel-encoded f16 real for the current NAME, with an explicit
// assembly mnemonic.
multiclass VOP3OpSel_F16_Real_gfx9<bits<10> op, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
            VOP3OpSel_gfx9 <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
              VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME#"_e64");
              let AsmString = AsmName # ps.AsmOperands;
            }
}

// GFX9-only f16 interpolation real with a renamed mnemonic.
multiclass VOP3Interp_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName), SIEncodingFamily.GFX9>,
            VOP3Interp_vi <op, !cast<VOP3_Pseudo>(OpName).Pfl> {
              VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName);
              let AsmString = AsmName # ps.AsmOperands;
            }
}

// Generic GFX9-only real with a renamed mnemonic.
multiclass VOP3_Real_gfx9<bits<10> op, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
              VOP3e_vi <op, !cast<VOP_Pseudo>(NAME#"_e64").Pfl> {
              VOP_Pseudo ps = !cast<VOP_Pseudo>(NAME#"_e64");
              let AsmString = AsmName # ps.AsmOperands;
            }
}

} // End AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9"
1143
// VI (GFX8/GFX9) opcode assignments.
defm V_MAD_U64_U32      : VOP3be_Real_vi <0x1E8>;
defm V_MAD_I64_I32      : VOP3be_Real_vi <0x1E9>;

defm V_MAD_LEGACY_F32   : VOP3_Real_vi <0x1c0>;
defm V_MAD_F32          : VOP3_Real_vi <0x1c1>;
defm V_MAD_I32_I24      : VOP3_Real_vi <0x1c2>;
defm V_MAD_U32_U24      : VOP3_Real_vi <0x1c3>;
defm V_CUBEID_F32       : VOP3_Real_vi <0x1c4>;
defm V_CUBESC_F32       : VOP3_Real_vi <0x1c5>;
defm V_CUBETC_F32       : VOP3_Real_vi <0x1c6>;
defm V_CUBEMA_F32       : VOP3_Real_vi <0x1c7>;
defm V_BFE_U32          : VOP3_Real_vi <0x1c8>;
defm V_BFE_I32          : VOP3_Real_vi <0x1c9>;
defm V_BFI_B32          : VOP3_Real_vi <0x1ca>;
defm V_FMA_F32          : VOP3_Real_vi <0x1cb>;
defm V_FMA_F64          : VOP3_Real_vi <0x1cc>;
defm V_LERP_U8          : VOP3_Real_vi <0x1cd>;
defm V_ALIGNBIT_B32     : VOP3_Real_vi <0x1ce>;
defm V_ALIGNBYTE_B32    : VOP3_Real_vi <0x1cf>;
defm V_MIN3_F32         : VOP3_Real_vi <0x1d0>;
defm V_MIN3_I32         : VOP3_Real_vi <0x1d1>;
defm V_MIN3_U32         : VOP3_Real_vi <0x1d2>;
defm V_MAX3_F32         : VOP3_Real_vi <0x1d3>;
defm V_MAX3_I32         : VOP3_Real_vi <0x1d4>;
defm V_MAX3_U32         : VOP3_Real_vi <0x1d5>;
defm V_MED3_F32         : VOP3_Real_vi <0x1d6>;
defm V_MED3_I32         : VOP3_Real_vi <0x1d7>;
defm V_MED3_U32         : VOP3_Real_vi <0x1d8>;
defm V_SAD_U8           : VOP3_Real_vi <0x1d9>;
defm V_SAD_HI_U8        : VOP3_Real_vi <0x1da>;
defm V_SAD_U16          : VOP3_Real_vi <0x1db>;
defm V_SAD_U32          : VOP3_Real_vi <0x1dc>;
defm V_CVT_PK_U8_F32    : VOP3_Real_vi <0x1dd>;
defm V_DIV_FIXUP_F32    : VOP3_Real_vi <0x1de>;
defm V_DIV_FIXUP_F64    : VOP3_Real_vi <0x1df>;
defm V_DIV_SCALE_F32    : VOP3be_Real_vi <0x1e0>;
defm V_DIV_SCALE_F64    : VOP3be_Real_vi <0x1e1>;
defm V_DIV_FMAS_F32     : VOP3_Real_vi <0x1e2>;
defm V_DIV_FMAS_F64     : VOP3_Real_vi <0x1e3>;
defm V_MSAD_U8          : VOP3_Real_vi <0x1e4>;
defm V_QSAD_PK_U16_U8   : VOP3_Real_vi <0x1e5>;
defm V_MQSAD_PK_U16_U8  : VOP3_Real_vi <0x1e6>;
defm V_MQSAD_U32_U8     : VOP3_Real_vi <0x1e7>;

defm V_PERM_B32         : VOP3_Real_vi <0x1ed>;

// GFX8-only encodings of the f16 instructions (GFX9 uses the opsel
// forms below).
defm V_MAD_F16          : VOP3_F16_Real_vi <0x1ea>;
defm V_MAD_U16          : VOP3_F16_Real_vi <0x1eb>;
defm V_MAD_I16          : VOP3_F16_Real_vi <0x1ec>;
defm V_FMA_F16          : VOP3_F16_Real_vi <0x1ee>;
defm V_DIV_FIXUP_F16    : VOP3_F16_Real_vi <0x1ef>;
defm V_INTERP_P2_F16    : VOP3Interp_F16_Real_vi <0x276>;
1196
// GFX9 keeps the GFX8 encodings as "legacy" mnemonics...
let FPDPRounding = 1 in {
defm V_MAD_LEGACY_F16       : VOP3_F16_Real_gfx9 <0x1ea, "V_MAD_F16",       "v_mad_legacy_f16">;
defm V_FMA_LEGACY_F16       : VOP3_F16_Real_gfx9 <0x1ee, "V_FMA_F16",       "v_fma_legacy_f16">;
defm V_DIV_FIXUP_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ef, "V_DIV_FIXUP_F16", "v_div_fixup_legacy_f16">;
defm V_INTERP_P2_LEGACY_F16 : VOP3Interp_F16_Real_gfx9 <0x276, "V_INTERP_P2_F16", "v_interp_p2_legacy_f16">;
} // End FPDPRounding = 1

defm V_MAD_LEGACY_U16       : VOP3_F16_Real_gfx9 <0x1eb, "V_MAD_U16",       "v_mad_legacy_u16">;
defm V_MAD_LEGACY_I16       : VOP3_F16_Real_gfx9 <0x1ec, "V_MAD_I16",       "v_mad_legacy_i16">;

// ...while the plain mnemonics use new opsel-capable opcodes encoding
// the *_gfx9 pseudos.
defm V_MAD_F16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x203, "v_mad_f16">;
defm V_MAD_U16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x204, "v_mad_u16">;
defm V_MAD_I16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x205, "v_mad_i16">;
defm V_FMA_F16_gfx9         : VOP3OpSel_F16_Real_gfx9 <0x206, "v_fma_f16">;
defm V_DIV_FIXUP_F16_gfx9   : VOP3OpSel_F16_Real_gfx9 <0x207, "v_div_fixup_f16">;
defm V_INTERP_P2_F16_gfx9   : VOP3Interp_F16_Real_gfx9 <0x277, "V_INTERP_P2_F16_gfx9", "v_interp_p2_f16">;
1213
defm V_ADD_I32         : VOP3_Real_vi <0x29c>;
defm V_SUB_I32         : VOP3_Real_vi <0x29d>;

defm V_INTERP_P1_F32_e64  : VOP3Interp_Real_vi <0x270>;
defm V_INTERP_P2_F32_e64  : VOP3Interp_Real_vi <0x271>;
defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_vi <0x272>;

defm V_INTERP_P1LL_F16  : VOP3Interp_Real_vi <0x274>;
defm V_INTERP_P1LV_F16  : VOP3Interp_Real_vi <0x275>;
defm V_ADD_F64          : VOP3_Real_vi <0x280>;
defm V_MUL_F64          : VOP3_Real_vi <0x281>;
defm V_MIN_F64          : VOP3_Real_vi <0x282>;
defm V_MAX_F64          : VOP3_Real_vi <0x283>;
defm V_LDEXP_F64        : VOP3_Real_vi <0x284>;
defm V_MUL_LO_U32       : VOP3_Real_vi <0x285>;

// removed from VI as identical to V_MUL_LO_U32
// (kept for the assembler only; note the shared opcode 0x285).
let isAsmParserOnly = 1 in {
defm V_MUL_LO_I32       : VOP3_Real_vi <0x285>;
}

defm V_MUL_HI_U32       : VOP3_Real_vi <0x286>;
defm V_MUL_HI_I32       : VOP3_Real_vi <0x287>;

defm V_READLANE_B32     : VOP3_Real_No_Suffix_vi <0x289>;
defm V_WRITELANE_B32    : VOP3_Real_No_Suffix_vi <0x28a>;

defm V_LSHLREV_B64      : VOP3_Real_vi <0x28f>;
defm V_LSHRREV_B64      : VOP3_Real_vi <0x290>;
defm V_ASHRREV_I64      : VOP3_Real_vi <0x291>;
defm V_TRIG_PREOP_F64   : VOP3_Real_vi <0x292>;

defm V_LSHL_ADD_U32 : VOP3_Real_vi <0x1fd>;
defm V_ADD_LSHL_U32 : VOP3_Real_vi <0x1fe>;
defm V_ADD3_U32 : VOP3_Real_vi <0x1ff>;
defm V_LSHL_OR_B32 : VOP3_Real_vi <0x200>;
defm V_AND_OR_B32 : VOP3_Real_vi <0x201>;
defm V_OR3_B32 : VOP3_Real_vi <0x202>;
defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx9 <0x2a0>;

defm V_XAD_U32 : VOP3_Real_vi <0x1f3>;

// Opsel-encoded 16-bit three-operand min/max/med.
defm V_MIN3_F16 : VOP3OpSel_Real_gfx9 <0x1f4>;
defm V_MIN3_I16 : VOP3OpSel_Real_gfx9 <0x1f5>;
defm V_MIN3_U16 : VOP3OpSel_Real_gfx9 <0x1f6>;

defm V_MAX3_F16 : VOP3OpSel_Real_gfx9 <0x1f7>;
defm V_MAX3_I16 : VOP3OpSel_Real_gfx9 <0x1f8>;
defm V_MAX3_U16 : VOP3OpSel_Real_gfx9 <0x1f9>;

defm V_MED3_F16 : VOP3OpSel_Real_gfx9 <0x1fa>;
defm V_MED3_I16 : VOP3OpSel_Real_gfx9 <0x1fb>;
defm V_MED3_U16 : VOP3OpSel_Real_gfx9 <0x1fc>;

defm V_ADD_I16  : VOP3OpSel_Real_gfx9 <0x29e>;
defm V_SUB_I16  : VOP3OpSel_Real_gfx9 <0x29f>;

defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx9 <0x1f1>;
defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx9 <0x1f2>;

defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx9 <0x299>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx9 <0x29a>;
1276