//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}

class GICombinerHelperArg<string type, string name> {
  string Type = type;
  string Name = name;
}

// Declares a combiner helper class
class GICombinerHelper<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // The name of a run-time compiler option that will be generated to disable
  // specific rules within this combiner.
  string DisableRuleOption = ?;
  // The state class to inherit from (if any). The generated helper will inherit
  // from this class and will forward arguments to its constructors.
  string StateClass = "";
  // Any additional arguments that should be appended to the tryCombine*()
  // functions.
  list<GICombinerHelperArg> AdditionalArguments =
      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
}
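
// Illustrative sketch only (hypothetical target and option names, not defined
// in this file): a backend typically instantiates a combiner by grouping the
// rules it wants and naming the class to generate, e.g.
//
//   def MyTargetPreLegalizerCombinerHelper : GICombinerHelper<
//       "MyTargetGenPreLegalizerCombinerHelper", [all_combines]> {
//     let DisableRuleOption = "mytarget-prelegalizercombiner-disable-rule";
//   }
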
class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match
  /// See GIMatchKind for details.
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  /// See GIApplyKind for details.
  dag Apply = apply;
}
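
// Putting the three dags together, a rule has the following general shape
// (a sketch with placeholder names only; see the real rules further down and
// wip_match_opcode below for concrete uses):
//
//   def my_rule : GICombineRule<
//     (defs root:$root, my_matchdata:$info),
//     (match (wip_match_opcode G_FOO):$root,
//            [{ return Helper.matchMyRule(*${root}, ${info}); }]),
//     (apply [{ Helper.applyMyRule(*${root}, ${info}); }])>;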

/// The operator at the root of a GICombineRule.Defs dag.
def defs;

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;
/// Declare a root node. There must be at least one of these in every combine
/// rule.
/// TODO: The plan is to elide `root` definitions and determine it from the DAG
///       itself with an override for situations where the usual determination
///       is incorrect.
def root : GIDefKind;

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type> : GIDefKind {
  /// A C++ type name indicating the storage type.
  string Type = type;
}
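
// The bound name of a GIDefMatchData operand (e.g. $matchinfo in the rules
// below) is visible to both the match and apply C++ blocks via ${name}; this
// is how the rules in this file pass a Register, constant, or build function
// from the match step to the apply step.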

def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

/// The operator at the root of a GICombineRule.Match dag.
def match;
/// All arguments of the match operator must be either:
/// * A subclass of GIMatchKind
/// * A subclass of GIMatchKindWithArgs
/// * A subclass of Instruction
/// * A MIR code block (deprecated)
/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
/// in their definitions below.
/// For the Instruction case, these are collected into a DAG where operand names
/// that occur multiple times introduce edges.
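/// For example (a sketch only; most rules in this file still match a single
/// root via wip_match_opcode), two instructions connected through a shared
/// operand name:
///
///   (match (G_TRUNC $tmp, $src),
///          (G_ZEXT $dst, $tmp))
///
/// Here the repeated name $tmp is defined by the G_TRUNC and used by the
/// G_ZEXT, which introduces the edge between the two instructions.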
class GIMatchKind;
class GIMatchKindWithArgs;

/// In lieu of proper macro support, trivial one-off opcode checks can be
/// performed with this.
def wip_match_opcode : GIMatchKindWithArgs;

/// The operator at the root of a GICombineRule.Apply dag.
def apply;
/// All arguments of the apply operator must be subclasses of GIApplyKind, or
/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
/// (deprecated).
class GIApplyKind;
class GIApplyKindWithArgs;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def build_fn_matchinfo :
    GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
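
// Rules using build_fn_matchinfo below follow a common idiom: the match C++
// fills the std::function with the code that rebuilds the instruction(s), and
// the apply step simply runs it via Helper.applyBuildFn (which also erases the
// root) or Helper.applyBuildFnNoErase (which leaves the root in place).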

def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
         [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;
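
// In copy_prop above, the COPY's operands are bound to $d and $s and the
// matched instruction itself is bound to $mi; bound names are made available
// to the C++ blocks through ${name} substitution.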

def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
         [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;

def load_and_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
        [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;

def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
         [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
         [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
         [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

// Fold shift (shift base x), y -> shift base, (x+y), if shifts are same
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
         [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
         [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
         [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
         [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

def narrow_binop_feeding_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

// Fold x op 0 -> x
def right_identity_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
                           G_PTR_ADD, G_ROTL, G_ROTR):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold x op 1 -> x
def right_identity_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (x op x) -> x
def binop_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_OR):$root,
    [{ return Helper.matchBinOpSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ return Helper.eraseInst(*${root}); }])
>;

def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
def constant_fp_op: GICombineRule <
  (defs root:$root, constant_fp_op_matchinfo:$info),
  (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
    [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_PTRTOINT):$root,
    [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
>;

// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x, y)
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> int2ptr(C1 + C2)
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold ashr (shl x, C), C -> sext_inreg x, (bitwidth - C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K -> x,
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is the same as
// the destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
         [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is the same as the destination
// type and the truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
         [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
  [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FNEG):$root,
         [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (unmerge(merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
  [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

// Fold (fabs (fabs x)) -> (fabs x).
def fabs_fabs_fold: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (fabs (fneg x)) -> (fabs x).
def fabs_fneg_fold: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
         [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;

// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Fold trunc (shl x, K) -> shl (trunc x), K, if K < VT.getScalarSizeInBits().
def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
def trunc_shl: GICombineRule <
  (defs root:$root, trunc_shl_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
>;

// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
         [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;

def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;


def truncstore_merge_matchdata : GIDefMatchData<"MergeTruncStoresInfo">;
def truncstore_merge : GICombineRule<
  (defs root:$root, truncstore_merge_matchdata:$info),
  (match (wip_match_opcode G_STORE):$root,
   [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
  (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;

def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently this group contains only the insert-element combine defined above.
def insert_vec_elt_combines : GICombineGroup<
                            [combine_insert_vec_elts_build_vector]>;

def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;

def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate]>;

def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;

def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
   [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd]>;

// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
   [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
         [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
    [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                             ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
                                                  *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                              ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
                                            *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one]>;

def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;

def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload,
    combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
    form_bitfield_extract, constant_fold, fabs_fneg_fold,
    intdiv_combines, mulh_combines, redundant_neg_operands,
    and_or_disjoint_mask, fma_combines]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;
