//=- AArch64.td - Define AArch64 Combine Rules ---------------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the GlobalISel combine rules and combiner helpers that
// are specific to the AArch64 target.
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

// Rewrite a G_FCONSTANT as an integer constant. Match/apply logic lives in
// matchFConstantToConstant/applyFConstantToConstant in the combiner's C++.
def fconstant_to_constant : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCONSTANT):$root,
         [{ return matchFConstantToConstant(*${root}, MRI); }]),
  (apply [{ applyFConstantToConstant(*${root}); }])>;

// Remove a redundant G_TRUNC feeding a G_ICMP. The matchdata Register carries
// the replacement operand from match to apply; the match uses known-bits
// analysis (Helper.getKnownBits()) to prove the truncate is redundant.
def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
  (defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
  (apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;

// AArch64-specific offset folding for G_GLOBAL_VALUE.
// The matchdata pair holds two uint64_t values computed by the match and
// consumed by the apply (see matchFoldGlobalOffset/applyFoldGlobalOffset).
def fold_global_offset_matchdata : GIDefMatchData<"std::pair<uint64_t, uint64_t>">;
def fold_global_offset : GICombineRule<
  (defs root:$root, fold_global_offset_matchdata:$matchinfo),
  (match (wip_match_opcode G_GLOBAL_VALUE):$root,
          [{ return matchFoldGlobalOffset(*${root}, MRI, ${matchinfo}); }]),
  (apply [{  return applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo});}])
>;

// Pre-legalization combiner: runs all generic combines plus the AArch64
// pre-legalizer rules defined above.
def AArch64PreLegalizerCombinerHelper: GICombinerHelper<
  "AArch64GenPreLegalizerCombinerHelper", [all_combines,
                                           fconstant_to_constant,
                                           icmp_redundant_trunc,
                                           fold_global_offset]> {
  let DisableRuleOption = "aarch64prelegalizercombiner-disable-rule";
  let StateClass = "AArch64PreLegalizerCombinerHelperState";
  let AdditionalArguments = [];
}

// Pre-legalization combiner used at -O0: only the optnone-safe generic
// combines, no AArch64-specific rules.
def AArch64O0PreLegalizerCombinerHelper: GICombinerHelper<
  "AArch64GenO0PreLegalizerCombinerHelper", [optnone_combines]> {
  let DisableRuleOption = "aarch64O0prelegalizercombiner-disable-rule";
  let StateClass = "AArch64O0PreLegalizerCombinerHelperState";
  let AdditionalArguments = [];
}

// Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
// target-specific opcode.
def shuffle_matchdata : GIDefMatchData<"ShuffleVectorPseudo">;

// G_SHUFFLE_VECTOR -> REV pseudo (see matchREV for the recognized masks).
def rev : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchREV(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

// G_SHUFFLE_VECTOR -> ZIP pseudo (see matchZip for the recognized masks).
def zip : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchZip(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

// G_SHUFFLE_VECTOR -> UZP pseudo (see matchUZP for the recognized masks).
def uzp : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchUZP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

// G_SHUFFLE_VECTOR -> DUP pseudo (see matchDup for the recognized masks).
def dup: GICombineRule <
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDup(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

// G_SHUFFLE_VECTOR -> TRN pseudo (see matchTRN for the recognized masks).
def trn : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchTRN(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

// G_SHUFFLE_VECTOR -> EXT pseudo. Unlike the rules above, this uses a
// dedicated applyEXT rather than the shared applyShuffleVectorPseudo.
def ext: GICombineRule <
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchEXT(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyEXT(*${root}, ${matchinfo}); }])
>;

// G_SHUFFLE_VECTOR -> INS. The matchdata tuple carries two (Register, lane
// index) pairs from match to apply (see matchINS/applyINS).
def shuf_to_ins_matchdata : GIDefMatchData<"std::tuple<Register, int, Register, int>">;
def shuf_to_ins: GICombineRule <
  (defs root:$root, shuf_to_ins_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchINS(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ return applyINS(*${root}, MRI, B, ${matchinfo}); }])
>;

// Recognize vector G_ASHR/G_LSHR by an immediate; the int64_t matchdata holds
// the shift amount extracted by matchVAshrLshrImm.
def vashr_vlshr_imm_matchdata : GIDefMatchData<"int64_t">;
def vashr_vlshr_imm : GICombineRule<
  (defs root:$root, vashr_vlshr_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
          [{ return matchVAshrLshrImm(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
>;

// Recognize a G_SHUFFLE_VECTOR that duplicates a single lane. The matchdata
// pair is an (opcode, lane) chosen by matchDupLane and used by applyDupLane.
def form_duplane_matchdata :
  GIDefMatchData<"std::pair<unsigned, int>">;
def form_duplane : GICombineRule <
  (defs root:$root, form_duplane_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return matchDupLane(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyDupLane(*${root}, MRI, B, ${matchinfo}); }])
>;

// All G_SHUFFLE_VECTOR lowering rules, grouped so the post-legalizer lowering
// pass can reference them as one unit. Order here is the order rules are
// listed, with dup tried first.
def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn,
                                              form_duplane,
                                              shuf_to_ins]>;

// Adjust a G_ICMP's immediate and predicate together; the matchdata pair holds
// the replacement (immediate, predicate) computed by matchAdjustICmpImmAndPred.
def adjust_icmp_imm_matchdata :
  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
def adjust_icmp_imm : GICombineRule <
  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
>;

// Swap the operands of a G_ICMP when trySwapICmpOperands decides it is
// profitable (which implies also adjusting the predicate in the apply step).
def swap_icmp_operands : GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return trySwapICmpOperands(*${root}, MRI); }]),
  (apply [{ applySwapICmpOperands(*${root}, Observer); }])
>;

// G_ICMP lowering rules grouped for the post-legalizer lowering pass.
def icmp_lowering : GICombineGroup<[adjust_icmp_imm, swap_icmp_operands]>;

// Combine a G_EXTRACT_VECTOR_ELT of a pairwise add; the matchdata tuple is an
// (opcode, type, source register) produced by matchExtractVecEltPairwiseAdd.
def extractvecelt_pairwise_add_matchdata : GIDefMatchData<"std::tuple<unsigned, LLT, Register>">;
def extractvecelt_pairwise_add : GICombineRule<
  (defs root:$root, extractvecelt_pairwise_add_matchdata:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
          [{ return matchExtractVecEltPairwiseAdd(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtractVecEltPairwiseAdd(*${root}, MRI, B, ${matchinfo}); }])
>;

// Combine G_MUL by a constant. The matchdata is a builder callback: the match
// captures how to rebuild the result, and the apply invokes it.
def mul_const_matchdata : GIDefMatchData<"std::function<void(MachineIRBuilder&, Register)>">;
def mul_const : GICombineRule<
  (defs root:$root, mul_const_matchdata:$matchinfo),
  (match (wip_match_opcode G_MUL):$root,
          [{ return matchAArch64MulConstCombine(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAArch64MulConstCombine(*${root}, MRI, B, ${matchinfo}); }])
>;

// Rewrite a qualifying G_BUILD_VECTOR as a dup (see matchBuildVectorToDup).
def build_vector_to_dup : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
          [{ return matchBuildVectorToDup(*${root}, MRI); }]),
  (apply [{ return applyBuildVectorToDup(*${root}, MRI, B); }])
>;

// G_BUILD_VECTOR lowering rules grouped for the post-legalizer lowering pass.
def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>;

// Lower a vector G_FCMP. Note the match fragment does all the work (it is
// given the builder B), so the apply step is intentionally empty.
def lower_vector_fcmp : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCMP):$root,
    [{ return lowerVectorFCMP(*${root}, MRI, B); }]),
  (apply [{}])>;

// Form a truncating store from a G_STORE; the Register matchdata carries the
// source value chosen by matchFormTruncstore.
def form_truncstore_matchdata : GIDefMatchData<"Register">;
def form_truncstore : GICombineRule<
  (defs root:$root, form_truncstore_matchdata:$matchinfo),
  (match (wip_match_opcode G_STORE):$root,
          [{ return matchFormTruncstore(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

// Fold a qualifying G_MERGE_VALUES into a zero-extend
// (see matchFoldMergeToZext). This rule names its root $d rather than $root.
def fold_merge_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
          [{ return matchFoldMergeToZext(*${d}, MRI); }]),
  (apply [{ applyFoldMergeToZext(*${d}, MRI, B, Observer); }])
>;

// Post-legalization combines which should happen at all optimization levels.
// (E.g. ones that facilitate matching for the selector) For example, matching
// pseudos.
def AArch64PostLegalizerLoweringHelper
    : GICombinerHelper<"AArch64GenPostLegalizerLoweringHelper",
                       [shuffle_vector_lowering, vashr_vlshr_imm,
                        icmp_lowering, build_vector_lowering,
                        lower_vector_fcmp, form_truncstore]> {
  let DisableRuleOption = "aarch64postlegalizerlowering-disable-rule";
}

// Post-legalization combines which are primarily optimizations. Mixes generic
// combines (copy_prop, identity_combines, etc.) with the AArch64-specific
// rules defined in this file.
def AArch64PostLegalizerCombinerHelper
    : GICombinerHelper<"AArch64GenPostLegalizerCombinerHelper",
                       [copy_prop, erase_undef_store, combines_for_extload,
                        sext_trunc_sextload,
                        hoist_logic_op_with_same_opcode_hands,
                        redundant_and, xor_of_and_with_same_reg,
                        extractvecelt_pairwise_add, redundant_or,
                        mul_const, redundant_sext_inreg,
                        form_bitfield_extract, rotate_out_of_range,
                        icmp_to_true_false_known_bits, merge_unmerge,
                        select_combines, fold_merge_to_zext,
                        constant_fold, identity_combines]> {
  let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
}
