//=- AArch64Combine.td - Define AArch64 Combine Rules --------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AArch64-specific GlobalISel combine rules.
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

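// Convert a G_FCONSTANT into a G_CONSTANT when all of its users are stores,
// so the value can be materialized and stored from a GPR.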
def fconstant_to_constant : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCONSTANT):$root,
         [{ return matchFConstantToConstant(*${root}, MRI); }]),
  (apply [{ applyFConstantToConstant(*${root}); }])>;

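// Look through a G_TRUNC feeding a G_ICMP when known bits show that the
// compare would produce the same result on the wider source value.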
def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
  (defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
  (apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;

// AArch64-specific offset folding for G_GLOBAL_VALUE.
def fold_global_offset_matchdata : GIDefMatchData<"std::pair<uint64_t, uint64_t>">;
def fold_global_offset : GICombineRule<
  (defs root:$root, fold_global_offset_matchdata:$matchinfo),
  (match (wip_match_opcode G_GLOBAL_VALUE):$root,
          [{ return matchFoldGlobalOffset(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

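// Rewrite a G_VECREDUCE_ADD of a zero/sign-extended vector as a [us]dot
// against a splat of 1 followed by the reduction (requires +dotprod).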
// Boolean: 0 = G_ZEXT, 1 = G_SEXT
def ext_addv_to_udot_addv_matchinfo : GIDefMatchData<"std::tuple<Register, Register, bool>">;
let Predicates = [HasDotProd] in {
def ext_addv_to_udot_addv : GICombineRule<
  (defs root:$root, ext_addv_to_udot_addv_matchinfo:$matchinfo),
  (match (wip_match_opcode G_VECREDUCE_ADD):$root,
         [{ return matchExtAddvToUdotAddv(*${root}, MRI, STI, ${matchinfo}); }]),
  (apply [{ applyExtAddvToUdotAddv(*${root}, MRI, B, Observer, STI, ${matchinfo}); }])
>;
}

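// Pre-legalization combiner used when optimizations are enabled.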
def AArch64PreLegalizerCombiner: GICombiner<
  "AArch64PreLegalizerCombinerImpl", [all_combines,
                                      fconstant_to_constant,
                                      icmp_redundant_trunc,
                                      fold_global_offset,
                                      shuffle_to_extract,
                                      ext_addv_to_udot_addv]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

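// Reduced pre-legalization combiner used at -O0 / optnone.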
def AArch64O0PreLegalizerCombiner: GICombiner<
  "AArch64O0PreLegalizerCombinerImpl", [optnone_combines]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

// Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
// target-specific opcode.
def shuffle_matchdata : GIDefMatchData<"ShuffleVectorPseudo">;

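// Each of the following rules recognizes a specific G_SHUFFLE_VECTOR mask and
// rewrites it to the corresponding AArch64 shuffle pseudo (REV, ZIP, UZP, DUP,
// TRN or EXT).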
def rev : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchREV(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def zip : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchZip(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def uzp : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchUZP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def dup : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDup(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def trn : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchTRN(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def ext : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchEXT(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyEXT(*${root}, ${matchinfo}); }])
>;

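// Shuffle that inserts a single lane of one vector into another -> INS.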
def shuf_to_ins_matchdata : GIDefMatchData<"std::tuple<Register, int, Register, int>">;
def shuf_to_ins : GICombineRule<
  (defs root:$root, shuf_to_ins_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchINS(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyINS(*${root}, MRI, B, ${matchinfo}); }])
>;

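// Vector G_ASHR/G_LSHR by a constant splat within the legal immediate range
// -> VASHR/VLSHR immediate pseudos.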
def vashr_vlshr_imm_matchdata : GIDefMatchData<"int64_t">;
def vashr_vlshr_imm : GICombineRule<
  (defs root:$root, vashr_vlshr_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
          [{ return matchVAshrLshrImm(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
>;

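// Shuffle that broadcasts a single input lane -> DUPLANE.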
def form_duplane_matchdata :
  GIDefMatchData<"std::pair<unsigned, int>">;
def form_duplane : GICombineRule<
  (defs root:$root, form_duplane_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return matchDupLane(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyDupLane(*${root}, MRI, B, ${matchinfo}); }])
>;

def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn,
                                              form_duplane,
                                              shuf_to_ins]>;

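// Adjust the immediate and predicate of a G_ICMP (e.g. x sge c -> x sgt c-1)
// when the new immediate can be encoded directly in the compare instruction.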
def adjust_icmp_imm_matchdata :
  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
def adjust_icmp_imm : GICombineRule<
  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
>;

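// Swap the operands of a G_ICMP (inverting the predicate) when doing so is
// likely to let a shift or extend be folded into the compare.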
def swap_icmp_operands : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return trySwapICmpOperands(*${root}, MRI); }]),
  (apply [{ applySwapICmpOperands(*${root}, Observer); }])
>;

def icmp_lowering : GICombineGroup<[adjust_icmp_imm, swap_icmp_operands]>;

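// Rewrite an extract of lane 0 of a pairwise add into a scalar add of lanes 0
// and 1, which helps form the pairwise-add instructions.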
def extractvecelt_pairwise_add_matchdata : GIDefMatchData<"std::tuple<unsigned, LLT, Register>">;
def extractvecelt_pairwise_add : GICombineRule<
  (defs root:$root, extractvecelt_pairwise_add_matchdata:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
          [{ return matchExtractVecEltPairwiseAdd(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtractVecEltPairwiseAdd(*${root}, MRI, B, ${matchinfo}); }])
>;

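// Replace a G_MUL by a constant with a cheaper sequence of shifts and
// adds/subs, mirroring the SelectionDAG mul-by-constant logic.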
def mul_const_matchdata : GIDefMatchData<"std::function<void(MachineIRBuilder&, Register)>">;
def mul_const : GICombineRule<
  (defs root:$root, mul_const_matchdata:$matchinfo),
  (match (wip_match_opcode G_MUL):$root,
          [{ return matchAArch64MulConstCombine(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAArch64MulConstCombine(*${root}, MRI, B, ${matchinfo}); }])
>;

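// G_MUL of two sign/zero-extended vectors -> SMULL/UMULL.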
def lower_mull : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
          [{ return matchExtMulToMULL(*${root}, MRI); }]),
  (apply [{ applyExtMulToMULL(*${root}, MRI, B, Observer); }])
>;

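// G_BUILD_VECTOR that splats a single value -> DUP.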
def build_vector_to_dup : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
          [{ return matchBuildVectorToDup(*${root}, MRI); }]),
  (apply [{ applyBuildVectorToDup(*${root}, MRI, B); }])
>;

def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>;

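// Lower a vector G_FCMP into a form the instruction selector can handle
// directly.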
def lower_vector_fcmp : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCMP):$root,
    [{ return matchLowerVectorFCMP(*${root}, MRI, B); }]),
  (apply [{ applyLowerVectorFCMP(*${root}, MRI, B); }])>;

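// G_STORE of a G_TRUNC -> truncating store of the wider value.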
def form_truncstore_matchdata : GIDefMatchData<"Register">;
def form_truncstore : GICombineRule<
  (defs root:$root, form_truncstore_matchdata:$matchinfo),
  (match (wip_match_opcode G_STORE):$root,
          [{ return matchFormTruncstore(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

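// G_MERGE_VALUES of a value with an all-zero high part -> G_ZEXT.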
def fold_merge_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
          [{ return matchFoldMergeToZext(*${d}, MRI); }]),
  (apply [{ applyFoldMergeToZext(*${d}, MRI, B, Observer); }])
>;

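// Mutate a G_ANYEXT into a G_ZEXT when that is expected to be profitable.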
def mutate_anyext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_ANYEXT):$d,
          [{ return matchMutateAnyExtToZExt(*${d}, MRI); }]),
  (apply [{ applyMutateAnyExtToZExt(*${d}, MRI, B, Observer); }])
>;

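// Split a 128-bit store of zero into two 64-bit stores of XZR.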
def split_store_zero_128 : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_STORE):$d,
          [{ return matchSplitStoreZero128(*${d}, MRI); }]),
  (apply [{ applySplitStoreZero128(*${d}, MRI, B, Observer); }])
>;

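// Lower a vector G_SEXT_INREG to a shift-left / arithmetic-shift-right pair.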
def vector_sext_inreg_to_shift : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
          [{ return matchVectorSextInReg(*${d}, MRI); }]),
  (apply [{ applyVectorSextInReg(*${d}, MRI, B, Observer); }])
>;

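// Fold an extend feeding a G_UNMERGE_VALUES so that the unmerge reads the
// narrower pre-extension value (see matchUnmergeExtToUnmerge).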
def unmerge_ext_to_unmerge_matchdata : GIDefMatchData<"Register">;
def unmerge_ext_to_unmerge : GICombineRule<
  (defs root:$d, unmerge_ext_to_unmerge_matchdata:$matchinfo),
  (match (wip_match_opcode G_UNMERGE_VALUES):$d,
          [{ return matchUnmergeExtToUnmerge(*${d}, MRI, ${matchinfo}); }]),
  (apply [{ applyUnmergeExtToUnmerge(*${d}, MRI, B, Observer, ${matchinfo}); }])
>;

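// (or (and x, mask), (and y, ~mask)) -> BSP mask, x, y.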
def regtriple_matchdata : GIDefMatchData<"std::tuple<Register, Register, Register>">;
def or_to_bsp : GICombineRule<
  (defs root:$root, regtriple_matchdata:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return matchOrToBSP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyOrToBSP(*${root}, MRI, B, ${matchinfo}); }])
>;

// Post-legalization combines which should happen at all optimization levels
// (e.g. ones that facilitate matching for the selector, such as lowering to
// target-specific pseudo instructions).
def AArch64PostLegalizerLowering
    : GICombiner<"AArch64PostLegalizerLoweringImpl",
                       [shuffle_vector_lowering, vashr_vlshr_imm,
                        icmp_lowering, build_vector_lowering,
                        lower_vector_fcmp, form_truncstore,
                        vector_sext_inreg_to_shift,
                        unmerge_ext_to_unmerge, lower_mull]> {
}

// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombiner
    : GICombiner<"AArch64PostLegalizerCombinerImpl",
                       [copy_prop, combines_for_extload,
                        combine_indexed_load_store,
                        sext_trunc_sextload, mutate_anyext_to_zext,
                        hoist_logic_op_with_same_opcode_hands,
                        redundant_and, xor_of_and_with_same_reg,
                        extractvecelt_pairwise_add, redundant_or,
                        mul_const, redundant_sext_inreg,
                        form_bitfield_extract, rotate_out_of_range,
                        icmp_to_true_false_known_bits, merge_unmerge,
                        select_combines, fold_merge_to_zext,
                        constant_fold_binops, identity_combines,
                        ptr_add_immed_chain, overlapping_and,
                        split_store_zero_128, undef_combines,
                        select_to_minmax, or_to_bsp]> {
}