//=- AArch64Combine.td - Define AArch64 Combine Rules --------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares GlobalISel combine rules for the AArch64-specific combiners.
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

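// Fold a G_FCONSTANT into an equivalent G_CONSTANT when the value is better
// materialized as an integer (e.g. when it is only ever stored), so it can be
// placed on the GPR bank.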
def fconstant_to_constant : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCONSTANT):$root,
         [{ return matchFConstantToConstant(*${root}, MRI); }]),
  (apply [{ applyFConstantToConstant(*${root}); }])>;

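// Remove a redundant G_TRUNC feeding a G_ICMP when known bits show that the
// comparison can be performed directly on the wider source register.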
def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
  (defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
  (apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;

// AArch64-specific offset folding for G_GLOBAL_VALUE.
def fold_global_offset_matchdata : GIDefMatchData<"std::pair<uint64_t, uint64_t>">;
def fold_global_offset : GICombineRule<
  (defs root:$root, fold_global_offset_matchdata:$matchinfo),
  (match (wip_match_opcode G_GLOBAL_VALUE):$root,
          [{ return matchFoldGlobalOffset(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

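// Pre-legalization combiners. The O0 variant runs only the reduced set of
// combines that is appropriate for unoptimized (optnone) functions.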
def AArch64PreLegalizerCombiner: GICombinerHelper<
  "AArch64PreLegalizerCombinerImpl", [all_combines,
                                      fconstant_to_constant,
                                      icmp_redundant_trunc,
                                      fold_global_offset]> {
}

def AArch64O0PreLegalizerCombiner: GICombinerHelper<
  "AArch64O0PreLegalizerCombinerImpl", [optnone_combines]> {
}

// Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
// target-specific opcode.
def shuffle_matchdata : GIDefMatchData<"ShuffleVectorPseudo">;

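// Shuffle masks matching the AArch64 REV{16,32,64} element reversals.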
def rev : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchREV(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

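// Shuffle masks matching the ZIP1/ZIP2 interleave patterns.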
def zip : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchZip(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

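// Shuffle masks matching the UZP1/UZP2 de-interleave patterns.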
def uzp : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchUZP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

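// Shuffles that broadcast a single element, lowered to DUP.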
def dup: GICombineRule <
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDup(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

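// Shuffle masks matching the TRN1/TRN2 transpose patterns.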
def trn : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchTRN(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

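// Shuffles that take a contiguous slice of the concatenation of the two
// inputs, lowered to EXT.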
def ext: GICombineRule <
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchEXT(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyEXT(*${root}, ${matchinfo}); }])
>;

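// Shuffles equivalent to inserting a single lane, lowered to INS.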
def shuf_to_ins_matchdata : GIDefMatchData<"std::tuple<Register, int, Register, int>">;
def shuf_to_ins: GICombineRule <
  (defs root:$root, shuf_to_ins_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchINS(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyINS(*${root}, MRI, B, ${matchinfo}); }])
>;

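// Vector G_ASHR/G_LSHR by a suitable splat constant are rewritten to the
// immediate-shift pseudos (G_VASHR/G_VLSHR).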
def vashr_vlshr_imm_matchdata : GIDefMatchData<"int64_t">;
def vashr_vlshr_imm : GICombineRule<
  (defs root:$root, vashr_vlshr_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
          [{ return matchVAshrLshrImm(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
>;

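// A G_SHUFFLE_VECTOR that broadcasts one lane of an input vector is rewritten
// to a DUPLANE pseudo.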
def form_duplane_matchdata :
  GIDefMatchData<"std::pair<unsigned, int>">;
def form_duplane : GICombineRule <
  (defs root:$root, form_duplane_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return matchDupLane(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyDupLane(*${root}, MRI, B, ${matchinfo}); }])
>;

def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn,
                                              form_duplane,
                                              shuf_to_ins]>;

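// Adjust a G_ICMP immediate and predicate to an equivalent compare whose
// immediate can be encoded directly.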
def adjust_icmp_imm_matchdata :
  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
def adjust_icmp_imm : GICombineRule <
  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
>;

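// Swap the operands of a G_ICMP (adjusting the predicate) when the swapped
// form is expected to be cheaper to select.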
def swap_icmp_operands : GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return trySwapICmpOperands(*${root}, MRI); }]),
  (apply [{ applySwapICmpOperands(*${root}, Observer); }])
>;

def icmp_lowering : GICombineGroup<[adjust_icmp_imm, swap_icmp_operands]>;

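// Fold an extract of element 0 of a pairwise vector add into a pairwise-add
// instruction (e.g. FADDP).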
def extractvecelt_pairwise_add_matchdata : GIDefMatchData<"std::tuple<unsigned, LLT, Register>">;
def extractvecelt_pairwise_add : GICombineRule<
  (defs root:$root, extractvecelt_pairwise_add_matchdata:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
          [{ return matchExtractVecEltPairwiseAdd(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtractVecEltPairwiseAdd(*${root}, MRI, B, ${matchinfo}); }])
>;

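// Expand a G_MUL by constant into a cheaper shift/add/sub sequence when
// profitable.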
def mul_const_matchdata : GIDefMatchData<"std::function<void(MachineIRBuilder&, Register)>">;
def mul_const : GICombineRule<
  (defs root:$root, mul_const_matchdata:$matchinfo),
  (match (wip_match_opcode G_MUL):$root,
          [{ return matchAArch64MulConstCombine(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAArch64MulConstCombine(*${root}, MRI, B, ${matchinfo}); }])
>;

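// A G_BUILD_VECTOR whose operands are all the same value is rebuilt as a
// broadcast (DUP) when profitable.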
def build_vector_to_dup : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
          [{ return matchBuildVectorToDup(*${root}, MRI); }]),
  (apply [{ applyBuildVectorToDup(*${root}, MRI, B); }])
>;

def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>;

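// Lower a vector G_FCMP into compares the selector can handle directly.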
def lower_vector_fcmp : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCMP):$root,
    [{ return matchLowerVectorFCMP(*${root}, MRI, B); }]),
  (apply [{ applyLowerVectorFCMP(*${root}, MRI, B); }])>;

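// Turn a G_STORE of a G_TRUNC result into a truncating store of the wider
// value.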
def form_truncstore_matchdata : GIDefMatchData<"Register">;
def form_truncstore : GICombineRule<
  (defs root:$root, form_truncstore_matchdata:$matchinfo),
  (match (wip_match_opcode G_STORE):$root,
          [{ return matchFormTruncstore(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

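// Fold a G_MERGE_VALUES whose high half is known to be zero into a
// zero-extension of the low half.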
def fold_merge_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
          [{ return matchFoldMergeToZext(*${d}, MRI); }]),
  (apply [{ applyFoldMergeToZext(*${d}, MRI, B, Observer); }])
>;

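// Replace G_ANYEXT with G_ZEXT where the zero-extension is expected to be
// free, so that later combines see more known bits.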
def mutate_anyext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_ANYEXT):$d,
          [{ return matchMutateAnyExtToZExt(*${d}, MRI); }]),
  (apply [{ applyMutateAnyExtToZExt(*${d}, MRI, B, Observer); }])
>;

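// Split a 128-bit store of zero into two 64-bit stores of zero, which can be
// selected as a store-pair of XZR.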
def split_store_zero_128 : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_STORE):$d,
          [{ return matchSplitStoreZero128(*${d}, MRI); }]),
  (apply [{ applySplitStoreZero128(*${d}, MRI, B, Observer); }])
>;

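// Lower a vector G_SEXT_INREG into a shift-left / arithmetic-shift-right pair.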
def vector_sext_inreg_to_shift : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
          [{ return matchVectorSextInReg(*${d}, MRI); }]),
  (apply [{ applyVectorSextInReg(*${d}, MRI, B, Observer); }])
>;

// Post-legalization combines which should happen at all optimization levels
// (e.g. ones that facilitate matching for the selector, such as lowering to
// target-specific pseudos).
def AArch64PostLegalizerLowering
    : GICombinerHelper<"AArch64PostLegalizerLoweringImpl",
                       [shuffle_vector_lowering, vashr_vlshr_imm,
                        icmp_lowering, build_vector_lowering,
                        lower_vector_fcmp, form_truncstore,
                        vector_sext_inreg_to_shift]> {
}

// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombiner
    : GICombinerHelper<"AArch64PostLegalizerCombinerImpl",
                       [copy_prop, combines_for_extload,
                        sext_trunc_sextload, mutate_anyext_to_zext,
                        hoist_logic_op_with_same_opcode_hands,
                        redundant_and, xor_of_and_with_same_reg,
                        extractvecelt_pairwise_add, redundant_or,
                        mul_const, redundant_sext_inreg,
                        form_bitfield_extract, rotate_out_of_range,
                        icmp_to_true_false_known_bits, merge_unmerge,
                        select_combines, fold_merge_to_zext,
                        constant_fold, identity_combines,
                        ptr_add_immed_chain, overlapping_and,
                        split_store_zero_128, undef_combines,
                        select_to_minmax]> {
}